hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
311c7735fdf609bdc8f1e5a2ec45500065ccbac7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/LegacyTHFunctionsCUDA.h> #include <ATen/native/UnaryOps.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/hip/LaunchUtils.h> #include <ATen/AccumulateType.h> #include <THH/THHReduceApplyUtils.cuh> #include <THH/THHTensorMathReduce.cuh> #include <THH/THHNumerics.cuh> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand_kernel.h> namespace at { namespace native { namespace { #define MAX_NUM_BLOCKS 64 // Normalizes the L1 norm of every row to 1; used by multinomial template <typename scalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void renormRowsL1(scalar_t* dist, long rows, long cols) { extern __shared__ unsigned char my_smem[]; scalar_t *smem = reinterpret_cast<scalar_t *>(my_smem); scalar_t zero = static_cast<scalar_t>(0); scalar_t val; for (int64_t row = blockIdx.x; row < rows; row += gridDim.x) { scalar_t sum = static_cast<scalar_t>(0); for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) { val = dist[row * cols + col]; assert(!THCNumerics<scalar_t>::lt(val, zero)); // ! < 0 for NaN handling sum = sum + val; } sum = reduceBlock(smem, blockDim.x, sum, ReduceAdd<scalar_t>(), zero); if (threadIdx.x == 0) { assert(!THCNumerics<scalar_t>::lt(val, zero)); // ! < 0 for NaN handling smem[0] = sum; } __syncthreads(); sum = smem[0]; if (sum > zero) { for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) { dist[row * cols + col] = dist[row * cols + col] / sum; } } } } void renormRows(Tensor& t) { TORCH_CHECK(t.dim() == 2); int64_t rows = t.size(0); int64_t cols = t.size(1); auto props = at::cuda::getCurrentDeviceProperties(); assert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; dim3 grid(rows < numSM * 4 ? 
rows : numSM * 4); dim3 block(cols < maxThreads ? cols : maxThreads); AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "renormRows_cuda", [&] { hipLaunchKernelGGL(( renormRowsL1<scalar_t>) , dim3(grid), dim3(block), block.x * sizeof(scalar_t), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), t.data_ptr<scalar_t>(), rows, cols); }); } template <typename scalar_t> __device__ int binarySearchForMultinomial(scalar_t* cumdist, scalar_t* dist, int size, scalar_t val) { int start = 0; int end = size; // cumdist[size - 1] = 0 => all zero prob dist assert(cumdist[size - 1] > static_cast<scalar_t>(0)); while (end - start > 0) { int mid = start + (end - start) / 2; scalar_t midVal = cumdist[mid]; if (midVal < val) { start = mid + 1; } else { end = mid; } } if (start == size) { // No probability mass or precision problems; just return the // first non-zero element by setting start to size-1 here, // the code below will move it to the last non-zero probability // this actually can happen when the random number is 1 // (github pytorch issue #4858). start = size - 1; } while(start >= 1 && dist[start] == 0) start--; return start; } template <typename scalar_t> __global__ void sampleMultinomialWithReplacement(std::pair<uint64_t, uint64_t> seeds, int totalSamples, int64_t* dest, int64_t distributions, int categories, scalar_t* normDistPrefixSum, scalar_t* normDist) { // At the moment, each warp computes one sample value in the binary // search due to divergence. It seems possible to compute multiple // values and limit divergence though later on. 
// global index formula for 1D grid of 2D blocks int idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; hiprandStatePhilox4_32_10_t state; hiprand_init(seeds.first, idx, seeds.second, &state); // The block determines the distribution for which we generate a point for (int64_t curDist = blockIdx.x; curDist < distributions; curDist += gridDim.x) { for (int sampleBase = 0; sampleBase < totalSamples; sampleBase += blockDim.y) { // The warp determines the sample int sample = sampleBase + threadIdx.y; // All threads participate in this auto rand = hiprand_uniform4(&state); scalar_t r = static_cast<scalar_t>(rand.x); if (threadIdx.x == 0 && sample < totalSamples) { // Find the bucket that a uniform sample lies in int choice = binarySearchForMultinomial<scalar_t>( normDistPrefixSum + curDist * categories, normDist + curDist * categories, categories, r); // Torch indices are 1-based dest[curDist * totalSamples + sample] = choice; } } } } template <typename scalar_t> __global__ void sampleMultinomialWithoutReplacement(std::pair<uint64_t, uint64_t> seeds, int totalSamples, int sample, int64_t* dest, int64_t distributions, int categories, scalar_t* origDist, scalar_t* normDistPrefixSum) { // At the moment, each warp computes one sample value in the binary // search due to divergence. It seems possible to compute multiple // values and limit divergence though later on. 
// global index formula for 1D grid of 2D blocks int idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; hiprandStatePhilox4_32_10_t state; hiprand_init(seeds.first, idx, seeds.second, &state); // The block and warp determines the distribution for which we // generate a point for (int64_t curDistBase = blockIdx.x * blockDim.y; curDistBase < distributions; curDistBase += gridDim.x * blockDim.y) { // The warp determines the distribution int64_t curDist = curDistBase + threadIdx.y; // All threads must participate in this auto rand = hiprand_uniform4(&state); scalar_t r = static_cast<scalar_t>(rand.x); if (threadIdx.x == 0 && curDist < distributions) { // Find the bucket that a uniform sample lies in int choice = binarySearchForMultinomial<scalar_t>( normDistPrefixSum + curDist * categories, origDist + curDist * categories, categories, r); // Torch indices are 1-based dest[curDist * totalSamples + sample] = choice; // Without replacement, so update the original probability so it // is not considered a second time origDist[curDist * categories + choice] = static_cast<scalar_t>(0); } } } template <typename scalar_t, typename accscalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void sampleMultinomialOnce(int64_t* dest, int64_t distributions, int categories, scalar_t* sampled, scalar_t* dist, int stride_dist, // dist->stride(0) int stride_categories // dist->stride(1) ) { extern __shared__ unsigned char my_smem[]; __shared__ bool found; // Shared Memory hold blockdim.x T for holding the cumulative sum, // blockDim.x AccT for normalizing the probabilities, scalar_t *smem = reinterpret_cast<scalar_t *>(my_smem); accscalar_t *asmem = reinterpret_cast<accscalar_t *>(&my_smem[blockDim.x * sizeof(scalar_t)]); accscalar_t accZero = static_cast<accscalar_t>(0); scalar_t zero = static_cast<scalar_t>(0); for (int64_t curDist = blockIdx.x; curDist < distributions; curDist += gridDim.x) { // Each block handles one 
distribution // First pass, find the total sum of the distribution accscalar_t sum = accZero; scalar_t val; for (int cat = threadIdx.x; cat < categories; cat += blockDim.x) { val = dist[curDist * stride_dist + cat * stride_categories]; assert(val >= zero); assert(!THCNumerics<scalar_t>::isinf(val)); assert(!THCNumerics<scalar_t>::isnan(val)); sum = sum + static_cast<accscalar_t>(val); } // threadIdx.x == 0 has the sum value from this sum = reduceBlock(asmem, blockDim.x, sum, ReduceAdd<accscalar_t>(), accZero); // Broadcast sum and sample value if (threadIdx.x == 0) { // Make sure the sum of our distribution didn't overflow assert(!THCNumerics<accscalar_t>::isinf(sum)); assert(sum > accZero); asmem[0] = sum; smem[0] = sampled[curDist]; } __syncthreads(); sum = asmem[0]; scalar_t sample = smem[0]; __syncthreads(); if (sum == accZero) { // Choose the first element if (threadIdx.x == 0) { dest[curDist] = 0; } continue; } int chunks = (categories + (int)blockDim.x - 1) / blockDim.x; scalar_t prevHighProb = zero; found = false; for (int chunk = 0; chunk < chunks && !found; ++chunk) { // All threads in bounds load a value int cat = chunk * blockDim.x + threadIdx.x; accscalar_t a_dist_val = cat < categories ? static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum : accZero; scalar_t dist_val = static_cast<scalar_t>(a_dist_val); smem[threadIdx.x] = dist_val; __syncthreads(); // Perform an inclusive prefix sum of the shared memory contents for (int offset = 1; offset < blockDim.x; offset *= 2) { scalar_t val = zero; if (threadIdx.x >= offset) { val = smem[threadIdx.x - offset] + smem[threadIdx.x]; } __syncthreads(); if (threadIdx.x >= offset) { smem[threadIdx.x] = val; } __syncthreads(); } // Each thread will check to see if the sample falls in its // bucket scalar_t curBucket = smem[threadIdx.x] + prevHighProb; scalar_t prevBucket = threadIdx.x == 0 ? 
prevHighProb : smem[threadIdx.x - 1] + prevHighProb; bool inBucket = (cat < categories) && (!(sample >= curBucket) && (sample >= prevBucket) && (dist_val > zero)); if (inBucket) { // We're done; we have the sample // Torch indices are 1-based dest[curDist] = cat; found = true; } // Store the previous scan's high value for future use prevHighProb = prevHighProb + smem[blockDim.x - 1]; __syncthreads(); } if (threadIdx.x == 0 && !found) { // This should address a rare bug where we don't select a valid index. This likely occurs when // due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1, but // and our uniform sample is greater than this value. In this case we likely have unitialized memory // in dest[curDist]. So basically we will loop through the distribution and pick the largest index // where the distribution is non-zero. This is obviously terribly inefficient, but due to the // rarity in which this occurs, this should not be an issue. for (int cat = categories - 1; cat >= 0; --cat) { if (dist[curDist * stride_dist + cat * stride_categories] > zero) { dest[curDist] = cat; break; } } } } } void multinomial_kernel_impl(Tensor& result, const Tensor& self, const int64_t n_sample, const bool with_replacement, Generator* generator) { auto gen = get_generator_or_default<CUDAGenerator>(generator, cuda::detail::getDefaultCUDAGenerator()); int inputSize = self.dim(); int64_t numDist = inputSize == 1 ? 1 : self.size(0); int numCategories = inputSize == 1 ? self.size(0) : self.size(1); // Restructure data for 2d auto self_v = inputSize == 1 ? 
self.view({numDist, numCategories}) : self; result.resize_({numDist, n_sample}); AT_DISPATCH_FLOATING_TYPES(self_v.scalar_type(), "multinomial_kernel_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto props = at::cuda::getCurrentDeviceProperties(); assert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; int maxShared = props->sharedMemPerBlock; int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads) * (sizeof(scalar_t) + sizeof(accscalar_t)); if (n_sample == 1 && maxShared >= requiredShared) { // Optimized allocation-free implementation // To exploit greater parallelism for the sampling, generate the // Uniform random samples in a separate kernel launch, into // temporarily allocated memory. The device RNG is thread-limited Tensor sampled = native::empty_cuda({numDist, n_sample}, self_v.options()); at::native::uniform_cuda_(sampled, 0.0, 1.0, gen); dim3 block(numCategories < maxThreads ? numCategories : maxThreads); dim3 grid(numDist < numSM * 4 ? 
numDist : numSM * 4); hipLaunchKernelGGL(( sampleMultinomialOnce<scalar_t, accscalar_t>) , dim3(grid), dim3(block), requiredShared, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), result.data_ptr<int64_t>(), numDist, numCategories, sampled.data_ptr<scalar_t>(), self_v.data_ptr<scalar_t>(), self_v.stride(0), self_v.stride(1) ); } else { // Generic, slow implementation with memory allocations // For sampling without replacement, we modify the distribution // for subsequent samples in this space Tensor origDist = native::empty_like(self_v); origDist.copy_(self_v); Tensor normDist = native::empty_like(self_v); Tensor prefixSum = native::empty_like(self_v); // Renorm along rows normDist.copy_(origDist); renormRows(normDist); // Prefix sum along rows legacy::cuda::_th_cumsum_out(prefixSum, normDist, 1); std::pair<uint64_t, uint64_t> rng_engine_inputs; if (with_replacement) { { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); // each thread will utilize one random, however, since we have to use // hiprand_uniform4 (See Note [Register spilling in hiprand call for CUDA < 10]), // offset is 4. rng_engine_inputs = gen->philox_engine_inputs(4); } // Sample with replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from one // distribution concurrently. dim3 grid(numDist < MAX_NUM_BLOCKS ? 
numDist : MAX_NUM_BLOCKS); hipLaunchKernelGGL(( sampleMultinomialWithReplacement) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), rng_engine_inputs, n_sample, result.data_ptr<int64_t>(), numDist, numCategories, prefixSum.data_ptr<scalar_t>(), normDist.data_ptr<scalar_t>()); } else { // Sample without replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from a different // distribution concurrently. ptrdiff_t numBlocks = (numDist + 4 - 1) / 4; dim3 grid(numBlocks < MAX_NUM_BLOCKS ? numBlocks : MAX_NUM_BLOCKS); for (int sample = 0; sample < n_sample; ++sample) { if (sample > 0) { // Update probabilities // Renorm along rows normDist.copy_(origDist); renormRows(normDist); // Prefix sum along rows legacy::cuda::_th_cumsum_out(prefixSum, normDist, 1); } { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); // each thread will utilize one random, however, since we have to use // hiprand_uniform4 (See Note [Register spilling in hiprand call for CUDA < 10]), // offset is 4. rng_engine_inputs = gen->philox_engine_inputs(4); } // The kernel can only draw one sample before we have to // recalculate our distribution hipLaunchKernelGGL(( sampleMultinomialWithoutReplacement) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), rng_engine_inputs, n_sample, sample, result.data_ptr<int64_t>(), numDist, numCategories, origDist.data_ptr<scalar_t>(), prefixSum.data_ptr<scalar_t>()); } } } }); if (inputSize == 1) { result.resize_({n_sample}); } } } REGISTER_DISPATCH(multinomial_stub, &multinomial_kernel_impl); }}
311c7735fdf609bdc8f1e5a2ec45500065ccbac7.cu
#include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/LegacyTHFunctionsCUDA.h> #include <ATen/native/UnaryOps.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/cuda/LaunchUtils.h> #include <ATen/AccumulateType.h> #include <THC/THCReduceApplyUtils.cuh> #include <THC/THCTensorMathReduce.cuh> #include <THC/THCNumerics.cuh> #include <curand.h> #include <curand_kernel.h> #include <curand_philox4x32_x.h> namespace at { namespace native { namespace { #define MAX_NUM_BLOCKS 200 // Normalizes the L1 norm of every row to 1; used by multinomial template <typename scalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void renormRowsL1(scalar_t* dist, long rows, long cols) { extern __shared__ unsigned char my_smem[]; scalar_t *smem = reinterpret_cast<scalar_t *>(my_smem); scalar_t zero = static_cast<scalar_t>(0); scalar_t val; for (int64_t row = blockIdx.x; row < rows; row += gridDim.x) { scalar_t sum = static_cast<scalar_t>(0); for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) { val = dist[row * cols + col]; assert(!THCNumerics<scalar_t>::lt(val, zero)); // ! < 0 for NaN handling sum = sum + val; } sum = reduceBlock(smem, blockDim.x, sum, ReduceAdd<scalar_t>(), zero); if (threadIdx.x == 0) { assert(!THCNumerics<scalar_t>::lt(val, zero)); // ! < 0 for NaN handling smem[0] = sum; } __syncthreads(); sum = smem[0]; if (sum > zero) { for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) { dist[row * cols + col] = dist[row * cols + col] / sum; } } } } void renormRows(Tensor& t) { TORCH_CHECK(t.dim() == 2); int64_t rows = t.size(0); int64_t cols = t.size(1); auto props = at::cuda::getCurrentDeviceProperties(); assert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; dim3 grid(rows < numSM * 4 ? rows : numSM * 4); dim3 block(cols < maxThreads ? 
cols : maxThreads); AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "renormRows_cuda", [&] { renormRowsL1<scalar_t> <<<grid, block, block.x * sizeof(scalar_t), at::cuda::getCurrentCUDAStream()>>>(t.data_ptr<scalar_t>(), rows, cols); }); } template <typename scalar_t> __device__ int binarySearchForMultinomial(scalar_t* cumdist, scalar_t* dist, int size, scalar_t val) { int start = 0; int end = size; // cumdist[size - 1] = 0 => all zero prob dist assert(cumdist[size - 1] > static_cast<scalar_t>(0)); while (end - start > 0) { int mid = start + (end - start) / 2; scalar_t midVal = cumdist[mid]; if (midVal < val) { start = mid + 1; } else { end = mid; } } if (start == size) { // No probability mass or precision problems; just return the // first non-zero element by setting start to size-1 here, // the code below will move it to the last non-zero probability // this actually can happen when the random number is 1 // (github pytorch issue #4858). start = size - 1; } while(start >= 1 && dist[start] == 0) start--; return start; } template <typename scalar_t> __global__ void sampleMultinomialWithReplacement(std::pair<uint64_t, uint64_t> seeds, int totalSamples, int64_t* dest, int64_t distributions, int categories, scalar_t* normDistPrefixSum, scalar_t* normDist) { // At the moment, each warp computes one sample value in the binary // search due to divergence. It seems possible to compute multiple // values and limit divergence though later on. 
// global index formula for 1D grid of 2D blocks int idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; curandStatePhilox4_32_10_t state; curand_init(seeds.first, idx, seeds.second, &state); // The block determines the distribution for which we generate a point for (int64_t curDist = blockIdx.x; curDist < distributions; curDist += gridDim.x) { for (int sampleBase = 0; sampleBase < totalSamples; sampleBase += blockDim.y) { // The warp determines the sample int sample = sampleBase + threadIdx.y; // All threads participate in this auto rand = curand_uniform4(&state); scalar_t r = static_cast<scalar_t>(rand.x); if (threadIdx.x == 0 && sample < totalSamples) { // Find the bucket that a uniform sample lies in int choice = binarySearchForMultinomial<scalar_t>( normDistPrefixSum + curDist * categories, normDist + curDist * categories, categories, r); // Torch indices are 1-based dest[curDist * totalSamples + sample] = choice; } } } } template <typename scalar_t> __global__ void sampleMultinomialWithoutReplacement(std::pair<uint64_t, uint64_t> seeds, int totalSamples, int sample, int64_t* dest, int64_t distributions, int categories, scalar_t* origDist, scalar_t* normDistPrefixSum) { // At the moment, each warp computes one sample value in the binary // search due to divergence. It seems possible to compute multiple // values and limit divergence though later on. 
// global index formula for 1D grid of 2D blocks int idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; curandStatePhilox4_32_10_t state; curand_init(seeds.first, idx, seeds.second, &state); // The block and warp determines the distribution for which we // generate a point for (int64_t curDistBase = blockIdx.x * blockDim.y; curDistBase < distributions; curDistBase += gridDim.x * blockDim.y) { // The warp determines the distribution int64_t curDist = curDistBase + threadIdx.y; // All threads must participate in this auto rand = curand_uniform4(&state); scalar_t r = static_cast<scalar_t>(rand.x); if (threadIdx.x == 0 && curDist < distributions) { // Find the bucket that a uniform sample lies in int choice = binarySearchForMultinomial<scalar_t>( normDistPrefixSum + curDist * categories, origDist + curDist * categories, categories, r); // Torch indices are 1-based dest[curDist * totalSamples + sample] = choice; // Without replacement, so update the original probability so it // is not considered a second time origDist[curDist * categories + choice] = static_cast<scalar_t>(0); } } } template <typename scalar_t, typename accscalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void sampleMultinomialOnce(int64_t* dest, int64_t distributions, int categories, scalar_t* sampled, scalar_t* dist, int stride_dist, // dist->stride(0) int stride_categories // dist->stride(1) ) { extern __shared__ unsigned char my_smem[]; __shared__ bool found; // Shared Memory hold blockdim.x T for holding the cumulative sum, // blockDim.x AccT for normalizing the probabilities, scalar_t *smem = reinterpret_cast<scalar_t *>(my_smem); accscalar_t *asmem = reinterpret_cast<accscalar_t *>(&my_smem[blockDim.x * sizeof(scalar_t)]); accscalar_t accZero = static_cast<accscalar_t>(0); scalar_t zero = static_cast<scalar_t>(0); for (int64_t curDist = blockIdx.x; curDist < distributions; curDist += gridDim.x) { // Each block handles one 
distribution // First pass, find the total sum of the distribution accscalar_t sum = accZero; scalar_t val; for (int cat = threadIdx.x; cat < categories; cat += blockDim.x) { val = dist[curDist * stride_dist + cat * stride_categories]; assert(val >= zero); assert(!THCNumerics<scalar_t>::isinf(val)); assert(!THCNumerics<scalar_t>::isnan(val)); sum = sum + static_cast<accscalar_t>(val); } // threadIdx.x == 0 has the sum value from this sum = reduceBlock(asmem, blockDim.x, sum, ReduceAdd<accscalar_t>(), accZero); // Broadcast sum and sample value if (threadIdx.x == 0) { // Make sure the sum of our distribution didn't overflow assert(!THCNumerics<accscalar_t>::isinf(sum)); assert(sum > accZero); asmem[0] = sum; smem[0] = sampled[curDist]; } __syncthreads(); sum = asmem[0]; scalar_t sample = smem[0]; __syncthreads(); if (sum == accZero) { // Choose the first element if (threadIdx.x == 0) { dest[curDist] = 0; } continue; } int chunks = (categories + (int)blockDim.x - 1) / blockDim.x; scalar_t prevHighProb = zero; found = false; for (int chunk = 0; chunk < chunks && !found; ++chunk) { // All threads in bounds load a value int cat = chunk * blockDim.x + threadIdx.x; accscalar_t a_dist_val = cat < categories ? static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum : accZero; scalar_t dist_val = static_cast<scalar_t>(a_dist_val); smem[threadIdx.x] = dist_val; __syncthreads(); // Perform an inclusive prefix sum of the shared memory contents for (int offset = 1; offset < blockDim.x; offset *= 2) { scalar_t val = zero; if (threadIdx.x >= offset) { val = smem[threadIdx.x - offset] + smem[threadIdx.x]; } __syncthreads(); if (threadIdx.x >= offset) { smem[threadIdx.x] = val; } __syncthreads(); } // Each thread will check to see if the sample falls in its // bucket scalar_t curBucket = smem[threadIdx.x] + prevHighProb; scalar_t prevBucket = threadIdx.x == 0 ? 
prevHighProb : smem[threadIdx.x - 1] + prevHighProb; bool inBucket = (cat < categories) && (!(sample >= curBucket) && (sample >= prevBucket) && (dist_val > zero)); if (inBucket) { // We're done; we have the sample // Torch indices are 1-based dest[curDist] = cat; found = true; } // Store the previous scan's high value for future use prevHighProb = prevHighProb + smem[blockDim.x - 1]; __syncthreads(); } if (threadIdx.x == 0 && !found) { // This should address a rare bug where we don't select a valid index. This likely occurs when // due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1, but // and our uniform sample is greater than this value. In this case we likely have unitialized memory // in dest[curDist]. So basically we will loop through the distribution and pick the largest index // where the distribution is non-zero. This is obviously terribly inefficient, but due to the // rarity in which this occurs, this should not be an issue. for (int cat = categories - 1; cat >= 0; --cat) { if (dist[curDist * stride_dist + cat * stride_categories] > zero) { dest[curDist] = cat; break; } } } } } void multinomial_kernel_impl(Tensor& result, const Tensor& self, const int64_t n_sample, const bool with_replacement, Generator* generator) { auto gen = get_generator_or_default<CUDAGenerator>(generator, cuda::detail::getDefaultCUDAGenerator()); int inputSize = self.dim(); int64_t numDist = inputSize == 1 ? 1 : self.size(0); int numCategories = inputSize == 1 ? self.size(0) : self.size(1); // Restructure data for 2d auto self_v = inputSize == 1 ? 
self.view({numDist, numCategories}) : self; result.resize_({numDist, n_sample}); AT_DISPATCH_FLOATING_TYPES(self_v.scalar_type(), "multinomial_kernel_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto props = at::cuda::getCurrentDeviceProperties(); assert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; int maxShared = props->sharedMemPerBlock; int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads) * (sizeof(scalar_t) + sizeof(accscalar_t)); if (n_sample == 1 && maxShared >= requiredShared) { // Optimized allocation-free implementation // To exploit greater parallelism for the sampling, generate the // Uniform random samples in a separate kernel launch, into // temporarily allocated memory. The device RNG is thread-limited Tensor sampled = native::empty_cuda({numDist, n_sample}, self_v.options()); at::native::uniform_cuda_(sampled, 0.0, 1.0, gen); dim3 block(numCategories < maxThreads ? numCategories : maxThreads); dim3 grid(numDist < numSM * 4 ? 
numDist : numSM * 4); sampleMultinomialOnce<scalar_t, accscalar_t> <<<grid, block, requiredShared, at::cuda::getCurrentCUDAStream()>>>( result.data_ptr<int64_t>(), numDist, numCategories, sampled.data_ptr<scalar_t>(), self_v.data_ptr<scalar_t>(), self_v.stride(0), self_v.stride(1) ); } else { // Generic, slow implementation with memory allocations // For sampling without replacement, we modify the distribution // for subsequent samples in this space Tensor origDist = native::empty_like(self_v); origDist.copy_(self_v); Tensor normDist = native::empty_like(self_v); Tensor prefixSum = native::empty_like(self_v); // Renorm along rows normDist.copy_(origDist); renormRows(normDist); // Prefix sum along rows legacy::cuda::_th_cumsum_out(prefixSum, normDist, 1); std::pair<uint64_t, uint64_t> rng_engine_inputs; if (with_replacement) { { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); // each thread will utilize one random, however, since we have to use // curand_uniform4 (See Note [Register spilling in curand call for CUDA < 10]), // offset is 4. rng_engine_inputs = gen->philox_engine_inputs(4); } // Sample with replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from one // distribution concurrently. dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS); sampleMultinomialWithReplacement <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( rng_engine_inputs, n_sample, result.data_ptr<int64_t>(), numDist, numCategories, prefixSum.data_ptr<scalar_t>(), normDist.data_ptr<scalar_t>()); } else { // Sample without replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. 
dim3 block(32, 4); // Each warp in a block will generate a sample from a different // distribution concurrently. ptrdiff_t numBlocks = (numDist + 4 - 1) / 4; dim3 grid(numBlocks < MAX_NUM_BLOCKS ? numBlocks : MAX_NUM_BLOCKS); for (int sample = 0; sample < n_sample; ++sample) { if (sample > 0) { // Update probabilities // Renorm along rows normDist.copy_(origDist); renormRows(normDist); // Prefix sum along rows legacy::cuda::_th_cumsum_out(prefixSum, normDist, 1); } { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); // each thread will utilize one random, however, since we have to use // curand_uniform4 (See Note [Register spilling in curand call for CUDA < 10]), // offset is 4. rng_engine_inputs = gen->philox_engine_inputs(4); } // The kernel can only draw one sample before we have to // recalculate our distribution sampleMultinomialWithoutReplacement <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( rng_engine_inputs, n_sample, sample, result.data_ptr<int64_t>(), numDist, numCategories, origDist.data_ptr<scalar_t>(), prefixSum.data_ptr<scalar_t>()); } } } }); if (inputSize == 1) { result.resize_({n_sample}); } } } REGISTER_DISPATCH(multinomial_stub, &multinomial_kernel_impl); }}
c46f3c6200deb6ceadea12e560c35f26c6e8f4a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <cmeansMultiGPU.h> #include <cmeansMultiGPUcu.h> #include <float.h> __device__ float parallelSum(float* data, const unsigned int ndata) { const unsigned int tid = threadIdx.x; float t; __syncthreads(); // Butterfly sum. ndata MUST be a power of 2. for(unsigned int bit = ndata >> 1; bit > 0; bit >>= 1) { t = data[tid] + data[tid^bit]; __syncthreads(); data[tid] = t; __syncthreads(); } return data[tid]; } __global__ void UpdateClusterCentersGPU(const float* oldClusters, const float* events, float* newClusters, float* memberships, float* denoms, int my_num_events) { float membershipValue;//, denominator; int d = blockIdx.y; int event_matrix_offset = my_num_events*d; int membership_matrix_offset = my_num_events*blockIdx.x; __shared__ float numerators[NUM_THREADS_UPDATE]; // Sum of the memberships computed by each thread // The sum of all of these denominators together is effectively the size of the cluster __shared__ float denominators[NUM_THREADS_UPDATE]; int tid = threadIdx.x; // initialize numerators and denominators to 0 denominators[tid] = 0; numerators[tid] = 0; __syncthreads(); // Compute new membership value for each event // Add its contribution to the numerator and denominator for that thread for(int j = tid; j < my_num_events; j+=NUM_THREADS_UPDATE){ membershipValue = memberships[membership_matrix_offset + j]; numerators[tid] += events[event_matrix_offset + j]*membershipValue; denominators[tid] += membershipValue; } __syncthreads(); if(tid == 0){ // Sum up the numerator/denominator, one for this block for(int j = 1; j < NUM_THREADS_UPDATE; j++){ numerators[0] += numerators[j]; } if(d == 0) { for(int j = 1; j < NUM_THREADS_UPDATE; j++){ denominators[0] += denominators[j]; } denoms[blockIdx.x] = denominators[0]; } // Set the new center for this block //newClusters[blockIdx.x*NUM_DIMENSIONS + d] = 
numerators[0]/denominators[0]; newClusters[blockIdx.x*NUM_DIMENSIONS + d] = numerators[0]; } } __global__ void UpdateClusterCentersGPU2(const float* oldClusters, const float* events, float* newClusters, float* memberships, int my_num_events) { float membershipValue; float eventValue; // Compute cluster range for this block int c_start = blockIdx.x*NUM_CLUSTERS_PER_BLOCK; int num_c = NUM_CLUSTERS_PER_BLOCK; // Handle boundary condition //if(blockIdx.x == (gridDim.x-1) && (NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK))) //{ // num_c = NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK; //} // Dimension index int d = blockIdx.y; int event_matrix_offset = my_num_events*d; __shared__ float numerators[NUM_THREADS_UPDATE*NUM_CLUSTERS_PER_BLOCK]; int tid = threadIdx.x; // initialize numerators and denominators to 0 for(int c = 0; c < num_c; c++) { numerators[c*NUM_THREADS_UPDATE+tid] = 0; } // Compute new membership value for each event // Add its contribution to the numerator and denominator for that thread for(int j = tid; j < my_num_events; j+=NUM_THREADS_UPDATE){ eventValue = events[event_matrix_offset + j]; for(int c = 0; c < num_c; c++) { membershipValue = memberships[(c+c_start)*my_num_events + j]; numerators[c*NUM_THREADS_UPDATE+tid] += eventValue*membershipValue; } } __syncthreads(); for(int c = 0; c < num_c; c++) { numerators[c*NUM_THREADS_UPDATE+tid] = parallelSum(&numerators[NUM_THREADS_UPDATE*c],NUM_THREADS_UPDATE); } __syncthreads(); if(tid == 0){ for(int c = 0; c < num_c; c++) { // Set the new center for this block newClusters[(c+c_start)*NUM_DIMENSIONS + d] = numerators[c*NUM_THREADS_UPDATE]; } } } __global__ void UpdateClusterCentersGPU3(const float* oldClusters, const float* events, float* newClusters, float* memberships, int my_num_events) { float membershipValue; float eventValue; // Compute cluster range for this block int c_start = blockIdx.y*NUM_CLUSTERS_PER_BLOCK; int num_c = NUM_CLUSTERS_PER_BLOCK; // Handle boundary condition if(blockIdx.y == gridDim.y-1 && 
NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK) { num_c = NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK; } // Dimension index int d = blockIdx.x; int event_matrix_offset = my_num_events*d; __shared__ float numerators[NUM_THREADS_UPDATE*NUM_CLUSTERS_PER_BLOCK]; int tid = threadIdx.x; // initialize numerators and denominators to 0 for(int c = 0; c < num_c; c++) { numerators[c*NUM_THREADS_UPDATE+tid] = 0; } // Compute new membership value for each event // Add its contribution to the numerator and denominator for that thread for(int j = tid; j < my_num_events; j+=NUM_THREADS_UPDATE){ eventValue = events[event_matrix_offset + j]; numerators[0*NUM_THREADS_UPDATE+tid] += eventValue*memberships[(0+c_start)*my_num_events + j]; numerators[1*NUM_THREADS_UPDATE+tid] += eventValue*memberships[(1+c_start)*my_num_events + j]; numerators[2*NUM_THREADS_UPDATE+tid] += eventValue*memberships[(2+c_start)*my_num_events + j]; numerators[3*NUM_THREADS_UPDATE+tid] += eventValue*memberships[(3+c_start)*my_num_events + j]; } __syncthreads(); for(int c = 0; c < num_c; c++) { numerators[c*NUM_THREADS_UPDATE+tid] = parallelSum(&numerators[NUM_THREADS_UPDATE*c],NUM_THREADS_UPDATE); } __syncthreads(); if(tid == 0){ for(int c = 0; c < num_c; c++) { // Set the new center for this block newClusters[(c+c_start)*NUM_DIMENSIONS + d] = numerators[c*NUM_THREADS_UPDATE]; } } } __global__ void ComputeClusterSizes(float* memberships, float* sizes, int my_num_events) { __shared__ float partial_sums[512]; partial_sums[threadIdx.x] = 0.0f; for(int i=threadIdx.x; i < my_num_events; i += 512) { partial_sums[threadIdx.x] += memberships[blockIdx.x*my_num_events+i]; } __syncthreads(); float sum = parallelSum(partial_sums,512); __syncthreads(); if(threadIdx.x) { sizes[blockIdx.x] = sum; } } __global__ void ComputeDistanceMatrix(const float* clusters, const float* events, float* matrix, int my_num_events) { // copy the relavant center for this block into shared memory __shared__ float center[NUM_DIMENSIONS]; for(int j = 
threadIdx.x; j < NUM_DIMENSIONS; j+=NUM_THREADS_DISTANCE){ center[j] = clusters[blockIdx.y*NUM_DIMENSIONS+j]; } __syncthreads(); int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < my_num_events) { matrix[blockIdx.y*my_num_events+i] = CalculateDistanceGPU(center,events,blockIdx.y,i,my_num_events); } } __global__ void ComputeMembershipMatrix(float* distances, float* memberships, int my_num_events) { float membershipValue; int i = blockIdx.x * blockDim.x + threadIdx.x; // For each event if(i < my_num_events) { membershipValue = MembershipValueGPU(blockIdx.y, i, distances, my_num_events); #if FUZZINESS_SQUARE // This is much faster than the pow function membershipValue = membershipValue*membershipValue; #else membershipValue = __powf(membershipValue,FUZZINESS)+1e-30; #endif memberships[blockIdx.y*my_num_events+i] = membershipValue; } } __global__ void ComputeMembershipMatrixLinear(float* distances, int my_num_events) { float membershipValue; float denom = 0.0f; float dist; int i = blockIdx.x * blockDim.x + threadIdx.x; // For each event if(i < my_num_events) { for(int c=0; c < NUM_CLUSTERS; c++) { dist = distances[c*my_num_events+i]; #if FUZZINESS_SQUARE dist = dist*dist; #else dist = __powf(dist,2.0f/(FUZZINESS-1.0f))+1e-30; #endif denom += 1.0f / dist; // what if dist is really big? 
} for(int c=0; c < NUM_CLUSTERS; c++) { // not enough shared memory to store an array of distances // for each thread, so just recompute them like above dist = distances[c*my_num_events+i]; #if FUZZINESS_SQUARE dist = dist*dist; membershipValue = 1.0f/(dist*denom); // u membershipValue *= membershipValue; // u^p, p=2 #else dist = __powf(dist,2.0f/(FUZZINESS-1.0f))+1e-30; membershipValue = 1.0f/(dist*denom); // u membershipValue = __powf(membershipValue,FUZZINESS); // u^p #endif distances[c*my_num_events+i] = membershipValue; } } } __global__ void ComputeNormalizedMembershipMatrix(float* distances, float* memberships, int my_num_events) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < my_num_events) { memberships[blockIdx.y*my_num_events+i] = MembershipValueGPU(blockIdx.y, i, distances,my_num_events); } } __global__ void ComputeNormalizedMembershipMatrixLinear(float* distances, int my_num_events) { float membershipValue; float denom = 0.0f; float dist; int i = blockIdx.x * blockDim.x + threadIdx.x; // For each event if(i < my_num_events) { for(int c=0; c < NUM_CLUSTERS; c++) { dist = distances[c*my_num_events+i]; #if FUZZINESS_SQUARE dist = dist*dist; #else dist = __powf(dist,2.0f/(FUZZINESS-1.0f)); #endif denom += 1.0f / dist; } for(int c=0; c < NUM_CLUSTERS; c++) { // not enough shared memory to store an array of distances // for each thread, so just recompute them like above dist = distances[c*my_num_events+i]; #if FUZZINESS_SQUARE dist = dist*dist; membershipValue = 1.0f/(dist*denom); // u #else dist = __powf(dist,2.0f/(FUZZINESS-1.0f))+1e-30; membershipValue = 1.0f/(dist*denom); // u #endif distances[c*my_num_events+i] = membershipValue; } } } __device__ float MembershipValueGPU(int clusterIndex, int eventIndex, const float* distanceMatrix, int my_num_events){ float myClustDist = 0; // Compute the distance from this event to the given cluster myClustDist = distanceMatrix[clusterIndex*my_num_events+eventIndex]; float sum =0; float otherClustDist; // 
Compute the distance to all other clusters // Note: This is kind of inefficient, because the distance to every other cluster // is being re-computed by every other block // If each block handled a certain set of events rather than a cluster // we might be able to avoid this. for(int j = 0; j< NUM_CLUSTERS; j++){ otherClustDist = distanceMatrix[j*my_num_events+eventIndex]; #if FUZZINESS_SQUARE sum += (myClustDist/otherClustDist)*(myClustDist/otherClustDist); #else sum += __powf((myClustDist/otherClustDist),(2.0f/(FUZZINESS-1.0f))); #endif } return 1/sum; } __device__ float CalculateDistanceGPU(const float* clusters, const float* events, int clusterIndex, int eventIndex, int my_num_events){ float sum = 0; float tmp; #if DISTANCE_MEASURE == 0 #pragma unroll 1 // Prevent compiler from unrolling this loop too much, eats up too many registers for(int i = 0; i < NUM_DIMENSIONS; i++){ tmp = events[i*my_num_events+eventIndex] - clusters[i]; //tmp = events[i*my_num_events+eventIndex] - clusters[clusterIndex*NUM_DIMENSIONS +i]; //tmp = events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i]; sum += tmp*tmp; } sum = sqrt(sum+1e-30); #endif #if DISTANCE_MEASURE == 1 #pragma unroll 1 // Prevent compiler from unrolling this loop too much, eats up too many registers for(int i = 0; i < NUM_DIMENSIONS; i++){ tmp = events[i*my_num_events+eventIndex] - clusters[i]; //tmp = events[i*my_num_events+eventIndex] - clusters[clusterIndex*NUM_DIMENSIONS +i]; //tmp = events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i]; sum += abs(tmp)+1e-30; } #endif #if DISTANCE_MEASURE == 2 #pragma unroll 1 // Prevent compiler from unrolling this loop too much, eats up too many registers for(int i = 0; i < NUM_DIMENSIONS; i++){ tmp = abs(events[i*my_num_events + eventIndex] - clusters[i]); //tmp = abs(events[i*my_num_events + eventIndex] - clusters[clusterIndex*NUM_DIMENSIONS + i]); //tmp = abs(events[eventIndex*NUM_DIMENSIONS + i] - 
clusters[clusterIndex*NUM_DIMENSIONS + i]); if(tmp > sum) sum = tmp+1e-30; } #endif return sum; } __device__ float CalculateQII(const float* events, float* distanceMatrix, int cluster_index_I, float* EI, float* numMem, int my_num_events){ EI[threadIdx.x] = 0; numMem[threadIdx.x] = 0; for(int i = threadIdx.x; i < my_num_events; i+=Q_THREADS){ float distance = distanceMatrix[cluster_index_I*my_num_events+i]; float memVal = MembershipValueDist(distanceMatrix, cluster_index_I, i, distance, my_num_events); if(memVal > MEMBER_THRESH){ EI[threadIdx.x] += pow(memVal, 2) * pow(distance, 2); numMem[threadIdx.x]++; } } __syncthreads(); if(threadIdx.x == 0){ for(int i = 1; i < Q_THREADS; i++){ EI[0] += EI[i]; numMem[0] += numMem[i]; } } __syncthreads(); return ((((float)K1) * numMem[0]) - (((float)K2) * EI[0]) - (((float)K3) * NUM_DIMENSIONS)); } __device__ float CalculateQIJ(const float* events, float* distanceMatrix, int cluster_index_I, int cluster_index_J, float * EI, float * EJ, float *numMem, int my_num_events){ EI[threadIdx.x] = 0; EJ[threadIdx.x] = 0; numMem[threadIdx.x] = 0; for(int i = threadIdx.x; i < my_num_events; i+=Q_THREADS){ float distance = distanceMatrix[cluster_index_I*my_num_events+i]; float memValI = MembershipValueDist(distanceMatrix, cluster_index_I, i, distance, my_num_events); if(memValI > MEMBER_THRESH){ EI[threadIdx.x] += pow(memValI, 2) * pow(distance, 2); } distance = distanceMatrix[cluster_index_J*my_num_events+i]; float memValJ = MembershipValueDist(distanceMatrix, cluster_index_J, i, distance, my_num_events); if(memValJ > MEMBER_THRESH){ EJ[threadIdx.x] += pow(memValJ, 2) * pow(distance, 2); } if(memValI > MEMBER_THRESH && memValJ > MEMBER_THRESH){ numMem[threadIdx.x]++; } } __syncthreads(); if(threadIdx.x == 0){ for(int i = 1; i < Q_THREADS; i++){ EI[0] += EI[i]; EJ[0] += EJ[i]; numMem[0] += numMem[i]; } } __syncthreads(); float EB = (EI[0] > EJ[0]) ? 
EI[0] : EJ[0]; return ((-1*((float)K1)*numMem[0]) + ((float)K2)*EB); } __global__ void CalculateQMatrixGPUUpgrade(const float* events, const float* clusters, float* matrix, float* distanceMatrix, int start_row, int my_num_events){ __shared__ float EI[Q_THREADS]; __shared__ float EJ[Q_THREADS]; __shared__ float numMem[Q_THREADS]; int row = blockIdx.x + start_row; int col = blockIdx.y; if(row == col){ matrix[row*NUM_CLUSTERS + col ] = CalculateQII(events, distanceMatrix, row, EI, numMem, my_num_events); } else{ matrix[row*NUM_CLUSTERS + col] = CalculateQIJ(events, distanceMatrix, row, col, EI, EJ, numMem, my_num_events); } } __device__ float MembershipValueDist(float* distanceMatrix, int clusterIndex, int eventIndex, float distance, int my_num_events){ float sum =0.0f; float otherClustDist; for(int j = 0; j< NUM_CLUSTERS; j++){ otherClustDist = distanceMatrix[j*my_num_events+eventIndex]; sum += __powf((float)(distance/otherClustDist),(2.0f/(FUZZINESS-1.0f))); } return 1.0f/sum; }
c46f3c6200deb6ceadea12e560c35f26c6e8f4a1.cu
#include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <cmeansMultiGPU.h> #include <cmeansMultiGPUcu.h> #include <float.h> __device__ float parallelSum(float* data, const unsigned int ndata) { const unsigned int tid = threadIdx.x; float t; __syncthreads(); // Butterfly sum. ndata MUST be a power of 2. for(unsigned int bit = ndata >> 1; bit > 0; bit >>= 1) { t = data[tid] + data[tid^bit]; __syncthreads(); data[tid] = t; __syncthreads(); } return data[tid]; } __global__ void UpdateClusterCentersGPU(const float* oldClusters, const float* events, float* newClusters, float* memberships, float* denoms, int my_num_events) { float membershipValue;//, denominator; int d = blockIdx.y; int event_matrix_offset = my_num_events*d; int membership_matrix_offset = my_num_events*blockIdx.x; __shared__ float numerators[NUM_THREADS_UPDATE]; // Sum of the memberships computed by each thread // The sum of all of these denominators together is effectively the size of the cluster __shared__ float denominators[NUM_THREADS_UPDATE]; int tid = threadIdx.x; // initialize numerators and denominators to 0 denominators[tid] = 0; numerators[tid] = 0; __syncthreads(); // Compute new membership value for each event // Add its contribution to the numerator and denominator for that thread for(int j = tid; j < my_num_events; j+=NUM_THREADS_UPDATE){ membershipValue = memberships[membership_matrix_offset + j]; numerators[tid] += events[event_matrix_offset + j]*membershipValue; denominators[tid] += membershipValue; } __syncthreads(); if(tid == 0){ // Sum up the numerator/denominator, one for this block for(int j = 1; j < NUM_THREADS_UPDATE; j++){ numerators[0] += numerators[j]; } if(d == 0) { for(int j = 1; j < NUM_THREADS_UPDATE; j++){ denominators[0] += denominators[j]; } denoms[blockIdx.x] = denominators[0]; } // Set the new center for this block //newClusters[blockIdx.x*NUM_DIMENSIONS + d] = numerators[0]/denominators[0]; newClusters[blockIdx.x*NUM_DIMENSIONS + d] = numerators[0]; } } 
__global__ void UpdateClusterCentersGPU2(const float* oldClusters, const float* events, float* newClusters, float* memberships, int my_num_events) { float membershipValue; float eventValue; // Compute cluster range for this block int c_start = blockIdx.x*NUM_CLUSTERS_PER_BLOCK; int num_c = NUM_CLUSTERS_PER_BLOCK; // Handle boundary condition //if(blockIdx.x == (gridDim.x-1) && (NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK))) //{ // num_c = NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK; //} // Dimension index int d = blockIdx.y; int event_matrix_offset = my_num_events*d; __shared__ float numerators[NUM_THREADS_UPDATE*NUM_CLUSTERS_PER_BLOCK]; int tid = threadIdx.x; // initialize numerators and denominators to 0 for(int c = 0; c < num_c; c++) { numerators[c*NUM_THREADS_UPDATE+tid] = 0; } // Compute new membership value for each event // Add its contribution to the numerator and denominator for that thread for(int j = tid; j < my_num_events; j+=NUM_THREADS_UPDATE){ eventValue = events[event_matrix_offset + j]; for(int c = 0; c < num_c; c++) { membershipValue = memberships[(c+c_start)*my_num_events + j]; numerators[c*NUM_THREADS_UPDATE+tid] += eventValue*membershipValue; } } __syncthreads(); for(int c = 0; c < num_c; c++) { numerators[c*NUM_THREADS_UPDATE+tid] = parallelSum(&numerators[NUM_THREADS_UPDATE*c],NUM_THREADS_UPDATE); } __syncthreads(); if(tid == 0){ for(int c = 0; c < num_c; c++) { // Set the new center for this block newClusters[(c+c_start)*NUM_DIMENSIONS + d] = numerators[c*NUM_THREADS_UPDATE]; } } } __global__ void UpdateClusterCentersGPU3(const float* oldClusters, const float* events, float* newClusters, float* memberships, int my_num_events) { float membershipValue; float eventValue; // Compute cluster range for this block int c_start = blockIdx.y*NUM_CLUSTERS_PER_BLOCK; int num_c = NUM_CLUSTERS_PER_BLOCK; // Handle boundary condition if(blockIdx.y == gridDim.y-1 && NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK) { num_c = NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK; } // Dimension 
index int d = blockIdx.x; int event_matrix_offset = my_num_events*d; __shared__ float numerators[NUM_THREADS_UPDATE*NUM_CLUSTERS_PER_BLOCK]; int tid = threadIdx.x; // initialize numerators and denominators to 0 for(int c = 0; c < num_c; c++) { numerators[c*NUM_THREADS_UPDATE+tid] = 0; } // Compute new membership value for each event // Add its contribution to the numerator and denominator for that thread for(int j = tid; j < my_num_events; j+=NUM_THREADS_UPDATE){ eventValue = events[event_matrix_offset + j]; numerators[0*NUM_THREADS_UPDATE+tid] += eventValue*memberships[(0+c_start)*my_num_events + j]; numerators[1*NUM_THREADS_UPDATE+tid] += eventValue*memberships[(1+c_start)*my_num_events + j]; numerators[2*NUM_THREADS_UPDATE+tid] += eventValue*memberships[(2+c_start)*my_num_events + j]; numerators[3*NUM_THREADS_UPDATE+tid] += eventValue*memberships[(3+c_start)*my_num_events + j]; } __syncthreads(); for(int c = 0; c < num_c; c++) { numerators[c*NUM_THREADS_UPDATE+tid] = parallelSum(&numerators[NUM_THREADS_UPDATE*c],NUM_THREADS_UPDATE); } __syncthreads(); if(tid == 0){ for(int c = 0; c < num_c; c++) { // Set the new center for this block newClusters[(c+c_start)*NUM_DIMENSIONS + d] = numerators[c*NUM_THREADS_UPDATE]; } } } __global__ void ComputeClusterSizes(float* memberships, float* sizes, int my_num_events) { __shared__ float partial_sums[512]; partial_sums[threadIdx.x] = 0.0f; for(int i=threadIdx.x; i < my_num_events; i += 512) { partial_sums[threadIdx.x] += memberships[blockIdx.x*my_num_events+i]; } __syncthreads(); float sum = parallelSum(partial_sums,512); __syncthreads(); if(threadIdx.x) { sizes[blockIdx.x] = sum; } } __global__ void ComputeDistanceMatrix(const float* clusters, const float* events, float* matrix, int my_num_events) { // copy the relavant center for this block into shared memory __shared__ float center[NUM_DIMENSIONS]; for(int j = threadIdx.x; j < NUM_DIMENSIONS; j+=NUM_THREADS_DISTANCE){ center[j] = clusters[blockIdx.y*NUM_DIMENSIONS+j]; } 
__syncthreads(); int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < my_num_events) { matrix[blockIdx.y*my_num_events+i] = CalculateDistanceGPU(center,events,blockIdx.y,i,my_num_events); } } __global__ void ComputeMembershipMatrix(float* distances, float* memberships, int my_num_events) { float membershipValue; int i = blockIdx.x * blockDim.x + threadIdx.x; // For each event if(i < my_num_events) { membershipValue = MembershipValueGPU(blockIdx.y, i, distances, my_num_events); #if FUZZINESS_SQUARE // This is much faster than the pow function membershipValue = membershipValue*membershipValue; #else membershipValue = __powf(membershipValue,FUZZINESS)+1e-30; #endif memberships[blockIdx.y*my_num_events+i] = membershipValue; } } __global__ void ComputeMembershipMatrixLinear(float* distances, int my_num_events) { float membershipValue; float denom = 0.0f; float dist; int i = blockIdx.x * blockDim.x + threadIdx.x; // For each event if(i < my_num_events) { for(int c=0; c < NUM_CLUSTERS; c++) { dist = distances[c*my_num_events+i]; #if FUZZINESS_SQUARE dist = dist*dist; #else dist = __powf(dist,2.0f/(FUZZINESS-1.0f))+1e-30; #endif denom += 1.0f / dist; // what if dist is really big? 
} for(int c=0; c < NUM_CLUSTERS; c++) { // not enough shared memory to store an array of distances // for each thread, so just recompute them like above dist = distances[c*my_num_events+i]; #if FUZZINESS_SQUARE dist = dist*dist; membershipValue = 1.0f/(dist*denom); // u membershipValue *= membershipValue; // u^p, p=2 #else dist = __powf(dist,2.0f/(FUZZINESS-1.0f))+1e-30; membershipValue = 1.0f/(dist*denom); // u membershipValue = __powf(membershipValue,FUZZINESS); // u^p #endif distances[c*my_num_events+i] = membershipValue; } } } __global__ void ComputeNormalizedMembershipMatrix(float* distances, float* memberships, int my_num_events) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < my_num_events) { memberships[blockIdx.y*my_num_events+i] = MembershipValueGPU(blockIdx.y, i, distances,my_num_events); } } __global__ void ComputeNormalizedMembershipMatrixLinear(float* distances, int my_num_events) { float membershipValue; float denom = 0.0f; float dist; int i = blockIdx.x * blockDim.x + threadIdx.x; // For each event if(i < my_num_events) { for(int c=0; c < NUM_CLUSTERS; c++) { dist = distances[c*my_num_events+i]; #if FUZZINESS_SQUARE dist = dist*dist; #else dist = __powf(dist,2.0f/(FUZZINESS-1.0f)); #endif denom += 1.0f / dist; } for(int c=0; c < NUM_CLUSTERS; c++) { // not enough shared memory to store an array of distances // for each thread, so just recompute them like above dist = distances[c*my_num_events+i]; #if FUZZINESS_SQUARE dist = dist*dist; membershipValue = 1.0f/(dist*denom); // u #else dist = __powf(dist,2.0f/(FUZZINESS-1.0f))+1e-30; membershipValue = 1.0f/(dist*denom); // u #endif distances[c*my_num_events+i] = membershipValue; } } } __device__ float MembershipValueGPU(int clusterIndex, int eventIndex, const float* distanceMatrix, int my_num_events){ float myClustDist = 0; // Compute the distance from this event to the given cluster myClustDist = distanceMatrix[clusterIndex*my_num_events+eventIndex]; float sum =0; float otherClustDist; // 
Compute the distance to all other clusters // Note: This is kind of inefficient, because the distance to every other cluster // is being re-computed by every other block // If each block handled a certain set of events rather than a cluster // we might be able to avoid this. for(int j = 0; j< NUM_CLUSTERS; j++){ otherClustDist = distanceMatrix[j*my_num_events+eventIndex]; #if FUZZINESS_SQUARE sum += (myClustDist/otherClustDist)*(myClustDist/otherClustDist); #else sum += __powf((myClustDist/otherClustDist),(2.0f/(FUZZINESS-1.0f))); #endif } return 1/sum; } __device__ float CalculateDistanceGPU(const float* clusters, const float* events, int clusterIndex, int eventIndex, int my_num_events){ float sum = 0; float tmp; #if DISTANCE_MEASURE == 0 #pragma unroll 1 // Prevent compiler from unrolling this loop too much, eats up too many registers for(int i = 0; i < NUM_DIMENSIONS; i++){ tmp = events[i*my_num_events+eventIndex] - clusters[i]; //tmp = events[i*my_num_events+eventIndex] - clusters[clusterIndex*NUM_DIMENSIONS +i]; //tmp = events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i]; sum += tmp*tmp; } sum = sqrt(sum+1e-30); #endif #if DISTANCE_MEASURE == 1 #pragma unroll 1 // Prevent compiler from unrolling this loop too much, eats up too many registers for(int i = 0; i < NUM_DIMENSIONS; i++){ tmp = events[i*my_num_events+eventIndex] - clusters[i]; //tmp = events[i*my_num_events+eventIndex] - clusters[clusterIndex*NUM_DIMENSIONS +i]; //tmp = events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i]; sum += abs(tmp)+1e-30; } #endif #if DISTANCE_MEASURE == 2 #pragma unroll 1 // Prevent compiler from unrolling this loop too much, eats up too many registers for(int i = 0; i < NUM_DIMENSIONS; i++){ tmp = abs(events[i*my_num_events + eventIndex] - clusters[i]); //tmp = abs(events[i*my_num_events + eventIndex] - clusters[clusterIndex*NUM_DIMENSIONS + i]); //tmp = abs(events[eventIndex*NUM_DIMENSIONS + i] - 
clusters[clusterIndex*NUM_DIMENSIONS + i]); if(tmp > sum) sum = tmp+1e-30; } #endif return sum; } __device__ float CalculateQII(const float* events, float* distanceMatrix, int cluster_index_I, float* EI, float* numMem, int my_num_events){ EI[threadIdx.x] = 0; numMem[threadIdx.x] = 0; for(int i = threadIdx.x; i < my_num_events; i+=Q_THREADS){ float distance = distanceMatrix[cluster_index_I*my_num_events+i]; float memVal = MembershipValueDist(distanceMatrix, cluster_index_I, i, distance, my_num_events); if(memVal > MEMBER_THRESH){ EI[threadIdx.x] += pow(memVal, 2) * pow(distance, 2); numMem[threadIdx.x]++; } } __syncthreads(); if(threadIdx.x == 0){ for(int i = 1; i < Q_THREADS; i++){ EI[0] += EI[i]; numMem[0] += numMem[i]; } } __syncthreads(); return ((((float)K1) * numMem[0]) - (((float)K2) * EI[0]) - (((float)K3) * NUM_DIMENSIONS)); } __device__ float CalculateQIJ(const float* events, float* distanceMatrix, int cluster_index_I, int cluster_index_J, float * EI, float * EJ, float *numMem, int my_num_events){ EI[threadIdx.x] = 0; EJ[threadIdx.x] = 0; numMem[threadIdx.x] = 0; for(int i = threadIdx.x; i < my_num_events; i+=Q_THREADS){ float distance = distanceMatrix[cluster_index_I*my_num_events+i]; float memValI = MembershipValueDist(distanceMatrix, cluster_index_I, i, distance, my_num_events); if(memValI > MEMBER_THRESH){ EI[threadIdx.x] += pow(memValI, 2) * pow(distance, 2); } distance = distanceMatrix[cluster_index_J*my_num_events+i]; float memValJ = MembershipValueDist(distanceMatrix, cluster_index_J, i, distance, my_num_events); if(memValJ > MEMBER_THRESH){ EJ[threadIdx.x] += pow(memValJ, 2) * pow(distance, 2); } if(memValI > MEMBER_THRESH && memValJ > MEMBER_THRESH){ numMem[threadIdx.x]++; } } __syncthreads(); if(threadIdx.x == 0){ for(int i = 1; i < Q_THREADS; i++){ EI[0] += EI[i]; EJ[0] += EJ[i]; numMem[0] += numMem[i]; } } __syncthreads(); float EB = (EI[0] > EJ[0]) ? 
EI[0] : EJ[0]; return ((-1*((float)K1)*numMem[0]) + ((float)K2)*EB); } __global__ void CalculateQMatrixGPUUpgrade(const float* events, const float* clusters, float* matrix, float* distanceMatrix, int start_row, int my_num_events){ __shared__ float EI[Q_THREADS]; __shared__ float EJ[Q_THREADS]; __shared__ float numMem[Q_THREADS]; int row = blockIdx.x + start_row; int col = blockIdx.y; if(row == col){ matrix[row*NUM_CLUSTERS + col ] = CalculateQII(events, distanceMatrix, row, EI, numMem, my_num_events); } else{ matrix[row*NUM_CLUSTERS + col] = CalculateQIJ(events, distanceMatrix, row, col, EI, EJ, numMem, my_num_events); } } __device__ float MembershipValueDist(float* distanceMatrix, int clusterIndex, int eventIndex, float distance, int my_num_events){ float sum =0.0f; float otherClustDist; for(int j = 0; j< NUM_CLUSTERS; j++){ otherClustDist = distanceMatrix[j*my_num_events+eventIndex]; sum += __powf((float)(distance/otherClustDist),(2.0f/(FUZZINESS-1.0f))); } return 1.0f/sum; }
1b4c9d80dbc0831ee7e73d3b13553897c4d9cc6b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This file has been adapted from FasterTransformer file: // https://github.com/NVIDIA/FasterTransformer/blob/v4.0/fastertransformer/cuda/masked_multihead_attention.cu // We add License in the head. // headers sort by clang-format may cause compiling error or test faiure, // see https://github.com/PaddlePaddle/Paddle/pull/42840/ // clang-format off #include <hip/hip_fp16.h> #include <float.h> #include <hipcub/hipcub.hpp> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/fluid/operators/fused/attention_layer_norm.h" #include "paddle/fluid/operators/fused/attn_gemm.h" #include "paddle/fluid/operators/fused/fmha_ref.h" #include "paddle/fluid/operators/fused/fused_dropout_helper.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif // clang-format on namespace paddle { namespace operators { using Tensor = framework::Tensor; // for debug // #define _DEBUG_FUSED_MULTI_TRANSFORMER template 
<typename T> static void AllReduce(framework::Tensor &tensor, // NOLINT const int ring_id, const platform::CUDADeviceContext &ctx) { if (ring_id == -1) return; #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) auto dtype = platform::ToNCCLDataType(framework::TransToProtoVarType(tensor.dtype())); int64_t numel = tensor.numel(); const void *sendbuff = tensor.data<T>(); auto place = ctx.GetPlace(); void *recvbuff = tensor.mutable_data<T>(place); auto comm = platform::NCCLCommContext::Instance().Get(ring_id, place); auto stream = ctx.stream(); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sendbuff, recvbuff, numel, dtype, ncclSum, comm->comm(), stream)); #else PADDLE_THROW(platform::errors::Unimplemented( "PaddlePaddle should compile with NCCL or RCCL when used tensor model " "parallel op.")); #endif } namespace { namespace plat = paddle::platform; using float16 = plat::float16; #define MMHA_USE_FP32_ACUM_FOR_LOGITS #define MMHA_USE_FP32_ACUM_FOR_OUT template <typename T> struct Masked_multihead_attention_params { // output buffer, [B, 1(seq_len), num_head * dim_head] T *out; // qkv_out, [B, 1(seq_len), 3, num_head * dim_head] const T *qkv; // bias, [3, num_head, dim_head] const T *qkv_bias; // TODO(wangxi): optimize with input_lengths and max_input_len? 
  // attention mask, [bsz, 1, 1, time_step(cache_seq_length)+1]
  const T *attn_mask;
  // [2, B, num_head, max_seq_len(valid cache_seq_len), dim_head]
  // k [B, num_head, dim_head/x, max_seq_len, x], that is `seq_len` first
  // v [B, num_head, max_seq_len, dim_head]
  T *cache_kv;

  int batch_size;
  int num_head;
  int timestep;  // cache_seq_length
  int max_seq_length;

  // 1.f / sqrt(Dh)
  float inv_sqrt_dh;
};

// Eight packed floats; the fp32 accumulator counterpart of uint4 (8 x fp16).
struct Float8_ {
  float2 x;
  float2 y;
  float2 z;
  float2 w;
};

// Vector types used for the current-step Q/K loads: chosen so that one warp
// covers a whole head (Dh elements) with 16-byte-or-smaller chunks.
// clang-format off
template <typename T, int Dh> struct Qk_vec_ {};
template <> struct Qk_vec_<float, 32> { using Type = float; };
template <> struct Qk_vec_<float, 64> { using Type = float2; };
template <> struct Qk_vec_<float, 128> { using Type = float4; };
template <> struct Qk_vec_<float16, 32> { using Type = uint32_t; };
template <> struct Qk_vec_<float16, 64> { using Type = uint32_t; };
template <> struct Qk_vec_<float16, 128> { using Type = uint2; };

// Vector types for the cached-K dot, keyed by threads cooperating per key.
template <typename T, int THREADS_PER_KEY> struct K_vec_ {};
template <> struct K_vec_<float, 4> { using Type = float; };
template <> struct K_vec_<float, 2> { using Type = float2; };
template <> struct K_vec_<float, 1> { using Type = float4; };
template <> struct K_vec_<float16, 4> { using Type = uint32_t; };
template <> struct K_vec_<float16, 2> { using Type = uint2; };
template <> struct K_vec_<float16, 1> { using Type = uint4; };

// Vector types for V loads, keyed by elements handled per thread.
template <typename T, int V_VEC_SIZE> struct V_vec_ {};
template <> struct V_vec_<float, 1> { using Type = float; };
template <> struct V_vec_<float, 2> { using Type = float2; };
template <> struct V_vec_<float, 4> { using Type = float4; };
template <> struct V_vec_<float16, 2> { using Type = uint32_t; };
template <> struct V_vec_<float16, 4> { using Type = uint2; };
template <> struct V_vec_<float16, 8> { using Type = uint4; };

#ifdef MMHA_USE_FP32_ACUM_FOR_OUT
// fp32 accumulator type matching each V vector type.
template <typename T> struct V_vec_acum_fp32_ {};
// template <> struct V_vec_acum_fp32_<float> { using Type = float; };
// template <> struct V_vec_acum_fp32_<float2> { using Type = float2; };
template <> struct V_vec_acum_fp32_<float4> { using Type = float4; };
// template <> struct V_vec_acum_fp32_<uint32_t> { using Type = float2; };
// template <> struct V_vec_acum_fp32_<uint2 > { using Type = Float4_; };
template <> struct V_vec_acum_fp32_<uint4> { using Type = Float8_; };
#endif
// clang-format on

// fp16 -> fp32 conversion via PTX, operating on raw half bits.
inline __device__ float half_to_float(uint16_t h) {
  float f;
  asm volatile("cvt.f32.f16 %0, %1;\n" : "=f"(f) : "h"(h));
  return f;
}

inline __device__ float2 half2_to_float2(uint32_t v) {
  uint16_t lo, hi;
  asm volatile("mov.b32 {%0, %1}, %2;\n" : "=h"(lo), "=h"(hi) : "r"(v));
  return make_float2(half_to_float(lo), half_to_float(hi));
}

inline __device__ uint32_t float2_to_half2(float2 f) {
  union {
    uint32_t u32;
    uint16_t u16[2];
  } tmp;
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
  // SM80+ has a packed f32x2 -> f16x2 conversion instruction.
  asm volatile("cvt.rn.f16x2.f32 %0, %1, %2;\n"
               : "=r"(tmp.u32)
               : "f"(f.y), "f"(f.x));
#else
  asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[0]) : "f"(f.x));
  asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[1]) : "f"(f.y));
#endif
  return tmp.u32;
}

// Elementwise add over every vector / packed-half type used by the kernel.
inline __device__ float add(float a, float b) { return a + b; }

inline __device__ float2 add(float2 a, float2 b) {
  float2 c;
  c.x = add(a.x, b.x);
  c.y = add(a.y, b.y);
  return c;
}

inline __device__ float4 add(float4 a, float4 b) {
  float4 c;
  c.x = add(a.x, b.x);
  c.y = add(a.y, b.y);
  c.z = add(a.z, b.z);
  c.w = add(a.w, b.w);
  return c;
}

inline __device__ uint16_t add(uint16_t a, uint16_t b) {
  uint16_t c;
  asm volatile("add.f16 %0, %1, %2;\n" : "=h"(c) : "h"(a), "h"(b));
  return c;
}

inline __device__ uint32_t add(uint32_t a, uint32_t b) {
  uint32_t c;
  asm volatile("add.f16x2 %0, %1, %2;\n" : "=r"(c) : "r"(a), "r"(b));
  return c;
}

inline __device__ uint2 add(uint2 a, uint2 b) {
  uint2 c;
  c.x = add(a.x, b.x);
  c.y = add(a.y, b.y);
  return c;
}

inline __device__ uint4 add(uint4 a, uint4 b) {
  uint4 c;
  c.x = add(a.x, b.x);
  c.y = add(a.y, b.y);
  c.z = add(a.z, b.z);
  c.w = add(a.w, b.w);
  return c;
}

// Mixed-precision adds: packed-half operand into an fp32 accumulator.
inline __device__ float2 add(uint32_t a, float2 fb) {
  float2 fa = half2_to_float2(a);
  return add(fa, fb);
}

inline __device__ Float8_ add(uint4 a, Float8_ fb) {
  Float8_ fc;
  fc.x = add(a.x, fb.x);
  fc.y = add(a.y, fb.y);
  fc.z = add(a.z, fb.z);
  fc.w = add(a.w, fb.w);
  return fc;
}

// Elementwise multiply; Acc selects the result/accumulator type.
template <typename Acc, typename A, typename B>
inline __device__ Acc mul(A a, B b);

template <>
inline __device__ float mul<float, float>(float a, float b) {
  return a * b;
}

template <>
inline __device__ float2 mul(float2 a, float2 b) {
  float2 c;
  c.x = a.x * b.x;
  c.y = a.y * b.y;
  return c;
}

template <>
inline __device__ float4 mul(float4 a, float4 b) {
  float4 c;
  c.x = a.x * b.x;
  c.y = a.y * b.y;
  c.z = a.z * b.z;
  c.w = a.w * b.w;
  return c;
}

template <>
inline __device__ uint16_t mul(uint16_t a, uint16_t b) {
  uint16_t c;
  asm volatile("mul.f16 %0, %1, %2;\n" : "=h"(c) : "h"(a), "h"(b));
  return c;
}

template <>
inline __device__ uint32_t mul(uint32_t a, uint32_t b) {
  uint32_t c;
  asm volatile("mul.f16x2 %0, %1, %2;\n" : "=r"(c) : "r"(a), "r"(b));
  return c;
}

template <>
inline __device__ uint2 mul(uint2 a, uint2 b) {
  uint2 c;
  c.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x);
  c.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y);
  return c;
}

template <>
inline __device__ uint4 mul(uint4 a, uint4 b) {
  uint4 c;
  c.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x);
  c.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y);
  c.z = mul<uint32_t, uint32_t, uint32_t>(a.z, b.z);
  c.w = mul<uint32_t, uint32_t, uint32_t>(a.w, b.w);
  return c;
}

// Horizontal sum of a vector; packed halves are widened to fp32 first.
inline __device__ float sum(float v) { return v; }
inline __device__ float sum(float2 v) { return v.x + v.y; }
inline __device__ float sum(float4 v) { return v.x + v.y + v.z + v.w; }
inline __device__ float sum(uint16_t v) { return half_to_float(v); }

inline __device__ float sum(uint32_t v) {
  float2 tmp = half2_to_float2(v);
  return tmp.x + tmp.y;
}

inline __device__ float sum(uint2 v) {
  uint32_t c = add(v.x, v.y);
  return sum(c);
}

inline __device__ float sum(uint4 v) {
  uint32_t c = add(v.x, v.y);
  c = add(c, v.z);
  c = add(c, v.w);
  return sum(c);
}

// dot(a, b): horizontal sum of the elementwise product.
template <typename T>
inline __device__ float dot(T a, T b) {
  return sum(mul<T, T, T>(a, b));
}

// Variant with an explicit accumulator type A for the multiply.
template <typename A, typename T>
inline __device__ float dot(T a, T b) {
  return sum(mul<A, T, T>(a, b));
}

// Lane mask covering the low `threads` lanes of a warp.
inline __device__ constexpr uint32_t shfl_mask(int threads) {
  return threads == 32 ? uint32_t(-1) : (1u << threads) - 1u;
}

// Ceiling division.
template <typename T>
inline __device__ __host__ T div_up(T m, T n) {
  return (m + n - 1) / n;
}

// Fused multiply-add overloads mirroring add/mul above.
inline __device__ float fma(float a, float b, float c) { return a * b + c; }

inline __device__ float2 fma(float2 a, float2 b, float2 c) {
  float2 d;
  d.x = fma(a.x, b.x, c.x);
  d.y = fma(a.y, b.y, c.y);
  return d;
}

inline __device__ float4 fma(float4 a, float4 b, float4 c) {
  float4 d;
  d.x = fma(a.x, b.x, c.x);
  d.y = fma(a.y, b.y, c.y);
  d.z = fma(a.z, b.z, c.z);
  d.w = fma(a.w, b.w, c.w);
  return d;
}

inline __device__ uint32_t fma(uint32_t a, uint32_t b, uint32_t c) {
  uint32_t d;
  asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n"
               : "=r"(d)
               : "r"(a), "r"(b), "r"(c));
  return d;
}

inline __device__ uint2 fma(uint2 a, uint2 b, uint2 c) {
  uint2 d;
  d.x = fma(a.x, b.x, c.x);
  d.y = fma(a.y, b.y, c.y);
  return d;
}

inline __device__ uint4 fma(uint4 a, uint4 b, uint4 c) {
  uint4 d;
  d.x = fma(a.x, b.x, c.x);
  d.y = fma(a.y, b.y, c.y);
  d.z = fma(a.z, b.z, c.z);
  d.w = fma(a.w, b.w, c.w);
  return d;
}

// Scalar-broadcast fma: a (scalar) * b (vector) + c (vector).
inline __device__ float2 fma(float a, float2 b, float2 c) {
  float2 d;
  d.x = fma(a, b.x, c.x);
  d.y = fma(a, b.y, c.y);
  return d;
}

inline __device__ float4 fma(float a, float4 b, float4 c) {
  float4 d;
  d.x = fma(a, b.x, c.x);
  d.y = fma(a, b.y, c.y);
  d.z = fma(a, b.z, c.z);
  d.w = fma(a, b.w, c.w);
  return d;
}

inline __device__ Float8_ fma(float a, Float8_ b, Float8_ c) {
  Float8_ d;
  d.x = fma(a, b.x, c.x);
  d.y = fma(a, b.y, c.y);
  d.z = fma(a, b.z, c.z);
  d.w = fma(a, b.w, c.w);
  return d;
}

// Broadcast one half value into both halves of a 32-bit register.
inline __device__ uint32_t h0_h0(uint16_t a) {
  uint32_t b;
  asm volatile("mov.b32 %0, {%1, %1};" : "=r"(b) : "h"(a));
  return b;
}

inline __device__ uint32_t fma(uint16_t a, uint32_t b, uint32_t c) {
  return fma(h0_h0(a), b, c);
}

inline __device__ uint2 fma(uint16_t a, uint2 b, uint2 c) {
  uint32_t s = h0_h0(a);
  uint2 d;
  d.x = fma(s, b.x, c.x);
  d.y = fma(s, b.y, c.y);
  return d;
}

inline __device__ uint4 fma(uint16_t a, uint4 b, uint4 c) {
  uint32_t s = h0_h0(a);
  uint4 d;
  d.x = fma(s, b.x, c.x);
  d.y = fma(s, b.y, c.y);
  d.z = fma(s, b.z, c.z);
  d.w = fma(s, b.w, c.w);
  return d;
}

// Widen to fp32 (identity for float vectors, unpack for packed fp16).
inline __device__ float cast_to_float(float u) { return u; }
inline __device__ float2 cast_to_float(float2 u) { return u; }
inline __device__ float4 cast_to_float(float4 u) { return u; }

inline __device__ Float8_ cast_to_float(uint4 u) {
  Float8_ tmp;
  tmp.x = half2_to_float2(u.x);
  tmp.y = half2_to_float2(u.y);
  tmp.z = half2_to_float2(u.z);
  tmp.w = half2_to_float2(u.w);
  return tmp;
}

// Q.K dot over the N vectors held per thread, then reduced across the
// THREADS_PER_KEY lanes that cooperate on one key via xor-shuffles.
template <int THREADS_PER_KEY, typename K_vec, int N>
inline __device__ float qk_dot_(const K_vec (&q)[N], const K_vec (&k)[N]) {
  K_vec qk_vec = mul<K_vec, K_vec, K_vec>(q[0], k[0]);
#pragma unroll
  for (int ii = 1; ii < N; ++ii) {
    qk_vec = fma(q[ii], k[ii], qk_vec);
  }

  float qk = sum(qk_vec);
#pragma unroll
  for (int mask = THREADS_PER_KEY / 2; mask >= 1; mask /= 2) {
    qk += __shfl_xor_sync(uint32_t(-1), qk, mask);
  }
  return qk;
}

template <typename T, int THREADS_PER_KEY>
struct Qk_dot {
  template <typename K_vec, int N>
  static inline __device__ float dot(const K_vec (&q)[N], const K_vec (&k)[N]) {
    return qk_dot_<THREADS_PER_KEY>(q, k);
  }
};

// Block-wide sum: warp-level shuffle reduction, then cross-warp reduction
// through red_smem (which must hold at least WARPS_PER_BLOCK floats).
template <int WARPS_PER_BLOCK, int WARP_SIZE = 32>
inline __device__ float block_sum(float *red_smem, float sum) {
  int warp = threadIdx.x / WARP_SIZE;
  int lane = threadIdx.x % WARP_SIZE;

#pragma unroll
  for (int mask = WARP_SIZE / 2; mask >= 1; mask /= 2) {
    sum += __shfl_xor_sync(uint32_t(-1), sum, mask);
  }

  if (lane == 0) {
    red_smem[warp] = sum;
  }
  __syncthreads();

  if (lane < WARPS_PER_BLOCK) {
    sum = red_smem[lane];
  }

#pragma unroll
  for (int mask = WARPS_PER_BLOCK / 2; mask >= 1; mask /= 2) {
    sum += __shfl_xor_sync(uint32_t(-1), sum, mask);
  }

  // broadcast lane 0's total to every thread in the block
  return
      __shfl_sync(uint32_t(-1), sum, 0);
}

// Copy fp32 accumulators back to the (possibly fp16) destination type.
inline __device__ void convert_from_float(float &dst, float src) {  // NOLINT
  dst = src;
}

inline __device__ void convert_from_float(float4 &dst, float4 src) {  // NOLINT
  dst = src;
}

inline __device__ void convert_from_float(plat::float16 &dst,  // NOLINT
                                          float src) {
  dst = static_cast<plat::float16>(src);
}

inline __device__ void convert_from_float(uint4 &dst, Float8_ src) {  // NOLINT
  dst.x = float2_to_half2(src.x);
  dst.y = float2_to_half2(src.y);
  dst.z = float2_to_half2(src.z);
  dst.w = float2_to_half2(src.w);
}

inline __device__ void zero(uint16_t &dst) { dst = uint16_t(0); }  // NOLINT

// Zero-fill any 4-byte-multiple POD type word by word.
template <typename T>
inline __device__ void zero(T &dst) {  // NOLINT
  constexpr int WORDS = sizeof(T) / 4;
  union {
    T raw;
    uint32_t words[WORDS];
  } tmp;
#pragma unroll
  for (int ii = 0; ii < WORDS; ++ii) {
    tmp.words[ii] = 0u;
  }
  dst = tmp.raw;
}

// Decoder-stage (seq_len == 1) masked multi-head attention for one timestep.
// One block handles one (batch bi, head hi) pair; grid = (num_head, batch).
// Appends the new K/V (with bias) to cache_kv, then computes
// softmax(Q.K^T * inv_sqrt_dh + mask) . V over timestep+1 positions.
// Dynamic shared memory must be sized with smem_size_in_bytes(); qk_smem,
// logits_smem and out_smem alias that buffer, with phases separated by
// __syncthreads().
template <typename T, int Dh, int Dh_MAX, int THREADS_PER_KEY,
          int THREADS_PER_VALUE, int THREADS_PER_BLOCK>
__global__ void masked_multihead_attention_kernel(
    Masked_multihead_attention_params<T> params, int pad_active_groups) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
  static_assert(Dh % THREADS_PER_KEY == 0, "");
  static_assert(Dh % THREADS_PER_VALUE == 0, "");

  constexpr int WARP_SIZE = 32;
  constexpr int WARPS_PER_BLOCK = THREADS_PER_BLOCK / WARP_SIZE;

  extern __shared__ char smem_[];

  float *qk_smem = reinterpret_cast<float *>(smem_);

  char *logits_smem_ = smem_;
  // fp32 accum for logits
  float *logits_smem = reinterpret_cast<float *>(logits_smem_);

  T *out_smem = reinterpret_cast<T *>(smem_);

  __shared__ float red_smem[WARPS_PER_BLOCK * 2];
  __shared__ T q_smem[Dh];

  const int bi = blockIdx.y;
  const int hi = blockIdx.x;
  const int bhi = bi * params.num_head + hi;
  const int tid = threadIdx.x;

  float qk_max = -FLT_MAX;
  float qk = 0;

  // qkv [B, S=1, 3, num_head, head_dim]
  int qkv_base_offset = bi * 3 * params.num_head * Dh + hi * Dh;

  using Qk_vec = typename Qk_vec_<T, Dh_MAX>::Type;
  constexpr int QK_VEC_SIZE = sizeof(Qk_vec) / sizeof(T);
  static_assert(Dh % QK_VEC_SIZE == 0 && Dh / QK_VEC_SIZE <= WARP_SIZE, "");
  constexpr int QK_VECS_PER_WARP = Dh / QK_VEC_SIZE;

  // cache_k, [B, num_head, head_dim / x, max_seq_len, x]
  // x == 4/8 for FP32/FP16, 128bit, 16Byte
  constexpr int QK_ELTS_IN_16B = 16 / sizeof(T);
  constexpr int QK_VECS_IN_16B = 16 / sizeof(Qk_vec);

  const T *q_base = params.qkv;
  const T *k_base = params.qkv + params.num_head * Dh;
  const T *q_bias_base = params.qkv_bias;
  const T *k_bias_base = params.qkv_bias + params.num_head * Dh;

  // Phase 1: the first warp adds bias to this step's Q/K, stages Q in shared
  // memory, scatters K into the seq-major cache layout, and computes the qk
  // score of the current position.
  if (tid < QK_VECS_PER_WARP) {
    int qk_offset = qkv_base_offset + tid * QK_VEC_SIZE;
    int qk_bias_offset = hi * Dh + tid * QK_VEC_SIZE;

    Qk_vec q = *reinterpret_cast<const Qk_vec *>(&q_base[qk_offset]);
    Qk_vec k = *reinterpret_cast<const Qk_vec *>(&k_base[qk_offset]);

    Qk_vec q_bias =
        *reinterpret_cast<const Qk_vec *>(&q_bias_base[qk_bias_offset]);
    Qk_vec k_bias =
        *reinterpret_cast<const Qk_vec *>(&k_bias_base[qk_bias_offset]);

    q = add(q, q_bias);
    // TODO(wangxi): See this https://github.com/microsoft/unilm/issues/510
    // we may not require k_bias.
    k = add(k, k_bias);

    *reinterpret_cast<Qk_vec *>(&q_smem[tid * QK_VEC_SIZE]) = q;

    int co = tid / QK_VECS_IN_16B;
    int ci = (tid % QK_VECS_IN_16B) * QK_VEC_SIZE;
    int offset = bhi * params.max_seq_length * Dh +
                 co * params.max_seq_length * QK_ELTS_IN_16B +
                 params.timestep * QK_ELTS_IN_16B + ci;
    *reinterpret_cast<Qk_vec *>(&params.cache_kv[offset]) = k;

    qk = dot<Qk_vec, Qk_vec>(q, k);
  }
  if (tid < WARP_SIZE) {
    for (int mask = WARP_SIZE / 2; mask >= 1; mask /= 2) {
      qk += __shfl_xor_sync(uint32_t(-1), qk, mask);
    }
    if (tid == 0) {
      // NOTE(wangxi): mask must be 0.0
      // T mask = params.attn_mask[
      //    bi * (params.timestep + 1) + params.timestep];
      // qk += static_cast<float>(mask);
      qk *= params.inv_sqrt_dh;
      qk_max = qk;
      qk_smem[params.timestep] = qk;
    }
  }
  __syncthreads();

#ifdef _DEBUG_FUSED_MULTI_TRANSFORMER
  if (bi == 0 && hi == 0 && tid == 0) {
    printf("=======q_out=======\n");
    for (int i = 0; i < Dh; ++i) printf("%f ", static_cast<float>(q_smem[i]));
    printf("\n");
  }
  __syncthreads();
#endif

  // Phase 2: iterate over cached keys; THREADS_PER_KEY lanes cooperate on
  // each key position, K_PER_ITER positions are handled per block pass.
  using K_vec = typename K_vec_<T, THREADS_PER_KEY>::Type;
  constexpr int K_VEC_SIZE = sizeof(K_vec) / sizeof(T);
  static_assert(Dh % K_VEC_SIZE == 0, "");
  constexpr int K_ELTS_PER_THREAD = Dh / THREADS_PER_KEY;
  constexpr int K_VECS_PER_THREAD = K_ELTS_PER_THREAD / K_VEC_SIZE;

  int ko = tid / THREADS_PER_KEY;
  int ki = (tid % THREADS_PER_KEY) * K_VEC_SIZE;

  K_vec q[K_VECS_PER_THREAD];
#pragma unroll
  for (int i = 0; i < K_VECS_PER_THREAD; ++i) {
    q[i] = *reinterpret_cast<const K_vec *>(
        &q_smem[ki + i * THREADS_PER_KEY * K_VEC_SIZE]);
  }

  constexpr int K_PER_ITER = THREADS_PER_BLOCK / THREADS_PER_KEY;
  constexpr int K_PER_WARP = WARP_SIZE / THREADS_PER_KEY;

  T *k_cache = &params.cache_kv[bhi * params.max_seq_length * Dh + ki];
  int ti_end = div_up(params.timestep, K_PER_WARP) * K_PER_WARP;

  for (int ti = ko; ti < ti_end; ti += K_PER_ITER) {
    K_vec k[K_VECS_PER_THREAD];
#pragma unroll
    for (int ii = 0; ii < K_VECS_PER_THREAD; ++ii) {
      int jj = ii * params.max_seq_length + ti;
      if (ti < params.timestep) {
        k[ii] =
            *reinterpret_cast<const K_vec *>(&k_cache[jj * QK_ELTS_IN_16B]);
      }
    }

    float qk = Qk_dot<T, THREADS_PER_KEY>::dot(q, k) * params.inv_sqrt_dh;

    // bool is_mask = false;
    if (ti < params.timestep && tid % THREADS_PER_KEY == 0) {
      // qk_max = is_mask ? qk_max : fmaxf(qk_max, qk);
      T mask = params.attn_mask[bi * (params.timestep + 1) + ti];
      qk += static_cast<float>(mask);
      qk_max = fmaxf(qk_max, qk);

      qk_smem[ti] = qk;
    }
  }

  // Reduce qk_max within each warp, then across warps via red_smem.
#pragma unroll
  for (int mask = WARP_SIZE / 2; mask >= THREADS_PER_KEY; mask /= 2) {
    qk_max = fmaxf(qk_max, __shfl_xor_sync(uint32_t(-1), qk_max, mask));
  }

  const int warp = tid / WARP_SIZE;
  const int lane = tid % WARP_SIZE;

  if (lane == 0) {
    red_smem[warp] = qk_max;
  }

  __syncthreads();

  qk_max = lane < WARPS_PER_BLOCK ? red_smem[lane] : -FLT_MAX;
#pragma unroll
  for (int mask = WARPS_PER_BLOCK / 2; mask >= 1; mask /= 2) {
    qk_max = fmaxf(qk_max, __shfl_xor_sync(uint32_t(-1), qk_max, mask));
  }

  qk_max = __shfl_sync(uint32_t(-1), qk_max, 0);

#ifdef _DEBUG_FUSED_MULTI_TRANSFORMER
  if (bi == 0 && hi == 0 && tid == 0) {
    printf("=======qk_out=======\n");
    for (int i = 0; i <= params.timestep; ++i) printf("%f ", qk_smem[i]);
    printf("qk_max=%f\n", qk_max);
  }
  __syncthreads();
#endif

  // Phase 3: softmax over qk_smem[0..timestep].
  float sum = 0.f;
  for (int ti = tid; ti <= params.timestep; ti += THREADS_PER_BLOCK) {
    // bool is_mask = false;
    // float logit = is_mask ? 0.f : __expf(qk_smem[ti] - qk_max);
    float logit = __expf(qk_smem[ti] - qk_max);
    sum += logit;
    qk_smem[ti] = logit;
  }

  sum = block_sum<WARPS_PER_BLOCK>(&red_smem[WARPS_PER_BLOCK], sum);

  // FIXME(wangxi): need add 1.e-6f?
  float inv_sum = __fdividef(1.f, sum + 1.e-6f);
  for (int ti = tid; ti <= params.timestep; ti += THREADS_PER_BLOCK) {
    convert_from_float(logits_smem[ti], qk_smem[ti] * inv_sum);
  }
  __syncthreads();

  // Phase 4: weighted sum over cached values; THREADS_PER_VALUE lanes
  // cooperate on one value row, V_PER_ITER rows per pass.
  constexpr int V_VEC_SIZE = Dh / THREADS_PER_VALUE;
  using V_vec = typename V_vec_<T, V_VEC_SIZE>::Type;

  int vo = tid / THREADS_PER_VALUE;
  int vi = (tid % THREADS_PER_VALUE) * V_VEC_SIZE;

  T *v_cache = &params.cache_kv[params.batch_size * params.num_head *
                                    params.max_seq_length * Dh +
                                bhi * params.max_seq_length * Dh + vi];

#ifdef MMHA_USE_FP32_ACUM_FOR_OUT
  using V_vec_acum = typename V_vec_acum_fp32_<V_vec>::Type;
#else
  using V_vec_acum = V_vec;
#endif

  V_vec_acum out;
  zero(out);

  constexpr int V_PER_ITER = THREADS_PER_BLOCK / THREADS_PER_VALUE;
  if (vo < V_PER_ITER) {
    for (int ti = vo; ti < params.timestep; ti += V_PER_ITER) {
      V_vec v = *reinterpret_cast<const V_vec *>(&v_cache[ti * Dh]);
#if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS)
      float logit = logits_smem[ti];
      out = fma(logit, cast_to_float(v), out);
#else
      T logit = logits_smem[ti];
      // Update the partial sums.
      out = fma(logit, v, out);
#endif
    }
  }

#ifdef _DEBUG_FUSED_MULTI_TRANSFORMER
  if (bi == 0 && hi == 0 && tid == 0) {
    printf("======logits_out=====\n");
    for (int i = 0; i <= params.timestep; ++i) printf("%f ", logits_smem[i]);
    printf("\n");
  }
  __syncthreads();
#endif

  // Also add this step's V (with bias) and append it to the cache.
  if (vo == (params.timestep % V_PER_ITER)) {
    V_vec v = *reinterpret_cast<const V_vec *>(
        &params.qkv[2 * params.num_head * Dh + qkv_base_offset + vi]);
    V_vec v_bias = *reinterpret_cast<const V_vec *>(
        &params.qkv_bias[2 * params.num_head * Dh + hi * Dh + vi]);
    v = add(v, v_bias);
    *reinterpret_cast<V_vec *>(&v_cache[params.timestep * Dh]) = v;

#if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS)
    out = fma(logits_smem[params.timestep], cast_to_float(v), out);
#else
    out = fma(logits_smem[params.timestep], v, out);
#endif
  }

  __syncthreads();

  // Phase 5: tree-reduce the V_PER_ITER partial outputs through shared
  // memory. pad_active_groups is V_PER_ITER rounded up to a power of two;
  // the padding rows are zeroed first so the tree is well-defined.
  if (vo < pad_active_groups / 2) {
    zero(*reinterpret_cast<V_vec *>(&out_smem[vo * Dh + vi]));
  }
#pragma unroll
  for (int active_groups = pad_active_groups; active_groups >= 2;
       active_groups /= 2) {
    int midpoint = active_groups / 2;
    if (vo >= midpoint && vo < active_groups) {
#ifdef MMHA_USE_FP32_ACUM_FOR_OUT
      convert_from_float(
          *reinterpret_cast<V_vec *>(&out_smem[(vo - midpoint) * Dh + vi]),
          out);
#else
      *reinterpret_cast<V_vec *>(&out_smem[(vo - midpoint) * Dh + vi]) = out;
#endif
    }
    __syncthreads();
    if (vo < midpoint) {
      out = add(*reinterpret_cast<const V_vec *>(&out_smem[vo * Dh + vi]), out);
    }
    __syncthreads();
  }

  // Group 0 writes the final output row for this (batch, head).
  if (vo == 0) {
#ifdef MMHA_USE_FP32_ACUM_FOR_OUT
    convert_from_float(*reinterpret_cast<V_vec *>(&params.out[bhi * Dh + vi]),
                       out);
#else
    *reinterpret_cast<V_vec *>(&params.out[bhi * Dh + vi]) = out;
#endif
  }

#ifdef _DEBUG_FUSED_MULTI_TRANSFORMER
  __syncthreads();
  if (bi == 0 && hi == 0 && tid == 0) {
    printf("======fmha_out=====\n");
    for (int i = 0; i < Dh; ++i)
      printf("%f ", static_cast<float>(params.out[i]));
    printf("\n");
  }
#endif
#else
  assert(false);
#endif
}

// Dynamic shared memory required by masked_multihead_attention_kernel: the
// max of the softmax scratch and the output-reduction scratch (the two
// phases reuse the same buffer).
template <typename T>
inline size_t smem_size_in_bytes(
    const Masked_multihead_attention_params<T> &params, int
    dim_head, int threads_per_value, int threads_per_block,
    int pad_active_groups) {
  // softmax scratch: one fp32 logit per position, rounded up to 16B
  size_t qk_sz = div_up(params.timestep + 1, 4) * 16;
  size_t logits_sz = 0;
#ifndef MMHA_USE_FP32_ACUM_FOR_LOGITS
  if (sizeof(T) != 4) {
    logits_sz = div_up(params.max_seq_length, 4) * 4 * sizeof(T);
  }
#endif
  size_t softmax_sz = qk_sz + logits_sz;

  // output-reduction scratch: pad_active_groups / 2 rows of dim_head elems
  int rows_per_red = pad_active_groups;
  size_t red_sz = rows_per_red * dim_head * sizeof(T) / 2;

  return max(softmax_sz, red_sz);
}

// Expands to local declarations (pad_active_groups, smem_sz, grid) plus the
// kernel launch; intended for use inside fmha_launch_kernel only.
#define MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, THDS_PER_KEY, THDS_PER_VALUE,       \
                           THDS_PER_BLOCK, stream)                            \
  int pad_active_groups =                                                     \
      1 << static_cast<int>(ceil(std::log2(THDS_PER_BLOCK / THDS_PER_VALUE))); \
  size_t smem_sz = smem_size_in_bytes<T>(params, Dh, THDS_PER_VALUE,          \
                                         THDS_PER_BLOCK, pad_active_groups);  \
  dim3 grid(params.num_head, params.batch_size);                              \
  hipLaunchKernelGGL((masked_multihead_attention_kernel<T, Dh, Dh_MAX,        \
                                                        THDS_PER_KEY,         \
                                                        THDS_PER_VALUE,       \
                                                        THDS_PER_BLOCK>),     \
                     dim3(grid), dim3(THDS_PER_BLOCK), smem_sz, stream,       \
                     params, pad_active_groups)

// Pick block size / threads-per-key from the current cache length: short
// caches use smaller blocks with more lanes per key.
template <typename T, int Dh, int Dh_MAX>
void fmha_launch_kernel(const Masked_multihead_attention_params<T> &params,
                        const hipStream_t &stream) {
  constexpr int THREADS_PER_VALUE = Dh * sizeof(T) / 16;
  if (params.timestep < 32) {
    MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, stream);
  } else if (params.timestep < 2048) {
    MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, stream);
  } else {
    MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, stream);
  }
}

// Host entry point for decoder-stage attention: fills the parameter bundle
// and dispatches on dim_head.
template <typename T>
void fmha(const platform::CUDADeviceContext &dev_ctx, const Tensor &qkv_tensor,
          const Tensor &qkv_bias_tensor, const Tensor &src_mask_tensor,
          Tensor *cache_kv_tensor, Tensor *out_tensor, int batch_size,
          int max_seq_length, int num_head, int dim_head, int timestep,
          float inv_sqrt_dh) {
  Masked_multihead_attention_params<T> params;
  params.out = out_tensor->data<T>();
  params.qkv = qkv_tensor.data<T>();
  params.qkv_bias = qkv_bias_tensor.data<T>();
  params.attn_mask =
      src_mask_tensor.data<T>();
  params.cache_kv = cache_kv_tensor->data<T>();

  params.batch_size = batch_size;
  params.num_head = num_head;
  params.timestep = timestep;
  params.max_seq_length = max_seq_length;
  params.inv_sqrt_dh = inv_sqrt_dh;

  // Dh_MAX (second template arg) is dim_head padded to the next power of
  // two, used for the Qk_vec type selection.
  switch (dim_head) {
    case 32:
      fmha_launch_kernel<T, 32, 32>(params, dev_ctx.stream());
      break;
    case 64:
      fmha_launch_kernel<T, 64, 64>(params, dev_ctx.stream());
      break;
    case 96:
      fmha_launch_kernel<T, 96, 128>(params, dev_ctx.stream());
      break;
    case 128:
      fmha_launch_kernel<T, 128, 128>(params, dev_ctx.stream());
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "dim_head = %d is unsupport, only support "
          "dim_head = 32, 64, 96 or 128 for now.",
          dim_head));
  }
}

// NOTE: simd with 16Bytes(128bit), float is 4, float16 is 8
constexpr int VEC_16B = 16;

// Transpose-copy context-stage K into the seq-len-major cache layout:
// [bsz, num_head, seq_len, dim_head/x, x] ->
// [bsz, num_head, dim_head/x, max_seq_len, x], one 16B vector per thread.
template <typename T>
__global__ void write_cache_k_kernel(T *cache_k, const T *k, const int num_head,
                                     const int dim_head, const int seq_len,
                                     const int max_seq_len) {
  const int bi = blockIdx.y;
  const int hi = blockIdx.z;
  constexpr int X_ELEMS = VEC_16B / sizeof(T);

  // [bsz, num_head, seq_len, dim_head/x, x]
  auto k_src = reinterpret_cast<const uint4 *>(
      k + bi * num_head * seq_len * dim_head + hi * seq_len * dim_head);
  // [bsz, num_head, dim_head/x, max_seq_len, x]
  auto k_dst = reinterpret_cast<uint4 *>(
      cache_k + bi * num_head * max_seq_len * dim_head +
      hi * max_seq_len * dim_head);

  const int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
  // vec size
  int dim_head_div_x = dim_head / X_ELEMS;

  // FIXME(wangxi): num_head is not need?
  // if (out_idx >= num_head * dim_head_div_x * max_seq_len) return;
  if (out_idx >= dim_head_div_x * max_seq_len) return;

  // Decompose the destination index into (vec row, seq position).
  int idx = out_idx;
  const int k_seq_len_id = idx % max_seq_len;
  // idx = (idx - k_seq_len_id) / max_seq_len;
  idx = idx / max_seq_len;
  const int k_vec_id = idx % dim_head_div_x;

  // Positions beyond seq_len are left untouched (cache padding).
  if (k_seq_len_id < seq_len) {
    k_dst[out_idx] = k_src[k_seq_len_id * dim_head_div_x + k_vec_id];
  }
}

// Straight vectorized copy of context-stage V into the cache: the inner
// layout is unchanged, only the seq dimension is padded to max_seq_len.
template <typename T>
__global__ void write_cache_v_kernel(T *cache_v, const T *v, const int num_head,
                                     const int dim_head, const int seq_len,
                                     const int max_seq_len) {
  const int bi = blockIdx.y;
  const int hi = blockIdx.z;

  // [bsz, num_head, seq_len, dim_head/x, x]
  auto v_src = reinterpret_cast<const uint4 *>(
      v + bi * num_head * seq_len * dim_head + hi * seq_len * dim_head);
  // [bsz, num_head, max_seq_len, dim_head/x, x]
  auto v_dst = reinterpret_cast<uint4 *>(
      cache_v + bi * num_head * max_seq_len * dim_head +
      hi * max_seq_len * dim_head);

  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  constexpr int X_ELEMS = VEC_16B / sizeof(T);
  const int dim_head_div_x = dim_head / X_ELEMS;

  if (idx >= dim_head_div_x * seq_len) return;

  v_dst[idx] = v_src[idx];
}

// Host helper: writes context-stage K and V into the decoder cache using the
// two kernels above. Requires dim_head divisible by the 16B vector width
// (4 for fp32, 8 for fp16) so the uint4 reinterpretation is valid.
template <typename T>
void write_cache_kv(const platform::CUDADeviceContext &dev_ctx, T *cache_k,
                    T *cache_v, const T *k, const T *v, const int bsz,
                    const int num_head, const int seq_len,
                    const int max_seq_len, const int dim_head) {
  constexpr int block_sz = 128;
  constexpr int x = VEC_16B / sizeof(T);

  assert(dim_head % x == 0);
  PADDLE_ENFORCE_EQ(
      dim_head % x, 0,
      platform::errors::PreconditionNotMet(
          "dim_head=%d must be divisible by vec_size=%d", dim_head, x));

  int max_size = max_seq_len * dim_head / x;
  int size = seq_len * dim_head / x;
  dim3 grid(div_up(max_size, block_sz), bsz, num_head);
  dim3 grid_v(div_up(size, block_sz), bsz, num_head);

  // transpose [bsz, num_head, seq_len, dim_head/x, x]->
  // [bsz, num_head, dim_head/x, max_seq_len, x]
  hipLaunchKernelGGL((write_cache_k_kernel), dim3(grid), dim3(block_sz), 0,
dev_ctx.stream(), cache_k, k, num_head, dim_head, seq_len, max_seq_len); // copy [bsz, num_head, seq_len, dim_head/x, x]-> // [bsz, num_head, max_seq_len, dim_head/x, x] hipLaunchKernelGGL(( write_cache_v_kernel), dim3(grid_v), dim3(block_sz), 0, dev_ctx.stream(), cache_v, v, num_head, dim_head, seq_len, max_seq_len); } } // namespace template <typename T> class FusedMultiTransformerOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; auto place = ctx.GetPlace(); auto &dev_ctx = ctx.cuda_device_context(); auto *time_step = ctx.Input<Tensor>("TimeStep"); // 0. input auto *input_x = ctx.Input<Tensor>("X"); const auto input_x_dims = input_x->dims(); int bsz = input_x_dims[0]; int seq_len = input_x_dims[1]; int dim_embed = input_x_dims[2]; int bsz_seq = bsz * seq_len; // 1. layer norm const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm"); const float epsilon = ctx.Attr<float>("epsilon"); auto ln_scales = ctx.MultiInput<Tensor>("LnScale"); auto ln_biases = ctx.MultiInput<Tensor>("LnBias"); auto ln_compute = AttnLayerNorm<T>(dev_ctx, epsilon, bsz_seq, dim_embed); Tensor ln_mean, ln_var; auto *ln_mean_data = ln_mean.mutable_data<U>({bsz_seq}, place); auto *ln_var_data = ln_var.mutable_data<U>({bsz_seq}, place); // 2. 
qkv // x: qkv's input [batch_size, seq_len, dim_embed] // y: qkv's weight: [3, num_head, dim_head, dim_embed] auto qkv_weights = ctx.MultiInput<Tensor>("QKVW"); auto qkv_biases = ctx.MultiInput<Tensor>("QKVBias"); const auto qkv_w_dims = qkv_weights[0]->dims(); int num_head = qkv_w_dims[1]; int dim_head = qkv_w_dims[2]; int hidden_size = num_head * dim_head; int output_size = 3 * hidden_size; int input_size = dim_embed; bool compute_bias = qkv_biases.size() > 0 && time_step == nullptr; // (transA, transB, compute_bias) = (false, true, false) auto qkv_compute = AttnMatMul<T>(dev_ctx, false, true, bsz_seq, output_size, input_size, compute_bias); Tensor qkv_out; auto *qkv_out_data = qkv_out.mutable_data<T>({bsz, seq_len, 3, num_head, dim_head}, place); // 3. fmha AttnDropoutParam attn_param(true, "upscale_in_train", 0.0, true, true, 0, nullptr); auto fmha_compute = FMHARef<T>(dev_ctx, bsz, seq_len, num_head, dim_head, attn_param); auto *src_mask = ctx.Input<Tensor>("SrcMask"); auto cache_kvs = ctx.MultiInput<Tensor>("CacheKV"); auto cache_kv_outs = ctx.MultiOutput<Tensor>("CacheKVOut"); // auto *time_step = ctx.Input<Tensor>("TimeStep"); auto out_seq_len = seq_len; if (time_step) { PADDLE_ENFORCE_EQ(time_step->place(), platform::CPUPlace(), platform::errors::PreconditionNotMet( "The place of input(TimeStep) must be CPUPlace.")); // cache_seq_len int time_step_value = time_step->data<int>()[0]; PADDLE_ENFORCE_GT(time_step_value, 0, platform::errors::PreconditionNotMet( "The value of time_step must > 0, but now is %d", time_step_value)); PADDLE_ENFORCE_EQ( seq_len, 1, platform::errors::PreconditionNotMet( "In decode stage, the seq_len of input must be 1, but now is %d", seq_len)); out_seq_len += time_step_value; } Tensor transpose_out_2, qk_out; auto *transpose_out_2_data = transpose_out_2.mutable_data<T>( {3, bsz, num_head, seq_len, dim_head}, place); auto *qk_out_data = qk_out.mutable_data<T>({bsz, num_head, seq_len, out_seq_len}, place); Tensor softmax_out; Tensor 
attn_dropout_mask_out, attn_dropout_out; Tensor qktv_out, fmha_out; auto *softmax_out_data = softmax_out.mutable_data<T>( {bsz, num_head, seq_len, out_seq_len}, place); auto *attn_dropout_mask_out_data = attn_dropout_mask_out.mutable_data<T>( {bsz, num_head, seq_len, out_seq_len}, place); auto *attn_dropout_data_data = attn_dropout_out.mutable_data<T>( {bsz, num_head, seq_len, out_seq_len}, place); auto *qktv_out_data = qktv_out.mutable_data<T>({bsz, num_head, seq_len, dim_head}, place); auto *fmha_out_data = fmha_out.mutable_data<T>({bsz, seq_len, num_head, dim_head}, place); // 4. out_linear auto out_linear_weights = ctx.MultiInput<Tensor>("OutLinearW"); auto out_linear_biases = ctx.MultiInput<Tensor>("OutLinearBias"); int ring_id = ctx.Attr<int>("ring_id"); // (transA, transB, compute_bias) = (false, false, false) auto out_linear_compute = AttnMatMul<T>(dev_ctx, false, false, bsz_seq, dim_embed, hidden_size, false); // 5. ln(residual + bias) DropoutParam dropout_param2(true, 0, true, true, 0.0, nullptr, 0); FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper( dev_ctx, bsz_seq, dim_embed, dropout_param2, epsilon); auto ffn_ln_scales = ctx.MultiInput<Tensor>("FFNLnScale"); auto ffn_ln_biases = ctx.MultiInput<Tensor>("FFNLnBias"); Tensor bias_dropout_residual_out, dropout_mask_out; auto *bias_dropout_residual_out_data = bias_dropout_residual_out.mutable_data<T>({bsz, seq_len, dim_embed}, place); auto *dropout_mask_out_data = dropout_mask_out.mutable_data<uint8_t>( {bsz, seq_len, dim_embed}, place); // 6. ffn matmul1 auto ffn1_weights = ctx.MultiInput<Tensor>("FFN1Weight"); auto ffn1_biases = ctx.MultiInput<Tensor>("FFN1Bias"); auto ffn1_weight_dim = ffn1_weights[0]->dims(); int dim_ffn = ffn1_weight_dim[1]; auto ffn1_linear_compute = AttnMatMul<T>(dev_ctx, false, false, bsz_seq, dim_ffn, dim_embed, false); Tensor ffn1_out; auto *ffn1_out_data = ffn1_out.mutable_data<T>({bsz_seq, dim_ffn}, place); // 7. 
ffn act + bias DropoutParam ffn1_dropout_param(true, 0, true, true, 0.0, nullptr, 0); FusedDropoutHelper<T, uint8_t> fused_act_dropout_helper( dev_ctx, bsz_seq, dim_ffn, ffn1_dropout_param); Tensor ffn1_dropout_out, ffn1_dropout_mask; auto *ffn1_dropout_out_data = ffn1_dropout_out.mutable_data<T>({bsz_seq, dim_ffn}, place); auto *ffn1_dropout_mask_data = ffn1_dropout_mask.mutable_data<uint8_t>({bsz_seq, dim_ffn}, place); // 8. ffn2 matmul auto ffn2_weights = ctx.MultiInput<Tensor>("FFN2Weight"); auto ffn2_biases = ctx.MultiInput<Tensor>("FFN2Bias"); auto ffn2_linear_compute = AttnMatMul<T>(dev_ctx, false, false, bsz_seq, dim_embed, dim_ffn, false); // 9. ffn2 residual bias DropoutParam ffn2_dropout_param(true, 0, true, true, 0.0, nullptr, 0); FusedDropoutLayerNormHelper<T, uint8_t> ffn2_fused_dropout_helper( dev_ctx, bsz_seq, dim_embed, ffn2_dropout_param, epsilon); // calc auto *out = ctx.Output<Tensor>("Out"); auto *from_data = out->mutable_data<T>(place); Tensor *from_tensor = out; Tensor tmp_out; auto *tmp_out_data = tmp_out.mutable_data<T>({bsz, seq_len, dim_embed}, place); auto *x_data = input_x->data<T>(); Tensor *buf0 = nullptr; Tensor *buf1 = nullptr; // step0: x --> buf1 // step1: buf1 --> buf0 // step2: buf0 --> buf1 int layers = qkv_weights.size(); if (layers & 1) { // odd, set buf1 as out buf0 = &tmp_out; buf1 = out; } else { // even, set buf0 as out buf0 = out; buf1 = &tmp_out; } for (int i = 0; i < layers; ++i) { // step1. layer_norm if (i == 0 && pre_layer_norm) { auto *ln_scale_data = ln_scales[i]->data<U>(); auto *ln_bias_data = ln_biases[i]->data<U>(); // TODO(wangxi): can remove mean var in inference ln_compute.ComputeForward(x_data, ln_scale_data, ln_bias_data, buf1->data<T>(), ln_mean_data, ln_var_data); } else if (!pre_layer_norm) { PADDLE_THROW(platform::errors::Unimplemented( "Unimplemented post_layer_norm for now.")); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step1"; #endif // step2. 
qkv const Tensor *qkv_bias = qkv_biases.size() > 0 ? qkv_biases[i] : nullptr; // NOTE: in decoder stage, bias is fused in fmha const Tensor *bias = time_step ? nullptr : qkv_bias; qkv_compute.ComputeForward(qkv_weights[i], buf1, bias, &qkv_out, &qkv_out); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step2"; #endif // step3. fmha const Tensor *cache_kv = cache_kvs.size() > 0 ? cache_kvs[i] : nullptr; Tensor *cache_kv_out = cache_kv ? cache_kv_outs[i] : nullptr; if (time_step) { // generation decoder stage // [2, batch_size, num_head, max_seq_len, head_size] int max_seq_len = cache_kv->dims()[3]; fmha<T>(dev_ctx, qkv_out, *qkv_bias, *src_mask, cache_kv_out, &fmha_out, bsz, max_seq_len, num_head, dim_head, time_step->data<int>()[0], 1. / sqrt(dim_head)); } else if (cache_kv_out) { // generation context stage // TODO(wangxi): can remove dropout in inference fmha_compute.ComputeForward(qkv_out, nullptr, src_mask, &transpose_out_2, nullptr, &qk_out, nullptr, &softmax_out, &attn_dropout_mask_out, &attn_dropout_out, &qktv_out, &fmha_out); // [3, bsz, num_head, seq_len, head_dim] T *qkv_data = transpose_out_2_data; int64_t q_size = bsz * seq_len * num_head * dim_head; int64_t k_size = q_size; const T *q_ptr = qkv_data; const T *k_ptr = q_ptr + q_size; const T *v_ptr = k_ptr + k_size; // [2, bsz, num_head, max_seq_len, head_dim] int max_seq_len = cache_kv_out->dims()[3]; T *cache_kv_data = cache_kv_out->data<T>(); int64_t cache_k_size = bsz * num_head * max_seq_len * dim_head; T *cache_k_ptr = cache_kv_data; T *cache_v_ptr = cache_kv_data + cache_k_size; write_cache_kv<T>(dev_ctx, cache_k_ptr, cache_v_ptr, k_ptr, v_ptr, bsz, num_head, seq_len, max_seq_len, dim_head); } else { // not generation // TODO(wangxi): can remove dropout in inference fmha_compute.ComputeForward( qkv_out, cache_kv, src_mask, &transpose_out_2, cache_kv_out, &qk_out, nullptr, &softmax_out, &attn_dropout_mask_out, &attn_dropout_out, &qktv_out, &fmha_out); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER 
VLOG(0) << "step3"; #endif // step4. out_linear out_linear_compute.ComputeForward(out_linear_weights[i], &fmha_out, nullptr, buf1, nullptr); AllReduce<T>(*buf1, ring_id, dev_ctx); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step4"; #endif // step5. ln(residual + dropout(input + bias)) if (pre_layer_norm) { auto *ln_scale_data = ffn_ln_scales[i]->data<U>(); auto *ln_bias_data = ffn_ln_biases[i]->data<U>(); auto *out_linear_bias_data = out_linear_biases[i]->data<T>(); // inplace fused_dropout_layernorm_helper.LayernormResidualDropoutBias( dev_ctx, buf1->data<T>(), x_data, out_linear_bias_data, ln_scale_data, ln_bias_data, bias_dropout_residual_out_data, dropout_mask_out_data, buf1->data<T>(), ln_mean_data, ln_var_data); } else { } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step5"; #endif // step6. ffn matmul1 ffn1_linear_compute.ComputeForward(ffn1_weights[i], buf1, nullptr, &ffn1_out, nullptr); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step6"; #endif // step7. act bias // TODO(wangxi): remove dropout mask in inference fused_act_dropout_helper.DropoutActBias( dev_ctx, ffn1_out_data, ffn1_biases[i]->data<T>(), "gelu", ffn1_dropout_out_data, ffn1_dropout_mask_data); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step7"; #endif // step8. ffn matmul2 ffn2_linear_compute.ComputeForward(ffn2_weights[i], &ffn1_dropout_out, nullptr, buf1, nullptr); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step8.0"; #endif AllReduce<T>(*buf1, ring_id, dev_ctx); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step8.1"; #endif // step9. 
residual bias if (pre_layer_norm) { // TODO(wangxi): remove dropout mask in inference if (i < layers - 1) { auto *ln_scale_data = ln_scales[i + 1]->data<U>(); auto *ln_bias_data = ln_biases[i + 1]->data<U>(); ffn2_fused_dropout_helper.LayernormResidualDropoutBias( dev_ctx, buf1->data<T>(), bias_dropout_residual_out_data, ffn2_biases[i]->data<T>(), ln_scale_data, ln_bias_data, buf1->data<T>(), dropout_mask_out_data, buf0->data<T>(), ln_mean_data, ln_var_data); } else { ffn2_fused_dropout_helper.ResidualDropoutBias( dev_ctx, buf1->data<T>(), bias_dropout_residual_out_data, ffn2_biases[i]->data<T>(), buf1->data<T>(), dropout_mask_out_data); } } else { } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step9"; #endif x_data = buf1->data<T>(); std::swap(buf0, buf1); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(fused_multi_transformer, ops::FusedMultiTransformerOpKernel<plat::float16>, ops::FusedMultiTransformerOpKernel<float>);
1b4c9d80dbc0831ee7e73d3b13553897c4d9cc6b.cu
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

// This file has been adapted from FasterTransformer file:
// https://github.com/NVIDIA/FasterTransformer/blob/v4.0/fastertransformer/cuda/masked_multihead_attention.cu
// We add License in the head.

// headers sort by clang-format may cause compiling error or test faiure,
// see https://github.com/PaddlePaddle/Paddle/pull/42840/
// clang-format off
#include <cuda_fp16.h>
#include <float.h>
#include <cub/cub.cuh>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/fluid/operators/fused/attention_layer_norm.h"
#include "paddle/fluid/operators/fused/attn_gemm.h"
#include "paddle/fluid/operators/fused/fmha_ref.h"
#include "paddle/fluid/operators/fused/fused_dropout_helper.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
// clang-format on

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

// for debug
// #define _DEBUG_FUSED_MULTI_TRANSFORMER

// Sums `tensor` in place across the tensor-parallel group identified by
// `ring_id` via NCCL/RCCL; no-op when ring_id == -1 (no tensor parallelism).
template <typename T>
static void AllReduce(framework::Tensor &tensor,  // NOLINT
                      const int ring_id,
                      const platform::CUDADeviceContext &ctx) {
  if (ring_id == -1) return;
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
  auto dtype =
      platform::ToNCCLDataType(framework::TransToProtoVarType(tensor.dtype()));
  int64_t numel = tensor.numel();
  const void *sendbuff = tensor.data<T>();
  auto place = ctx.GetPlace();
  // Same buffer is used for send and receive: in-place all-reduce.
  void *recvbuff = tensor.mutable_data<T>(place);
  auto comm = platform::NCCLCommContext::Instance().Get(ring_id, place);
  auto stream = ctx.stream();
  PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
      sendbuff, recvbuff, numel, dtype, ncclSum, comm->comm(), stream));
#else
  PADDLE_THROW(platform::errors::Unimplemented(
      "PaddlePaddle should compile with NCCL or RCCL when used tensor model "
      "parallel op."));
#endif
}

namespace {

namespace plat = paddle::platform;
using float16 = plat::float16;

// Accumulate softmax logits / attention output in fp32 even for fp16 data.
#define MMHA_USE_FP32_ACUM_FOR_LOGITS
#define MMHA_USE_FP32_ACUM_FOR_OUT

// Parameters of the single-step (decoder stage) masked multi-head attention
// kernel below.
template <typename T>
struct Masked_multihead_attention_params {
  // output buffer, [B, 1(seq_len), num_head * dim_head]
  T *out;
  // qkv_out, [B, 1(seq_len), 3, num_head * dim_head]
  const T *qkv;
  // bias, [3, num_head, dim_head]
  const T *qkv_bias;
  // TODO(wangxi): optimize with input_lengths and max_input_len?
// [bsz, 1, 1, time_step(cache_seq_length)+1] const T *attn_mask; // [2, B, num_head, max_seq_len(valid cache_seq_len), dim_head] // k [B, num_head, dim_head/x, max_seq_len, x], that is `seq_len` first // v [B, num_head, max_seq_len, dim_head] T *cache_kv; int batch_size; int num_head; int timestep; // cache_seq_length int max_seq_length; // 1.f / sqrt(Dh) float inv_sqrt_dh; }; struct Float8_ { float2 x; float2 y; float2 z; float2 w; }; // clang-format off template <typename T, int Dh> struct Qk_vec_ {}; template <> struct Qk_vec_<float, 32> { using Type = float; }; template <> struct Qk_vec_<float, 64> { using Type = float2; }; template <> struct Qk_vec_<float, 128> { using Type = float4; }; template <> struct Qk_vec_<float16, 32> { using Type = uint32_t; }; template <> struct Qk_vec_<float16, 64> { using Type = uint32_t; }; template <> struct Qk_vec_<float16, 128> { using Type = uint2; }; template <typename T, int THREADS_PER_KEY> struct K_vec_ {}; template <> struct K_vec_<float, 4> { using Type = float; }; template <> struct K_vec_<float, 2> { using Type = float2; }; template <> struct K_vec_<float, 1> { using Type = float4; }; template <> struct K_vec_<float16, 4> { using Type = uint32_t; }; template <> struct K_vec_<float16, 2> { using Type = uint2; }; template <> struct K_vec_<float16, 1> { using Type = uint4; }; template <typename T, int V_VEC_SIZE> struct V_vec_ {}; template <> struct V_vec_<float, 1> { using Type = float; }; template <> struct V_vec_<float, 2> { using Type = float2; }; template <> struct V_vec_<float, 4> { using Type = float4; }; template <> struct V_vec_<float16, 2> { using Type = uint32_t; }; template <> struct V_vec_<float16, 4> { using Type = uint2; }; template <> struct V_vec_<float16, 8> { using Type = uint4; }; #ifdef MMHA_USE_FP32_ACUM_FOR_OUT template <typename T> struct V_vec_acum_fp32_ {}; // template <> struct V_vec_acum_fp32_<float> { using Type = float; }; // template <> struct V_vec_acum_fp32_<float2> { using Type = float2; 
}; template <> struct V_vec_acum_fp32_<float4> { using Type = float4; }; // template <> struct V_vec_acum_fp32_<uint32_t> { using Type = float2; }; // template <> struct V_vec_acum_fp32_<uint2 > { using Type = Float4_; }; template <> struct V_vec_acum_fp32_<uint4> { using Type = Float8_; }; #endif // clang-format on inline __device__ float half_to_float(uint16_t h) { float f; asm volatile("cvt.f32.f16 %0, %1;\n" : "=f"(f) : "h"(h)); return f; } inline __device__ float2 half2_to_float2(uint32_t v) { uint16_t lo, hi; asm volatile("mov.b32 {%0, %1}, %2;\n" : "=h"(lo), "=h"(hi) : "r"(v)); return make_float2(half_to_float(lo), half_to_float(hi)); } inline __device__ uint32_t float2_to_half2(float2 f) { union { uint32_t u32; uint16_t u16[2]; } tmp; #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 asm volatile("cvt.rn.f16x2.f32 %0, %1, %2;\n" : "=r"(tmp.u32) : "f"(f.y), "f"(f.x)); #else asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[0]) : "f"(f.x)); asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[1]) : "f"(f.y)); #endif return tmp.u32; } inline __device__ float add(float a, float b) { return a + b; } inline __device__ float2 add(float2 a, float2 b) { float2 c; c.x = add(a.x, b.x); c.y = add(a.y, b.y); return c; } inline __device__ float4 add(float4 a, float4 b) { float4 c; c.x = add(a.x, b.x); c.y = add(a.y, b.y); c.z = add(a.z, b.z); c.w = add(a.w, b.w); return c; } inline __device__ uint16_t add(uint16_t a, uint16_t b) { uint16_t c; asm volatile("add.f16 %0, %1, %2;\n" : "=h"(c) : "h"(a), "h"(b)); return c; } inline __device__ uint32_t add(uint32_t a, uint32_t b) { uint32_t c; asm volatile("add.f16x2 %0, %1, %2;\n" : "=r"(c) : "r"(a), "r"(b)); return c; } inline __device__ uint2 add(uint2 a, uint2 b) { uint2 c; c.x = add(a.x, b.x); c.y = add(a.y, b.y); return c; } inline __device__ uint4 add(uint4 a, uint4 b) { uint4 c; c.x = add(a.x, b.x); c.y = add(a.y, b.y); c.z = add(a.z, b.z); c.w = add(a.w, b.w); return c; } inline __device__ float2 add(uint32_t 
a, float2 fb) { float2 fa = half2_to_float2(a); return add(fa, fb); } inline __device__ Float8_ add(uint4 a, Float8_ fb) { Float8_ fc; fc.x = add(a.x, fb.x); fc.y = add(a.y, fb.y); fc.z = add(a.z, fb.z); fc.w = add(a.w, fb.w); return fc; } template <typename Acc, typename A, typename B> inline __device__ Acc mul(A a, B b); template <> inline __device__ float mul<float, float>(float a, float b) { return a * b; } template <> inline __device__ float2 mul(float2 a, float2 b) { float2 c; c.x = a.x * b.x; c.y = a.y * b.y; return c; } template <> inline __device__ float4 mul(float4 a, float4 b) { float4 c; c.x = a.x * b.x; c.y = a.y * b.y; c.z = a.z * b.z; c.w = a.w * b.w; return c; } template <> inline __device__ uint16_t mul(uint16_t a, uint16_t b) { uint16_t c; asm volatile("mul.f16 %0, %1, %2;\n" : "=h"(c) : "h"(a), "h"(b)); return c; } template <> inline __device__ uint32_t mul(uint32_t a, uint32_t b) { uint32_t c; asm volatile("mul.f16x2 %0, %1, %2;\n" : "=r"(c) : "r"(a), "r"(b)); return c; } template <> inline __device__ uint2 mul(uint2 a, uint2 b) { uint2 c; c.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x); c.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y); return c; } template <> inline __device__ uint4 mul(uint4 a, uint4 b) { uint4 c; c.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x); c.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y); c.z = mul<uint32_t, uint32_t, uint32_t>(a.z, b.z); c.w = mul<uint32_t, uint32_t, uint32_t>(a.w, b.w); return c; } inline __device__ float sum(float v) { return v; } inline __device__ float sum(float2 v) { return v.x + v.y; } inline __device__ float sum(float4 v) { return v.x + v.y + v.z + v.w; } inline __device__ float sum(uint16_t v) { return half_to_float(v); } inline __device__ float sum(uint32_t v) { float2 tmp = half2_to_float2(v); return tmp.x + tmp.y; } inline __device__ float sum(uint2 v) { uint32_t c = add(v.x, v.y); return sum(c); } inline __device__ float sum(uint4 v) { uint32_t c = add(v.x, v.y); c = add(c, v.z); c = 
add(c, v.w); return sum(c); } template <typename T> inline __device__ float dot(T a, T b) { return sum(mul<T, T, T>(a, b)); } template <typename A, typename T> inline __device__ float dot(T a, T b) { return sum(mul<A, T, T>(a, b)); } inline __device__ constexpr uint32_t shfl_mask(int threads) { return threads == 32 ? uint32_t(-1) : (1u << threads) - 1u; } template <typename T> inline __device__ __host__ T div_up(T m, T n) { return (m + n - 1) / n; } inline __device__ float fma(float a, float b, float c) { return a * b + c; } inline __device__ float2 fma(float2 a, float2 b, float2 c) { float2 d; d.x = fma(a.x, b.x, c.x); d.y = fma(a.y, b.y, c.y); return d; } inline __device__ float4 fma(float4 a, float4 b, float4 c) { float4 d; d.x = fma(a.x, b.x, c.x); d.y = fma(a.y, b.y, c.y); d.z = fma(a.z, b.z, c.z); d.w = fma(a.w, b.w, c.w); return d; } inline __device__ uint32_t fma(uint32_t a, uint32_t b, uint32_t c) { uint32_t d; asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(d) : "r"(a), "r"(b), "r"(c)); return d; } inline __device__ uint2 fma(uint2 a, uint2 b, uint2 c) { uint2 d; d.x = fma(a.x, b.x, c.x); d.y = fma(a.y, b.y, c.y); return d; } inline __device__ uint4 fma(uint4 a, uint4 b, uint4 c) { uint4 d; d.x = fma(a.x, b.x, c.x); d.y = fma(a.y, b.y, c.y); d.z = fma(a.z, b.z, c.z); d.w = fma(a.w, b.w, c.w); return d; } inline __device__ float2 fma(float a, float2 b, float2 c) { float2 d; d.x = fma(a, b.x, c.x); d.y = fma(a, b.y, c.y); return d; } inline __device__ float4 fma(float a, float4 b, float4 c) { float4 d; d.x = fma(a, b.x, c.x); d.y = fma(a, b.y, c.y); d.z = fma(a, b.z, c.z); d.w = fma(a, b.w, c.w); return d; } inline __device__ Float8_ fma(float a, Float8_ b, Float8_ c) { Float8_ d; d.x = fma(a, b.x, c.x); d.y = fma(a, b.y, c.y); d.z = fma(a, b.z, c.z); d.w = fma(a, b.w, c.w); return d; } inline __device__ uint32_t h0_h0(uint16_t a) { uint32_t b; asm volatile("mov.b32 %0, {%1, %1};" : "=r"(b) : "h"(a)); return b; } inline __device__ uint32_t fma(uint16_t 
a, uint32_t b, uint32_t c) { return fma(h0_h0(a), b, c); } inline __device__ uint2 fma(uint16_t a, uint2 b, uint2 c) { uint32_t s = h0_h0(a); uint2 d; d.x = fma(s, b.x, c.x); d.y = fma(s, b.y, c.y); return d; } inline __device__ uint4 fma(uint16_t a, uint4 b, uint4 c) { uint32_t s = h0_h0(a); uint4 d; d.x = fma(s, b.x, c.x); d.y = fma(s, b.y, c.y); d.z = fma(s, b.z, c.z); d.w = fma(s, b.w, c.w); return d; } inline __device__ float cast_to_float(float u) { return u; } inline __device__ float2 cast_to_float(float2 u) { return u; } inline __device__ float4 cast_to_float(float4 u) { return u; } inline __device__ Float8_ cast_to_float(uint4 u) { Float8_ tmp; tmp.x = half2_to_float2(u.x); tmp.y = half2_to_float2(u.y); tmp.z = half2_to_float2(u.z); tmp.w = half2_to_float2(u.w); return tmp; } template <int THREADS_PER_KEY, typename K_vec, int N> inline __device__ float qk_dot_(const K_vec (&q)[N], const K_vec (&k)[N]) { K_vec qk_vec = mul<K_vec, K_vec, K_vec>(q[0], k[0]); #pragma unroll for (int ii = 1; ii < N; ++ii) { qk_vec = fma(q[ii], k[ii], qk_vec); } float qk = sum(qk_vec); #pragma unroll for (int mask = THREADS_PER_KEY / 2; mask >= 1; mask /= 2) { qk += __shfl_xor_sync(uint32_t(-1), qk, mask); } return qk; } template <typename T, int THREADS_PER_KEY> struct Qk_dot { template <typename K_vec, int N> static inline __device__ float dot(const K_vec (&q)[N], const K_vec (&k)[N]) { return qk_dot_<THREADS_PER_KEY>(q, k); } }; template <int WARPS_PER_BLOCK, int WARP_SIZE = 32> inline __device__ float block_sum(float *red_smem, float sum) { int warp = threadIdx.x / WARP_SIZE; int lane = threadIdx.x % WARP_SIZE; #pragma unroll for (int mask = WARP_SIZE / 2; mask >= 1; mask /= 2) { sum += __shfl_xor_sync(uint32_t(-1), sum, mask); } if (lane == 0) { red_smem[warp] = sum; } __syncthreads(); if (lane < WARPS_PER_BLOCK) { sum = red_smem[lane]; } #pragma unroll for (int mask = WARPS_PER_BLOCK / 2; mask >= 1; mask /= 2) { sum += __shfl_xor_sync(uint32_t(-1), sum, mask); } return 
__shfl_sync(uint32_t(-1), sum, 0);
}

// Store an fp32 accumulator into the destination element type.
inline __device__ void convert_from_float(float &dst, float src) {  // NOLINT
  dst = src;
}

inline __device__ void convert_from_float(float4 &dst, float4 src) {  // NOLINT
  dst = src;
}

inline __device__ void convert_from_float(plat::float16 &dst,  // NOLINT
                                          float src) {
  dst = static_cast<plat::float16>(src);
}

inline __device__ void convert_from_float(uint4 &dst, Float8_ src) {  // NOLINT
  dst.x = float2_to_half2(src.x);
  dst.y = float2_to_half2(src.y);
  dst.z = float2_to_half2(src.z);
  dst.w = float2_to_half2(src.w);
}

inline __device__ void zero(uint16_t &dst) { dst = uint16_t(0); }  // NOLINT

// Zero-fills any 4-byte-multiple POD type word by word.
template <typename T>
inline __device__ void zero(T &dst) {  // NOLINT
  constexpr int WORDS = sizeof(T) / 4;
  union {
    T raw;
    uint32_t words[WORDS];
  } tmp;
#pragma unroll
  for (int ii = 0; ii < WORDS; ++ii) {
    tmp.words[ii] = 0u;
  }
  dst = tmp.raw;
}

// Decoder-stage (single query token) masked multi-head attention.
// Launched with grid = (num_head, batch_size) and THREADS_PER_BLOCK threads;
// dynamic shared memory sized by smem_size_in_bytes(). Appends the current
// step's K/V (with bias added) into cache_kv, then computes
// softmax(scale * Q*K^T) over timestep+1 keys and the weighted sum with V.
template <typename T, int Dh, int Dh_MAX, int THREADS_PER_KEY,
          int THREADS_PER_VALUE, int THREADS_PER_BLOCK>
__global__ void masked_multihead_attention_kernel(
    Masked_multihead_attention_params<T> params, int pad_active_groups) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
  static_assert(Dh % THREADS_PER_KEY == 0, "");
  static_assert(Dh % THREADS_PER_VALUE == 0, "");

  constexpr int WARP_SIZE = 32;
  constexpr int WARPS_PER_BLOCK = THREADS_PER_BLOCK / WARP_SIZE;

  // Shared scratch is reused for three phases: qk scores, logits, V reduce.
  extern __shared__ char smem_[];

  float *qk_smem = reinterpret_cast<float *>(smem_);

  char *logits_smem_ = smem_;
  // fp32 accum for logits
  float *logits_smem = reinterpret_cast<float *>(logits_smem_);

  T *out_smem = reinterpret_cast<T *>(smem_);

  __shared__ float red_smem[WARPS_PER_BLOCK * 2];
  __shared__ T q_smem[Dh];

  const int bi = blockIdx.y;
  const int hi = blockIdx.x;
  const int bhi = bi * params.num_head + hi;
  const int tid = threadIdx.x;

  float qk_max = -FLT_MAX;
  float qk = 0;

  // qkv [B, S=1, 3, num_head, head_dim]
  int qkv_base_offset = bi * 3 * params.num_head * Dh + hi * Dh;

  using Qk_vec = typename Qk_vec_<T, Dh_MAX>::Type;
  constexpr int QK_VEC_SIZE = sizeof(Qk_vec) / sizeof(T);
  static_assert(Dh % QK_VEC_SIZE == 0 && Dh / QK_VEC_SIZE <= WARP_SIZE, "");
  constexpr int QK_VECS_PER_WARP = Dh / QK_VEC_SIZE;

  // cache_k, [B, num_head, head_dim / x, max_seq_len, x]
  // x == 4/8 for FP32/FP16, 128bit, 16Byte
  constexpr int QK_ELTS_IN_16B = 16 / sizeof(T);
  constexpr int QK_VECS_IN_16B = 16 / sizeof(Qk_vec);

  const T *q_base = params.qkv;
  const T *k_base = params.qkv + params.num_head * Dh;
  const T *q_bias_base = params.qkv_bias;
  const T *k_bias_base = params.qkv_bias + params.num_head * Dh;

  // Phase 1: first warp loads Q and K of the current step, adds bias,
  // stashes Q in shared memory and writes K into the cache.
  if (tid < QK_VECS_PER_WARP) {
    int qk_offset = qkv_base_offset + tid * QK_VEC_SIZE;
    int qk_bias_offset = hi * Dh + tid * QK_VEC_SIZE;

    Qk_vec q = *reinterpret_cast<const Qk_vec *>(&q_base[qk_offset]);
    Qk_vec k = *reinterpret_cast<const Qk_vec *>(&k_base[qk_offset]);

    Qk_vec q_bias =
        *reinterpret_cast<const Qk_vec *>(&q_bias_base[qk_bias_offset]);
    Qk_vec k_bias =
        *reinterpret_cast<const Qk_vec *>(&k_bias_base[qk_bias_offset]);

    q = add(q, q_bias);
    // TODO(wangxi): See this https://github.com/microsoft/unilm/issues/510
    // we may not require k_bias.
    k = add(k, k_bias);

    *reinterpret_cast<Qk_vec *>(&q_smem[tid * QK_VEC_SIZE]) = q;

    int co = tid / QK_VECS_IN_16B;
    int ci = (tid % QK_VECS_IN_16B) * QK_VEC_SIZE;
    int offset = bhi * params.max_seq_length * Dh +
                 co * params.max_seq_length * QK_ELTS_IN_16B +
                 params.timestep * QK_ELTS_IN_16B + ci;
    *reinterpret_cast<Qk_vec *>(&params.cache_kv[offset]) = k;

    qk = dot<Qk_vec, Qk_vec>(q, k);
  }
  // Reduce the self-score (current step's q.k) within the first warp.
  if (tid < WARP_SIZE) {
    for (int mask = WARP_SIZE / 2; mask >= 1; mask /= 2) {
      qk += __shfl_xor_sync(uint32_t(-1), qk, mask);
    }
    if (tid == 0) {
      // NOTE(wangxi): mask must be 0.0
      // T mask = params.attn_mask[
      //    bi * (params.timestep + 1) + params.timestep];
      // qk += static_cast<float>(mask);
      qk *= params.inv_sqrt_dh;
      qk_max = qk;
      qk_smem[params.timestep] = qk;
    }
  }
  __syncthreads();

#ifdef _DEBUG_FUSED_MULTI_TRANSFORMER
  if (bi == 0 && hi == 0 && tid == 0) {
    printf("=======q_out=======\n");
    for (int i = 0; i < Dh; ++i) printf("%f ", static_cast<float>(q_smem[i]));
    printf("\n");
  }
  __syncthreads();
#endif

  // Phase 2: score Q against every cached key (timestep of them).
  using K_vec = typename K_vec_<T, THREADS_PER_KEY>::Type;
  constexpr int K_VEC_SIZE = sizeof(K_vec) / sizeof(T);
  static_assert(Dh % K_VEC_SIZE == 0, "");
  constexpr int K_ELTS_PER_THREAD = Dh / THREADS_PER_KEY;
  constexpr int K_VECS_PER_THREAD = K_ELTS_PER_THREAD / K_VEC_SIZE;

  int ko = tid / THREADS_PER_KEY;
  int ki = (tid % THREADS_PER_KEY) * K_VEC_SIZE;

  K_vec q[K_VECS_PER_THREAD];
#pragma unroll
  for (int i = 0; i < K_VECS_PER_THREAD; ++i) {
    q[i] = *reinterpret_cast<const K_vec *>(
        &q_smem[ki + i * THREADS_PER_KEY * K_VEC_SIZE]);
  }

  constexpr int K_PER_ITER = THREADS_PER_BLOCK / THREADS_PER_KEY;
  constexpr int K_PER_WARP = WARP_SIZE / THREADS_PER_KEY;

  T *k_cache = &params.cache_kv[bhi * params.max_seq_length * Dh + ki];
  int ti_end = div_up(params.timestep, K_PER_WARP) * K_PER_WARP;

  for (int ti = ko; ti < ti_end; ti += K_PER_ITER) {
    K_vec k[K_VECS_PER_THREAD];
#pragma unroll
    for (int ii = 0; ii < K_VECS_PER_THREAD; ++ii) {
      int jj = ii * params.max_seq_length + ti;
      if (ti < params.timestep) {
        k[ii] =
            *reinterpret_cast<const K_vec *>(&k_cache[jj * QK_ELTS_IN_16B]);
      }
    }

    float qk = Qk_dot<T, THREADS_PER_KEY>::dot(q, k) * params.inv_sqrt_dh;

    // bool is_mask = false;
    if (ti < params.timestep && tid % THREADS_PER_KEY == 0) {
      // qk_max = is_mask ? qk_max : fmaxf(qk_max, qk);
      T mask = params.attn_mask[bi * (params.timestep + 1) + ti];
      qk += static_cast<float>(mask);
      qk_max = fmaxf(qk_max, qk);
      qk_smem[ti] = qk;
    }
  }

  // Block-wide max of the scores (for a numerically stable softmax).
#pragma unroll
  for (int mask = WARP_SIZE / 2; mask >= THREADS_PER_KEY; mask /= 2) {
    qk_max = fmaxf(qk_max, __shfl_xor_sync(uint32_t(-1), qk_max, mask));
  }

  const int warp = tid / WARP_SIZE;
  const int lane = tid % WARP_SIZE;

  if (lane == 0) {
    red_smem[warp] = qk_max;
  }

  __syncthreads();

  qk_max = lane < WARPS_PER_BLOCK ? red_smem[lane] : -FLT_MAX;
#pragma unroll
  for (int mask = WARPS_PER_BLOCK / 2; mask >= 1; mask /= 2) {
    qk_max = fmaxf(qk_max, __shfl_xor_sync(uint32_t(-1), qk_max, mask));
  }

  qk_max = __shfl_sync(uint32_t(-1), qk_max, 0);

#ifdef _DEBUG_FUSED_MULTI_TRANSFORMER
  if (bi == 0 && hi == 0 && tid == 0) {
    printf("=======qk_out=======\n");
    for (int i = 0; i <= params.timestep; ++i) printf("%f ", qk_smem[i]);
    printf("qk_max=%f\n", qk_max);
  }
  __syncthreads();
#endif

  // Phase 3: softmax over timestep+1 scores.
  float sum = 0.f;
  for (int ti = tid; ti <= params.timestep; ti += THREADS_PER_BLOCK) {
    // bool is_mask = false;
    // float logit = is_mask ? 0.f : __expf(qk_smem[ti] - qk_max);
    float logit = __expf(qk_smem[ti] - qk_max);
    sum += logit;
    qk_smem[ti] = logit;
  }

  sum = block_sum<WARPS_PER_BLOCK>(&red_smem[WARPS_PER_BLOCK], sum);

  // FIXME(wangxi): need add 1.e-6f?
  float inv_sum = __fdividef(1.f, sum + 1.e-6f);
  for (int ti = tid; ti <= params.timestep; ti += THREADS_PER_BLOCK) {
    convert_from_float(logits_smem[ti], qk_smem[ti] * inv_sum);
  }
  __syncthreads();

  // Phase 4: weighted sum of cached V rows with the softmax logits.
  constexpr int V_VEC_SIZE = Dh / THREADS_PER_VALUE;
  using V_vec = typename V_vec_<T, V_VEC_SIZE>::Type;

  int vo = tid / THREADS_PER_VALUE;
  int vi = (tid % THREADS_PER_VALUE) * V_VEC_SIZE;

  T *v_cache = &params.cache_kv[params.batch_size * params.num_head *
                                    params.max_seq_length * Dh +
                                bhi * params.max_seq_length * Dh + vi];

#ifdef MMHA_USE_FP32_ACUM_FOR_OUT
  using V_vec_acum = typename V_vec_acum_fp32_<V_vec>::Type;
#else
  using V_vec_acum = V_vec;
#endif

  V_vec_acum out;
  zero(out);

  constexpr int V_PER_ITER = THREADS_PER_BLOCK / THREADS_PER_VALUE;
  if (vo < V_PER_ITER) {
    for (int ti = vo; ti < params.timestep; ti += V_PER_ITER) {
      V_vec v = *reinterpret_cast<const V_vec *>(&v_cache[ti * Dh]);
#if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS)
      float logit = logits_smem[ti];
      out = fma(logit, cast_to_float(v), out);
#else
      T logit = logits_smem[ti];
      // Update the partial sums.
      out = fma(logit, v, out);
#endif
    }
  }

#ifdef _DEBUG_FUSED_MULTI_TRANSFORMER
  if (bi == 0 && hi == 0 && tid == 0) {
    printf("======logits_out=====\n");
    for (int i = 0; i <= params.timestep; ++i) printf("%f ", logits_smem[i]);
    printf("\n");
  }
  __syncthreads();
#endif

  // The current step's V: add bias, store into the cache, accumulate.
  if (vo == (params.timestep % V_PER_ITER)) {
    V_vec v = *reinterpret_cast<const V_vec *>(
        &params.qkv[2 * params.num_head * Dh + qkv_base_offset + vi]);
    V_vec v_bias = *reinterpret_cast<const V_vec *>(
        &params.qkv_bias[2 * params.num_head * Dh + hi * Dh + vi]);
    v = add(v, v_bias);
    *reinterpret_cast<V_vec *>(&v_cache[params.timestep * Dh]) = v;

#if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS)
    out = fma(logits_smem[params.timestep], cast_to_float(v), out);
#else
    out = fma(logits_smem[params.timestep], v, out);
#endif
  }

  __syncthreads();

  // Tree-reduce the partial V sums across the pad_active_groups value groups
  // through shared memory, halving the active groups each round.
  if (vo < pad_active_groups / 2) {
    zero(*reinterpret_cast<V_vec *>(&out_smem[vo * Dh + vi]));
  }
#pragma unroll
  for (int active_groups = pad_active_groups; active_groups >= 2;
       active_groups /= 2) {
    int midpoint = active_groups / 2;
    if (vo >= midpoint && vo < active_groups) {
#ifdef MMHA_USE_FP32_ACUM_FOR_OUT
      convert_from_float(
          *reinterpret_cast<V_vec *>(&out_smem[(vo - midpoint) * Dh + vi]),
          out);
#else
      *reinterpret_cast<V_vec *>(&out_smem[(vo - midpoint) * Dh + vi]) = out;
#endif
    }
    __syncthreads();
    if (vo < midpoint) {
      out = add(*reinterpret_cast<const V_vec *>(&out_smem[vo * Dh + vi]), out);
    }
    __syncthreads();
  }

  if (vo == 0) {
#ifdef MMHA_USE_FP32_ACUM_FOR_OUT
    convert_from_float(*reinterpret_cast<V_vec *>(&params.out[bhi * Dh + vi]),
                       out);
#else
    *reinterpret_cast<V_vec *>(&params.out[bhi * Dh + vi]) = out;
#endif
  }

#ifdef _DEBUG_FUSED_MULTI_TRANSFORMER
  __syncthreads();
  if (bi == 0 && hi == 0 && tid == 0) {
    printf("======fmha_out=====\n");
    for (int i = 0; i < Dh; ++i)
      printf("%f ", static_cast<float>(params.out[i]));
    printf("\n");
  }
#endif
#else
  assert(false);
#endif
}

// Dynamic shared memory needed by the kernel: the larger of the softmax
// scratch (scores/logits) and the cross-group output-reduction scratch.
template <typename T>
inline size_t smem_size_in_bytes(
    const Masked_multihead_attention_params<T> &params, int
dim_head, int threads_per_value, int threads_per_block, int pad_active_groups) { size_t qk_sz = div_up(params.timestep + 1, 4) * 16; size_t logits_sz = 0; #ifndef MMHA_USE_FP32_ACUM_FOR_LOGITS if (sizeof(T) != 4) { logits_sz = div_up(params.max_seq_length, 4) * 4 * sizeof(T); } #endif size_t softmax_sz = qk_sz + logits_sz; int rows_per_red = pad_active_groups; size_t red_sz = rows_per_red * dim_head * sizeof(T) / 2; return max(softmax_sz, red_sz); } #define MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, THDS_PER_KEY, THDS_PER_VALUE, \ THDS_PER_BLOCK, stream) \ int pad_active_groups = \ 1 << static_cast<int>(ceil(std::log2(THDS_PER_BLOCK / THDS_PER_VALUE))); \ size_t smem_sz = smem_size_in_bytes<T>(params, Dh, THDS_PER_VALUE, \ THDS_PER_BLOCK, pad_active_groups); \ dim3 grid(params.num_head, params.batch_size); \ masked_multihead_attention_kernel<T, Dh, Dh_MAX, THDS_PER_KEY, \ THDS_PER_VALUE, THDS_PER_BLOCK> \ <<<grid, THDS_PER_BLOCK, smem_sz, stream>>>(params, pad_active_groups) template <typename T, int Dh, int Dh_MAX> void fmha_launch_kernel(const Masked_multihead_attention_params<T> &params, const cudaStream_t &stream) { constexpr int THREADS_PER_VALUE = Dh * sizeof(T) / 16; if (params.timestep < 32) { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, stream); } else if (params.timestep < 2048) { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, stream); } else { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, stream); } } template <typename T> void fmha(const platform::CUDADeviceContext &dev_ctx, const Tensor &qkv_tensor, const Tensor &qkv_bias_tensor, const Tensor &src_mask_tensor, Tensor *cache_kv_tensor, Tensor *out_tensor, int batch_size, int max_seq_length, int num_head, int dim_head, int timestep, float inv_sqrt_dh) { Masked_multihead_attention_params<T> params; params.out = out_tensor->data<T>(); params.qkv = qkv_tensor.data<T>(); params.qkv_bias = qkv_bias_tensor.data<T>(); params.attn_mask = src_mask_tensor.data<T>(); 
  params.cache_kv = cache_kv_tensor->data<T>();
  params.batch_size = batch_size;
  params.num_head = num_head;
  params.timestep = timestep;
  params.max_seq_length = max_seq_length;
  params.inv_sqrt_dh = inv_sqrt_dh;

  switch (dim_head) {
    case 32:
      fmha_launch_kernel<T, 32, 32>(params, dev_ctx.stream());
      break;
    case 64:
      fmha_launch_kernel<T, 64, 64>(params, dev_ctx.stream());
      break;
    case 96:
      fmha_launch_kernel<T, 96, 128>(params, dev_ctx.stream());
      break;
    case 128:
      fmha_launch_kernel<T, 128, 128>(params, dev_ctx.stream());
      break;
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "dim_head = %d is unsupport, only support "
          "dim_head = 32, 64, 96 or 128 for now.",
          dim_head));
  }
}

// NOTE: simd with 16Bytes(128bit), float is 4, float16 is 8
constexpr int VEC_16B = 16;

// Writes context-stage K into the cache while transposing it to the
// `seq_len`-major cache layout; one uint4 (16B) copied per thread.
// grid = (ceil(max_seq_len*dim_head/x / block), bsz, num_head).
template <typename T>
__global__ void write_cache_k_kernel(T *cache_k, const T *k, const int num_head,
                                     const int dim_head, const int seq_len,
                                     const int max_seq_len) {
  const int bi = blockIdx.y;
  const int hi = blockIdx.z;
  constexpr int X_ELEMS = VEC_16B / sizeof(T);

  // [bsz, num_head, seq_len, dim_head/x, x]
  auto k_src = reinterpret_cast<const uint4 *>(
      k + bi * num_head * seq_len * dim_head + hi * seq_len * dim_head);

  // [bsz, num_head, dim_head/x, max_seq_len, x]
  auto k_dst = reinterpret_cast<uint4 *>(
      cache_k + bi * num_head * max_seq_len * dim_head +
      hi * max_seq_len * dim_head);

  const int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
  // vec size
  int dim_head_div_x = dim_head / X_ELEMS;

  // FIXME(wangxi): num_head is not need?
  // if (out_idx >= num_head * dim_head_div_x * max_seq_len) return;
  if (out_idx >= dim_head_div_x * max_seq_len) return;

  // Decompose the destination index into (k_vec_id, k_seq_len_id).
  int idx = out_idx;
  const int k_seq_len_id = idx % max_seq_len;
  // idx = (idx - k_seq_len_id) / max_seq_len;
  idx = idx / max_seq_len;
  const int k_vec_id = idx % dim_head_div_x;

  if (k_seq_len_id < seq_len) {
    k_dst[out_idx] = k_src[k_seq_len_id * dim_head_div_x + k_vec_id];
  }
}

// Writes context-stage V into the cache (same layout; plain 16B copy).
template <typename T>
__global__ void write_cache_v_kernel(T *cache_v, const T *v, const int num_head,
                                     const int dim_head, const int seq_len,
                                     const int max_seq_len) {
  const int bi = blockIdx.y;
  const int hi = blockIdx.z;

  // [bsz, num_head, seq_len, dim_head/x, x]
  auto v_src = reinterpret_cast<const uint4 *>(
      v + bi * num_head * seq_len * dim_head + hi * seq_len * dim_head);

  // [bsz, num_head, max_seq_len, dim_head/x, x]
  auto v_dst = reinterpret_cast<uint4 *>(
      cache_v + bi * num_head * max_seq_len * dim_head +
      hi * max_seq_len * dim_head);

  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  constexpr int X_ELEMS = VEC_16B / sizeof(T);
  const int dim_head_div_x = dim_head / X_ELEMS;

  if (idx >= dim_head_div_x * seq_len) return;

  v_dst[idx] = v_src[idx];
}

// Host helper: appends the context-stage K/V tensors into the KV cache.
// Requires dim_head to be divisible by the 16-byte vector width.
template <typename T>
void write_cache_kv(const platform::CUDADeviceContext &dev_ctx, T *cache_k,
                    T *cache_v, const T *k, const T *v, const int bsz,
                    const int num_head, const int seq_len,
                    const int max_seq_len, const int dim_head) {
  constexpr int block_sz = 128;
  constexpr int x = VEC_16B / sizeof(T);

  assert(dim_head % x == 0);
  PADDLE_ENFORCE_EQ(
      dim_head % x, 0,
      platform::errors::PreconditionNotMet(
          "dim_head=%d must be divisible by vec_size=%d", dim_head, x));

  int max_size = max_seq_len * dim_head / x;
  int size = seq_len * dim_head / x;
  dim3 grid(div_up(max_size, block_sz), bsz, num_head);
  dim3 grid_v(div_up(size, block_sz), bsz, num_head);

  // transpose [bsz, num_head, seq_len, dim_head/x, x]->
  // [bsz, num_head, dim_head/x, max_seq_len, x]
  write_cache_k_kernel<<<grid, block_sz, 0, dev_ctx.stream()>>>(
      cache_k, k, num_head,
dim_head, seq_len, max_seq_len); // copy [bsz, num_head, seq_len, dim_head/x, x]-> // [bsz, num_head, max_seq_len, dim_head/x, x] write_cache_v_kernel<<<grid_v, block_sz, 0, dev_ctx.stream()>>>( cache_v, v, num_head, dim_head, seq_len, max_seq_len); } } // namespace template <typename T> class FusedMultiTransformerOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; auto place = ctx.GetPlace(); auto &dev_ctx = ctx.cuda_device_context(); auto *time_step = ctx.Input<Tensor>("TimeStep"); // 0. input auto *input_x = ctx.Input<Tensor>("X"); const auto input_x_dims = input_x->dims(); int bsz = input_x_dims[0]; int seq_len = input_x_dims[1]; int dim_embed = input_x_dims[2]; int bsz_seq = bsz * seq_len; // 1. layer norm const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm"); const float epsilon = ctx.Attr<float>("epsilon"); auto ln_scales = ctx.MultiInput<Tensor>("LnScale"); auto ln_biases = ctx.MultiInput<Tensor>("LnBias"); auto ln_compute = AttnLayerNorm<T>(dev_ctx, epsilon, bsz_seq, dim_embed); Tensor ln_mean, ln_var; auto *ln_mean_data = ln_mean.mutable_data<U>({bsz_seq}, place); auto *ln_var_data = ln_var.mutable_data<U>({bsz_seq}, place); // 2. 
qkv // x: qkv's input [batch_size, seq_len, dim_embed] // y: qkv's weight: [3, num_head, dim_head, dim_embed] auto qkv_weights = ctx.MultiInput<Tensor>("QKVW"); auto qkv_biases = ctx.MultiInput<Tensor>("QKVBias"); const auto qkv_w_dims = qkv_weights[0]->dims(); int num_head = qkv_w_dims[1]; int dim_head = qkv_w_dims[2]; int hidden_size = num_head * dim_head; int output_size = 3 * hidden_size; int input_size = dim_embed; bool compute_bias = qkv_biases.size() > 0 && time_step == nullptr; // (transA, transB, compute_bias) = (false, true, false) auto qkv_compute = AttnMatMul<T>(dev_ctx, false, true, bsz_seq, output_size, input_size, compute_bias); Tensor qkv_out; auto *qkv_out_data = qkv_out.mutable_data<T>({bsz, seq_len, 3, num_head, dim_head}, place); // 3. fmha AttnDropoutParam attn_param(true, "upscale_in_train", 0.0, true, true, 0, nullptr); auto fmha_compute = FMHARef<T>(dev_ctx, bsz, seq_len, num_head, dim_head, attn_param); auto *src_mask = ctx.Input<Tensor>("SrcMask"); auto cache_kvs = ctx.MultiInput<Tensor>("CacheKV"); auto cache_kv_outs = ctx.MultiOutput<Tensor>("CacheKVOut"); // auto *time_step = ctx.Input<Tensor>("TimeStep"); auto out_seq_len = seq_len; if (time_step) { PADDLE_ENFORCE_EQ(time_step->place(), platform::CPUPlace(), platform::errors::PreconditionNotMet( "The place of input(TimeStep) must be CPUPlace.")); // cache_seq_len int time_step_value = time_step->data<int>()[0]; PADDLE_ENFORCE_GT(time_step_value, 0, platform::errors::PreconditionNotMet( "The value of time_step must > 0, but now is %d", time_step_value)); PADDLE_ENFORCE_EQ( seq_len, 1, platform::errors::PreconditionNotMet( "In decode stage, the seq_len of input must be 1, but now is %d", seq_len)); out_seq_len += time_step_value; } Tensor transpose_out_2, qk_out; auto *transpose_out_2_data = transpose_out_2.mutable_data<T>( {3, bsz, num_head, seq_len, dim_head}, place); auto *qk_out_data = qk_out.mutable_data<T>({bsz, num_head, seq_len, out_seq_len}, place); Tensor softmax_out; Tensor 
attn_dropout_mask_out, attn_dropout_out; Tensor qktv_out, fmha_out; auto *softmax_out_data = softmax_out.mutable_data<T>( {bsz, num_head, seq_len, out_seq_len}, place); auto *attn_dropout_mask_out_data = attn_dropout_mask_out.mutable_data<T>( {bsz, num_head, seq_len, out_seq_len}, place); auto *attn_dropout_data_data = attn_dropout_out.mutable_data<T>( {bsz, num_head, seq_len, out_seq_len}, place); auto *qktv_out_data = qktv_out.mutable_data<T>({bsz, num_head, seq_len, dim_head}, place); auto *fmha_out_data = fmha_out.mutable_data<T>({bsz, seq_len, num_head, dim_head}, place); // 4. out_linear auto out_linear_weights = ctx.MultiInput<Tensor>("OutLinearW"); auto out_linear_biases = ctx.MultiInput<Tensor>("OutLinearBias"); int ring_id = ctx.Attr<int>("ring_id"); // (transA, transB, compute_bias) = (false, false, false) auto out_linear_compute = AttnMatMul<T>(dev_ctx, false, false, bsz_seq, dim_embed, hidden_size, false); // 5. ln(residual + bias) DropoutParam dropout_param2(true, 0, true, true, 0.0, nullptr, 0); FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper( dev_ctx, bsz_seq, dim_embed, dropout_param2, epsilon); auto ffn_ln_scales = ctx.MultiInput<Tensor>("FFNLnScale"); auto ffn_ln_biases = ctx.MultiInput<Tensor>("FFNLnBias"); Tensor bias_dropout_residual_out, dropout_mask_out; auto *bias_dropout_residual_out_data = bias_dropout_residual_out.mutable_data<T>({bsz, seq_len, dim_embed}, place); auto *dropout_mask_out_data = dropout_mask_out.mutable_data<uint8_t>( {bsz, seq_len, dim_embed}, place); // 6. ffn matmul1 auto ffn1_weights = ctx.MultiInput<Tensor>("FFN1Weight"); auto ffn1_biases = ctx.MultiInput<Tensor>("FFN1Bias"); auto ffn1_weight_dim = ffn1_weights[0]->dims(); int dim_ffn = ffn1_weight_dim[1]; auto ffn1_linear_compute = AttnMatMul<T>(dev_ctx, false, false, bsz_seq, dim_ffn, dim_embed, false); Tensor ffn1_out; auto *ffn1_out_data = ffn1_out.mutable_data<T>({bsz_seq, dim_ffn}, place); // 7. 
ffn act + bias DropoutParam ffn1_dropout_param(true, 0, true, true, 0.0, nullptr, 0); FusedDropoutHelper<T, uint8_t> fused_act_dropout_helper( dev_ctx, bsz_seq, dim_ffn, ffn1_dropout_param); Tensor ffn1_dropout_out, ffn1_dropout_mask; auto *ffn1_dropout_out_data = ffn1_dropout_out.mutable_data<T>({bsz_seq, dim_ffn}, place); auto *ffn1_dropout_mask_data = ffn1_dropout_mask.mutable_data<uint8_t>({bsz_seq, dim_ffn}, place); // 8. ffn2 matmul auto ffn2_weights = ctx.MultiInput<Tensor>("FFN2Weight"); auto ffn2_biases = ctx.MultiInput<Tensor>("FFN2Bias"); auto ffn2_linear_compute = AttnMatMul<T>(dev_ctx, false, false, bsz_seq, dim_embed, dim_ffn, false); // 9. ffn2 residual bias DropoutParam ffn2_dropout_param(true, 0, true, true, 0.0, nullptr, 0); FusedDropoutLayerNormHelper<T, uint8_t> ffn2_fused_dropout_helper( dev_ctx, bsz_seq, dim_embed, ffn2_dropout_param, epsilon); // calc auto *out = ctx.Output<Tensor>("Out"); auto *from_data = out->mutable_data<T>(place); Tensor *from_tensor = out; Tensor tmp_out; auto *tmp_out_data = tmp_out.mutable_data<T>({bsz, seq_len, dim_embed}, place); auto *x_data = input_x->data<T>(); Tensor *buf0 = nullptr; Tensor *buf1 = nullptr; // step0: x --> buf1 // step1: buf1 --> buf0 // step2: buf0 --> buf1 int layers = qkv_weights.size(); if (layers & 1) { // odd, set buf1 as out buf0 = &tmp_out; buf1 = out; } else { // even, set buf0 as out buf0 = out; buf1 = &tmp_out; } for (int i = 0; i < layers; ++i) { // step1. layer_norm if (i == 0 && pre_layer_norm) { auto *ln_scale_data = ln_scales[i]->data<U>(); auto *ln_bias_data = ln_biases[i]->data<U>(); // TODO(wangxi): can remove mean var in inference ln_compute.ComputeForward(x_data, ln_scale_data, ln_bias_data, buf1->data<T>(), ln_mean_data, ln_var_data); } else if (!pre_layer_norm) { PADDLE_THROW(platform::errors::Unimplemented( "Unimplemented post_layer_norm for now.")); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step1"; #endif // step2. 
qkv const Tensor *qkv_bias = qkv_biases.size() > 0 ? qkv_biases[i] : nullptr; // NOTE: in decoder stage, bias is fused in fmha const Tensor *bias = time_step ? nullptr : qkv_bias; qkv_compute.ComputeForward(qkv_weights[i], buf1, bias, &qkv_out, &qkv_out); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step2"; #endif // step3. fmha const Tensor *cache_kv = cache_kvs.size() > 0 ? cache_kvs[i] : nullptr; Tensor *cache_kv_out = cache_kv ? cache_kv_outs[i] : nullptr; if (time_step) { // generation decoder stage // [2, batch_size, num_head, max_seq_len, head_size] int max_seq_len = cache_kv->dims()[3]; fmha<T>(dev_ctx, qkv_out, *qkv_bias, *src_mask, cache_kv_out, &fmha_out, bsz, max_seq_len, num_head, dim_head, time_step->data<int>()[0], 1. / sqrt(dim_head)); } else if (cache_kv_out) { // generation context stage // TODO(wangxi): can remove dropout in inference fmha_compute.ComputeForward(qkv_out, nullptr, src_mask, &transpose_out_2, nullptr, &qk_out, nullptr, &softmax_out, &attn_dropout_mask_out, &attn_dropout_out, &qktv_out, &fmha_out); // [3, bsz, num_head, seq_len, head_dim] T *qkv_data = transpose_out_2_data; int64_t q_size = bsz * seq_len * num_head * dim_head; int64_t k_size = q_size; const T *q_ptr = qkv_data; const T *k_ptr = q_ptr + q_size; const T *v_ptr = k_ptr + k_size; // [2, bsz, num_head, max_seq_len, head_dim] int max_seq_len = cache_kv_out->dims()[3]; T *cache_kv_data = cache_kv_out->data<T>(); int64_t cache_k_size = bsz * num_head * max_seq_len * dim_head; T *cache_k_ptr = cache_kv_data; T *cache_v_ptr = cache_kv_data + cache_k_size; write_cache_kv<T>(dev_ctx, cache_k_ptr, cache_v_ptr, k_ptr, v_ptr, bsz, num_head, seq_len, max_seq_len, dim_head); } else { // not generation // TODO(wangxi): can remove dropout in inference fmha_compute.ComputeForward( qkv_out, cache_kv, src_mask, &transpose_out_2, cache_kv_out, &qk_out, nullptr, &softmax_out, &attn_dropout_mask_out, &attn_dropout_out, &qktv_out, &fmha_out); } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER 
VLOG(0) << "step3"; #endif // step4. out_linear out_linear_compute.ComputeForward(out_linear_weights[i], &fmha_out, nullptr, buf1, nullptr); AllReduce<T>(*buf1, ring_id, dev_ctx); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step4"; #endif // step5. ln(residual + dropout(input + bias)) if (pre_layer_norm) { auto *ln_scale_data = ffn_ln_scales[i]->data<U>(); auto *ln_bias_data = ffn_ln_biases[i]->data<U>(); auto *out_linear_bias_data = out_linear_biases[i]->data<T>(); // inplace fused_dropout_layernorm_helper.LayernormResidualDropoutBias( dev_ctx, buf1->data<T>(), x_data, out_linear_bias_data, ln_scale_data, ln_bias_data, bias_dropout_residual_out_data, dropout_mask_out_data, buf1->data<T>(), ln_mean_data, ln_var_data); } else { } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step5"; #endif // step6. ffn matmul1 ffn1_linear_compute.ComputeForward(ffn1_weights[i], buf1, nullptr, &ffn1_out, nullptr); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step6"; #endif // step7. act bias // TODO(wangxi): remove dropout mask in inference fused_act_dropout_helper.DropoutActBias( dev_ctx, ffn1_out_data, ffn1_biases[i]->data<T>(), "gelu", ffn1_dropout_out_data, ffn1_dropout_mask_data); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step7"; #endif // step8. ffn matmul2 ffn2_linear_compute.ComputeForward(ffn2_weights[i], &ffn1_dropout_out, nullptr, buf1, nullptr); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step8.0"; #endif AllReduce<T>(*buf1, ring_id, dev_ctx); #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step8.1"; #endif // step9. 
residual bias if (pre_layer_norm) { // TODO(wangxi): remove dropout mask in inference if (i < layers - 1) { auto *ln_scale_data = ln_scales[i + 1]->data<U>(); auto *ln_bias_data = ln_biases[i + 1]->data<U>(); ffn2_fused_dropout_helper.LayernormResidualDropoutBias( dev_ctx, buf1->data<T>(), bias_dropout_residual_out_data, ffn2_biases[i]->data<T>(), ln_scale_data, ln_bias_data, buf1->data<T>(), dropout_mask_out_data, buf0->data<T>(), ln_mean_data, ln_var_data); } else { ffn2_fused_dropout_helper.ResidualDropoutBias( dev_ctx, buf1->data<T>(), bias_dropout_residual_out_data, ffn2_biases[i]->data<T>(), buf1->data<T>(), dropout_mask_out_data); } } else { } #ifdef _DEBUG_FUSED_MULTI_TRANSFORMER VLOG(0) << "step9"; #endif x_data = buf1->data<T>(); std::swap(buf0, buf1); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(fused_multi_transformer, ops::FusedMultiTransformerOpKernel<plat::float16>, ops::FusedMultiTransformerOpKernel<float>);
d10a41e24fed1f092a43820b15d6b714f116adfc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> __global__ void pair_wise_product(float *a, float *b, float *c) { int i = threadIdx.x + blockIdx.x * blockDim.x; c[i] = a[i] * b[i]; } __global__ void vectorSum(float *a, float *b, float *c){ int i = threadIdx.x + blockIdx.x * blockDim.x; c[i] = a[i] + b[i]; } __global__ void reduction_num_3(float *c) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { c[tid] += c[tid + s]; } __syncthreads(); } } int main(int argc, char *argv[]){ unsigned int length = 4194304; int i, Size; float *a, *b, *c, *copyC, *gpuA, *gpuB, *gpuC; time_t seed; hipEvent_t start; hipEvent_t stop; float msecTotal; hipEventCreate(&start); hipEventCreate(&stop); if (argc>1) { sscanf(argv[1],"%d",&length); } Size = sizeof(float)*length; a = (float *)calloc(length, sizeof(float)); b = (float *)calloc(length, sizeof(float)); c = (float *)calloc(length, sizeof(float)); copyC = (float *)calloc(length, sizeof(float)); time(&seed); srand48(seed); for (i=0; i<length; i++) a[i] = drand48(), b[i] = drand48(); hipSetDevice(0); int padded_length = floor((length + (512*32 - 1))/(1.0*512*32)) * (512*32); hipError_t error; error = hipMalloc((void**)&gpuA, padded_length*sizeof(float)); if (error != hipSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } error = hipMemset(gpuA, 0, padded_length*sizeof(float)); if (error != hipSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } error = hipMalloc((void**)&gpuB, padded_length*sizeof(float)); if (error != hipSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } error = hipMemset(gpuB, 0, padded_length*sizeof(float)); if (error != hipSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } error = hipMalloc((void**)&gpuC, 
padded_length*sizeof(float)); if (error != hipSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } error = hipMemset(gpuC, 0, padded_length*sizeof(float)); if (error != hipSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } hipEventRecord(start, NULL); for (i=0; i<length; i++) c[i] = a[i] * b[i]; for (i = 1; i < length; i++) { c[0] += c[i]; } hipEventRecord(stop, NULL); hipEventSynchronize(stop); hipEventElapsedTime(&msecTotal, start, stop); printf("cpu time: %.3f ms\n", msecTotal); error = hipMemcpy(gpuA, a, Size, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } error = hipMemcpy(gpuB, b, Size, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } dim3 numThreads(512, 1); dim3 numBlocks(32, 1); hipEventRecord(start, NULL); hipLaunchKernelGGL(( pair_wise_product), dim3(numBlocks), dim3(numThreads), 0, 0, gpuA, gpuB, gpuC); hipDeviceSynchronize(); hipEventRecord(stop, NULL); hipEventSynchronize(stop); //hipEventRecord(start, NULL); error = hipMemcpy(copyC, gpuC, Size, hipMemcpyDeviceToHost); if (error != hipSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } /*for (i=0; i < length; i++) { printf("%f ", copyC[i]); } printf("\n");*/ hipEventRecord(start, NULL); hipLaunchKernelGGL(( reduction_num_3), dim3(numBlocks), dim3(numThreads), 0, 0, gpuC); hipDeviceSynchronize(); hipEventRecord(stop, NULL); hipEventSynchronize(stop); error = hipMemcpy(copyC, gpuC, Size, hipMemcpyDeviceToHost); if (error != hipSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } hipEventElapsedTime(&msecTotal, start, stop); printf("gpu time: %.3f ms\n", msecTotal); //printf("%f\n", copyC[0]); for (i=0; i<length; i++) if (fabs(c[i]-copyC[i]) > 0.000001){ printf("%d\t%f\t%f\n", i, c[i], copyC[i]); return 1; } return 0; }
d10a41e24fed1f092a43820b15d6b714f116adfc.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> __global__ void pair_wise_product(float *a, float *b, float *c) { int i = threadIdx.x + blockIdx.x * blockDim.x; c[i] = a[i] * b[i]; } __global__ void vectorSum(float *a, float *b, float *c){ int i = threadIdx.x + blockIdx.x * blockDim.x; c[i] = a[i] + b[i]; } __global__ void reduction_num_3(float *c) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { c[tid] += c[tid + s]; } __syncthreads(); } } int main(int argc, char *argv[]){ unsigned int length = 4194304; int i, Size; float *a, *b, *c, *copyC, *gpuA, *gpuB, *gpuC; time_t seed; cudaEvent_t start; cudaEvent_t stop; float msecTotal; cudaEventCreate(&start); cudaEventCreate(&stop); if (argc>1) { sscanf(argv[1],"%d",&length); } Size = sizeof(float)*length; a = (float *)calloc(length, sizeof(float)); b = (float *)calloc(length, sizeof(float)); c = (float *)calloc(length, sizeof(float)); copyC = (float *)calloc(length, sizeof(float)); time(&seed); srand48(seed); for (i=0; i<length; i++) a[i] = drand48(), b[i] = drand48(); cudaSetDevice(0); int padded_length = floor((length + (512*32 - 1))/(1.0*512*32)) * (512*32); cudaError_t error; error = cudaMalloc((void**)&gpuA, padded_length*sizeof(float)); if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } error = cudaMemset(gpuA, 0, padded_length*sizeof(float)); if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } error = cudaMalloc((void**)&gpuB, padded_length*sizeof(float)); if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } error = cudaMemset(gpuB, 0, padded_length*sizeof(float)); if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } error = cudaMalloc((void**)&gpuC, padded_length*sizeof(float)); if (error != cudaSuccess) { printf("oops, %d, error: 
%d\n", __LINE__, error); exit(EXIT_FAILURE); } error = cudaMemset(gpuC, 0, padded_length*sizeof(float)); if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } cudaEventRecord(start, NULL); for (i=0; i<length; i++) c[i] = a[i] * b[i]; for (i = 1; i < length; i++) { c[0] += c[i]; } cudaEventRecord(stop, NULL); cudaEventSynchronize(stop); cudaEventElapsedTime(&msecTotal, start, stop); printf("cpu time: %.3f ms\n", msecTotal); error = cudaMemcpy(gpuA, a, Size, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } error = cudaMemcpy(gpuB, b, Size, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } dim3 numThreads(512, 1); dim3 numBlocks(32, 1); cudaEventRecord(start, NULL); pair_wise_product<<<numBlocks, numThreads>>>(gpuA, gpuB, gpuC); cudaDeviceSynchronize(); cudaEventRecord(stop, NULL); cudaEventSynchronize(stop); //cudaEventRecord(start, NULL); error = cudaMemcpy(copyC, gpuC, Size, cudaMemcpyDeviceToHost); if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } /*for (i=0; i < length; i++) { printf("%f ", copyC[i]); } printf("\n");*/ cudaEventRecord(start, NULL); reduction_num_3<<<numBlocks, numThreads>>>(gpuC); cudaDeviceSynchronize(); cudaEventRecord(stop, NULL); cudaEventSynchronize(stop); error = cudaMemcpy(copyC, gpuC, Size, cudaMemcpyDeviceToHost); if (error != cudaSuccess) { printf("oops, %d, error: %d\n", __LINE__, error); exit(EXIT_FAILURE); } cudaEventElapsedTime(&msecTotal, start, stop); printf("gpu time: %.3f ms\n", msecTotal); //printf("%f\n", copyC[0]); for (i=0; i<length; i++) if (fabs(c[i]-copyC[i]) > 0.000001){ printf("%d\t%f\t%f\n", i, c[i], copyC[i]); return 1; } return 0; }
6690543220411b31f7852826f612bcde6d84dede.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> /* experiment with N */ #define N 1000000 #define THREADS_PER_BLOCK 1000 __global__ void add(int *a, int *b, int *c) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) c[index] = a[index] + b[index]; }//funcion de kernel cuda int main() { int *a, *b, *c; int *d_a, *d_b, *d_c; int size = N * sizeof( int ); float tiempo1, tiempo2; hipEvent_t inicio1, fin1, inicio2, fin2; // para medir tiempos como con timestamp /* allocate space for host copies of a, b, c and setup input alues */ a = (int *)malloc( size ); b = (int *)malloc( size ); c = (int *)malloc( size ); for( int i = 0; i < N; i++ ) a[i] = b[i] = i+1; hipEventCreate(&inicio1); // Se inicializan hipEventCreate(&fin1); hipEventRecord( inicio1, 0 ); // Se toma el tiempo de inicio /* allocate space for device copies of a, b, c */ hipMalloc( (void **) &d_a, size ); hipMalloc( (void **) &d_b, size ); hipMalloc( (void **) &d_c, size ); /* copy inputs to deice */ /* fix the parameters needed to copy data to the device */ hipMemcpy( d_a, a, size, hipMemcpyHostToDevice );// pasamos los datos a las GPU hipMemcpy( d_b, b, size, hipMemcpyHostToDevice ); hipEventCreate(&inicio2); // Se inicializan hipEventCreate(&fin2); hipEventRecord( inicio2, 0 ); // Se toma el tiempo de inicio /* launch the kernel on the GPU */ hipLaunchKernelGGL(( add), dim3(1), dim3(THREADS_PER_BLOCK) , 0, 0, d_a, d_b, d_c );//esto es un 1-liner hipEventRecord( fin2, 0); // Se toma el tiempo final. hipEventSynchronize( fin2 ); // Se sincroniza hipEventElapsedTime( &tiempo2, inicio2, fin2 ); /* copy result back to host */ /* fix the parameters needed to copy data back to the host */ hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost );//traduccion: regresamos los datos a ram hipFree( d_a ); hipFree( d_b ); hipFree( d_c ); hipEventRecord( fin1, 0); // Se toma el tiempo final. 
hipEventSynchronize( fin1 ); // Se sincroniza hipEventElapsedTime( &tiempo1, inicio1, fin1 ); //for (int i=0; i<N; i++) // printf( "%d + %d = %d\n", a[i], b[i], c[i] ); /* clean up */ free(a); free(b); free(c); printf("tiempo calculos en ms: %f\t tiempo de total %f\n", tiempo2,tiempo1); return 0; } /* end main */
6690543220411b31f7852826f612bcde6d84dede.cu
#include <stdio.h> /* experiment with N */ #define N 1000000 #define THREADS_PER_BLOCK 1000 __global__ void add(int *a, int *b, int *c) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) c[index] = a[index] + b[index]; }//funcion de kernel cuda int main() { int *a, *b, *c; int *d_a, *d_b, *d_c; int size = N * sizeof( int ); float tiempo1, tiempo2; cudaEvent_t inicio1, fin1, inicio2, fin2; // para medir tiempos como con timestamp /* allocate space for host copies of a, b, c and setup input alues */ a = (int *)malloc( size ); b = (int *)malloc( size ); c = (int *)malloc( size ); for( int i = 0; i < N; i++ ) a[i] = b[i] = i+1; cudaEventCreate(&inicio1); // Se inicializan cudaEventCreate(&fin1); cudaEventRecord( inicio1, 0 ); // Se toma el tiempo de inicio /* allocate space for device copies of a, b, c */ cudaMalloc( (void **) &d_a, size ); cudaMalloc( (void **) &d_b, size ); cudaMalloc( (void **) &d_c, size ); /* copy inputs to deice */ /* fix the parameters needed to copy data to the device */ cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );// pasamos los datos a las GPU cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice ); cudaEventCreate(&inicio2); // Se inicializan cudaEventCreate(&fin2); cudaEventRecord( inicio2, 0 ); // Se toma el tiempo de inicio /* launch the kernel on the GPU */ add<<< 1, THREADS_PER_BLOCK >>>( d_a, d_b, d_c );//esto es un 1-liner cudaEventRecord( fin2, 0); // Se toma el tiempo final. cudaEventSynchronize( fin2 ); // Se sincroniza cudaEventElapsedTime( &tiempo2, inicio2, fin2 ); /* copy result back to host */ /* fix the parameters needed to copy data back to the host */ cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );//traduccion: regresamos los datos a ram cudaFree( d_a ); cudaFree( d_b ); cudaFree( d_c ); cudaEventRecord( fin1, 0); // Se toma el tiempo final. 
cudaEventSynchronize( fin1 ); // Se sincroniza cudaEventElapsedTime( &tiempo1, inicio1, fin1 ); //for (int i=0; i<N; i++) // printf( "%d + %d = %d\n", a[i], b[i], c[i] ); /* clean up */ free(a); free(b); free(c); printf("tiempo calculos en ms: %f\t tiempo de total %f\n", tiempo2,tiempo1); return 0; } /* end main */
15765e6d0c469220042bd498161b83d97f1ed2be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THHUNN/generic/SpatialUpSamplingBilinear.hip" #else #include <THHUNN/linear_upsampling.h> static inline void THNN_(SpatialUpSamplingBilinear_shapeCheck) (THCState *state, THCTensor *input, THCTensor *gradOutput, int nBatch, int nChannels, int inputHeight, int inputWidth, int outputHeight, int outputWidth) { THArgCheck(inputHeight > 0 && inputWidth > 0 && outputHeight > 0 && outputWidth > 0, 2, "input and output sizes should be greater than 0," " but got input (H: %d, W: %d) output (H: %d, W: %d)", inputHeight, inputWidth, outputHeight, outputWidth); if (input != NULL) { THCUNN_argCheck(state, !input->is_empty() && input->dim() == 4, 2, input, "non-empty 4D input tensor expected but got: %s"); } if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, 4, 0, nBatch); THCUNN_check_dim_size(state, gradOutput, 4, 1, nChannels); THCUNN_check_dim_size(state, gradOutput, 4, 2, outputHeight); THCUNN_check_dim_size(state, gradOutput, 4, 3, outputWidth); } } void THNN_(SpatialUpSamplingBilinear_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int outputHeight, int outputWidth, bool align_corners) { int nbatch = THCTensor_(size)(state, input, 0); int channels = THCTensor_(size)(state, input, 1); int inputHeight = THCTensor_(size)(state, input, 2); int inputWidth = THCTensor_(size)(state, input, 3); THNN_(SpatialUpSamplingBilinear_shapeCheck) (state, input, NULL, nbatch, channels, inputHeight, inputWidth, outputHeight, outputWidth); THCUNN_assertSameGPU(state, 2, input, output); THCTensor_(resize4d)(state, output, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), outputHeight, outputWidth); THCTensor_(zero)(state, output); THCDeviceTensor<scalar_t, 4> idata = toDeviceTensor<scalar_t, 4>(state, input); THCDeviceTensor<scalar_t, 4> odata = toDeviceTensor<scalar_t, 4>(state, output); 
THAssert(inputHeight > 0 && inputWidth > 0 && outputHeight > 0 && outputWidth > 0); const accreal rheight = linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners); const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners); const int num_kernels = outputHeight * outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; hipStream_t stream = THCState_getCurrentStream(state); hipLaunchKernelGGL(( caffe_gpu_interp2_kernel<scalar_t, accreal>) , dim3(THCCeilDiv(num_kernels, num_threads)), dim3(num_threads) , 0 , stream, num_kernels, rheight, rwidth, align_corners, idata, odata); THCudaCheck(hipGetLastError()); } void THNN_(SpatialUpSamplingBilinear_updateGradInput)( THCState *state, THCTensor *gradOutput, THCTensor *gradInput, int nbatch, int nchannels, int inputHeight, int inputWidth, int outputHeight, int outputWidth, bool align_corners) { THNN_(SpatialUpSamplingBilinear_shapeCheck) (state, NULL, gradOutput, nbatch, nchannels, inputHeight, inputWidth, outputHeight, outputWidth); gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCUNN_assertSameGPU(state, 2, gradOutput, gradInput); THCTensor_(resize4d)(state, gradInput, nbatch, nchannels, inputHeight, inputWidth); THCTensor_(zero)(state, gradInput); THCDeviceTensor<scalar_t, 4> data1 = toDeviceTensor<scalar_t, 4>(state, gradInput); THCDeviceTensor<scalar_t, 4> data2 = toDeviceTensor<scalar_t, 4>(state, gradOutput); const accreal rheight = linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners); const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners); const int num_kernels = outputHeight * outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; hipStream_t stream = THCState_getCurrentStream(state); hipLaunchKernelGGL(( caffe_gpu_interp2_kernel_backward<scalar_t ,accreal>) , 
dim3(THCCeilDiv(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, rheight, rwidth, align_corners, data1, data2); THCudaCheck(hipGetLastError()); THCTensor_(free)(state, gradOutput); } #endif
15765e6d0c469220042bd498161b83d97f1ed2be.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THCUNN/generic/SpatialUpSamplingBilinear.cu" #else #include <THCUNN/linear_upsampling.h> static inline void THNN_(SpatialUpSamplingBilinear_shapeCheck) (THCState *state, THCTensor *input, THCTensor *gradOutput, int nBatch, int nChannels, int inputHeight, int inputWidth, int outputHeight, int outputWidth) { THArgCheck(inputHeight > 0 && inputWidth > 0 && outputHeight > 0 && outputWidth > 0, 2, "input and output sizes should be greater than 0," " but got input (H: %d, W: %d) output (H: %d, W: %d)", inputHeight, inputWidth, outputHeight, outputWidth); if (input != NULL) { THCUNN_argCheck(state, !input->is_empty() && input->dim() == 4, 2, input, "non-empty 4D input tensor expected but got: %s"); } if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, 4, 0, nBatch); THCUNN_check_dim_size(state, gradOutput, 4, 1, nChannels); THCUNN_check_dim_size(state, gradOutput, 4, 2, outputHeight); THCUNN_check_dim_size(state, gradOutput, 4, 3, outputWidth); } } void THNN_(SpatialUpSamplingBilinear_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int outputHeight, int outputWidth, bool align_corners) { int nbatch = THCTensor_(size)(state, input, 0); int channels = THCTensor_(size)(state, input, 1); int inputHeight = THCTensor_(size)(state, input, 2); int inputWidth = THCTensor_(size)(state, input, 3); THNN_(SpatialUpSamplingBilinear_shapeCheck) (state, input, NULL, nbatch, channels, inputHeight, inputWidth, outputHeight, outputWidth); THCUNN_assertSameGPU(state, 2, input, output); THCTensor_(resize4d)(state, output, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), outputHeight, outputWidth); THCTensor_(zero)(state, output); THCDeviceTensor<scalar_t, 4> idata = toDeviceTensor<scalar_t, 4>(state, input); THCDeviceTensor<scalar_t, 4> odata = toDeviceTensor<scalar_t, 4>(state, output); THAssert(inputHeight > 0 && inputWidth > 0 && outputHeight > 0 && outputWidth > 0); const accreal 
rheight = linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners); const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners); const int num_kernels = outputHeight * outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; cudaStream_t stream = THCState_getCurrentStream(state); caffe_gpu_interp2_kernel<scalar_t, accreal> <<<THCCeilDiv(num_kernels, num_threads), num_threads , 0 , stream>>>(num_kernels, rheight, rwidth, align_corners, idata, odata); THCudaCheck(cudaGetLastError()); } void THNN_(SpatialUpSamplingBilinear_updateGradInput)( THCState *state, THCTensor *gradOutput, THCTensor *gradInput, int nbatch, int nchannels, int inputHeight, int inputWidth, int outputHeight, int outputWidth, bool align_corners) { THNN_(SpatialUpSamplingBilinear_shapeCheck) (state, NULL, gradOutput, nbatch, nchannels, inputHeight, inputWidth, outputHeight, outputWidth); gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCUNN_assertSameGPU(state, 2, gradOutput, gradInput); THCTensor_(resize4d)(state, gradInput, nbatch, nchannels, inputHeight, inputWidth); THCTensor_(zero)(state, gradInput); THCDeviceTensor<scalar_t, 4> data1 = toDeviceTensor<scalar_t, 4>(state, gradInput); THCDeviceTensor<scalar_t, 4> data2 = toDeviceTensor<scalar_t, 4>(state, gradOutput); const accreal rheight = linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners); const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners); const int num_kernels = outputHeight * outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; cudaStream_t stream = THCState_getCurrentStream(state); caffe_gpu_interp2_kernel_backward<scalar_t ,accreal> <<<THCCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, rheight, rwidth, align_corners, data1, data2); 
THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, gradOutput); } #endif
8ab68344415c1d47b0b6e11bfed70a2204bda299.hip
// !!! This is a file automatically generated by hipify!!! /* Task #8 - Gustavo Ciotto Pinton MO644 - Parallel Programming */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #define COMMENT "Histogram_GPU" #define RGB_COMPONENT_COLOR 255 #include <hip/hip_runtime.h> #define THREAD_PER_BLOCK 1024 /* Tesla k40 supports 1024 threads per block */ typedef struct { unsigned char red, green, blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } static PPMImage *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; int c, rgb_comp_color; fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } img = (PPMImage *) malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } if (rgb_comp_color != RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } if (fread(img->data, 3 * img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); 
return img; } __global__ void cudaHistogram (PPMPixel *data, int size, int *h) { int i = threadIdx.x + blockIdx.x * blockDim.x, stride = blockDim.x * gridDim.x; /* Gives the number of threads in a grid */ while (i < size) { /* Implicit conversion from float to int gives the same result of floor() function */ int r = ( (float) (data[i].red * 4) / 256), g = ( (float) (data[i].green * 4) / 256), b = ( (float) (data[i].blue * 4) / 256); int x = r * 16 + g * 4 + b; atomicAdd(&h[x], 1); i += stride; } } int main(int argc, char *argv[]) { if( argc != 2 ) { printf("Too many or no one arguments supplied.\n"); return 0; } int i, n; char *filename = argv[1]; //Recebendo o arquivo!; #ifdef PRINT_TIME double start, end, cuda_malloc_t, cuda_copy_t, cuda_kernel_t; #endif PPMImage *image = readPPM(filename); n = image->x * image->y; int *h = (int*)malloc(sizeof(int) * 64); #ifdef PRINT_TIME /* We consider in the execution delay the memory allocation time */ start = rtclock(); #endif /* Allocating memory for image data in the device */ int image_size = n * sizeof(PPMPixel); PPMPixel *cuda_image; hipMalloc((void**) &cuda_image, image_size); /* Allocating memory for histogram in the device */ int *cuda_h; hipMalloc((void**) &cuda_h, 64 * sizeof(int)); hipMemset(cuda_h, 0, 64 * sizeof(int)); #ifdef PRINT_TIME cuda_malloc_t = rtclock(); #endif hipMemcpy(cuda_image, image->data, image_size, hipMemcpyHostToDevice); #ifdef PRINT_TIME cuda_copy_t = rtclock(); #endif /* Computes how many blocks will be used. 
*/ int cuda_blocks = ceil ( (float) n / THREAD_PER_BLOCK ); hipLaunchKernelGGL(( cudaHistogram) , dim3(cuda_blocks), dim3(THREAD_PER_BLOCK) , 0, 0, cuda_image, n, cuda_h); #ifdef PRINT_TIME hipDeviceSynchronize(); cuda_kernel_t = rtclock(); #endif /* Copying computed result from device memory */ hipMemcpy(h, cuda_h, sizeof(int) * 64, hipMemcpyDeviceToHost); #ifdef PRINT_TIME /* As hipMemcpy is a blocking call, we do not need to call hipDeviceSynchronize() */ end = rtclock(); #endif for (i = 0; i < 64; i++){ printf("%0.3f ", (float) h[i] / n); } printf("\n"); #ifdef PRINT_TIME printf("\nBuffer:%0.6lfs\nEnviar:%0.6lfs\nKernel:%0.6lfs\nReceber:%0.6lfs\nTotal: %0.6lfs\n", cuda_malloc_t - start, cuda_copy_t - cuda_malloc_t, cuda_kernel_t - cuda_copy_t, end - cuda_kernel_t, end - start); #endif /* Cleaning everything up */ free(h); free(image->data); free(image); hipFree(cuda_image); hipFree(cuda_h); } /* Time table: arq1.ppm arq2.ppm arq3.ppm tempo_serial 0.342149s 0.608602s 1.813922s tempo_GPU_criar_buffer 0.288812s 0.292572s 0.312226s tempo_GPU_offload_enviar 0.000826s 0.001253s 0.004878s tempo_kernel 0.001099s 0.003102s 0.011870s tempo_GPU_offload_receber 0.000024s 0.000022s 0.000035s tempo_GPU_total 0.290761s 0.296949s 0.329009s speedup 1.17674 2.04952 5.51329 */
8ab68344415c1d47b0b6e11bfed70a2204bda299.cu
/* Task #8 - Gustavo Ciotto Pinton MO644 - Parallel Programming */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #define COMMENT "Histogram_GPU" #define RGB_COMPONENT_COLOR 255 #include <cuda.h> #define THREAD_PER_BLOCK 1024 /* Tesla k40 supports 1024 threads per block */ typedef struct { unsigned char red, green, blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } static PPMImage *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; int c, rgb_comp_color; fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } img = (PPMImage *) malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } if (rgb_comp_color != RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } if (fread(img->data, 3 * img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; } __global__ void cudaHistogram (PPMPixel *data, int size, 
int *h) { int i = threadIdx.x + blockIdx.x * blockDim.x, stride = blockDim.x * gridDim.x; /* Gives the number of threads in a grid */ while (i < size) { /* Implicit conversion from float to int gives the same result of floor() function */ int r = ( (float) (data[i].red * 4) / 256), g = ( (float) (data[i].green * 4) / 256), b = ( (float) (data[i].blue * 4) / 256); int x = r * 16 + g * 4 + b; atomicAdd(&h[x], 1); i += stride; } } int main(int argc, char *argv[]) { if( argc != 2 ) { printf("Too many or no one arguments supplied.\n"); return 0; } int i, n; char *filename = argv[1]; //Recebendo o arquivo!; #ifdef PRINT_TIME double start, end, cuda_malloc_t, cuda_copy_t, cuda_kernel_t; #endif PPMImage *image = readPPM(filename); n = image->x * image->y; int *h = (int*)malloc(sizeof(int) * 64); #ifdef PRINT_TIME /* We consider in the execution delay the memory allocation time */ start = rtclock(); #endif /* Allocating memory for image data in the device */ int image_size = n * sizeof(PPMPixel); PPMPixel *cuda_image; cudaMalloc((void**) &cuda_image, image_size); /* Allocating memory for histogram in the device */ int *cuda_h; cudaMalloc((void**) &cuda_h, 64 * sizeof(int)); cudaMemset(cuda_h, 0, 64 * sizeof(int)); #ifdef PRINT_TIME cuda_malloc_t = rtclock(); #endif cudaMemcpy(cuda_image, image->data, image_size, cudaMemcpyHostToDevice); #ifdef PRINT_TIME cuda_copy_t = rtclock(); #endif /* Computes how many blocks will be used. 
*/ int cuda_blocks = ceil ( (float) n / THREAD_PER_BLOCK ); cudaHistogram <<< cuda_blocks, THREAD_PER_BLOCK >>> (cuda_image, n, cuda_h); #ifdef PRINT_TIME cudaThreadSynchronize(); cuda_kernel_t = rtclock(); #endif /* Copying computed result from device memory */ cudaMemcpy(h, cuda_h, sizeof(int) * 64, cudaMemcpyDeviceToHost); #ifdef PRINT_TIME /* As cudaMemcpy is a blocking call, we do not need to call cudaThreadSynchronize() */ end = rtclock(); #endif for (i = 0; i < 64; i++){ printf("%0.3f ", (float) h[i] / n); } printf("\n"); #ifdef PRINT_TIME printf("\nBuffer:%0.6lfs\nEnviar:%0.6lfs\nKernel:%0.6lfs\nReceber:%0.6lfs\nTotal: %0.6lfs\n", cuda_malloc_t - start, cuda_copy_t - cuda_malloc_t, cuda_kernel_t - cuda_copy_t, end - cuda_kernel_t, end - start); #endif /* Cleaning everything up */ free(h); free(image->data); free(image); cudaFree(cuda_image); cudaFree(cuda_h); } /* Time table: arq1.ppm arq2.ppm arq3.ppm tempo_serial 0.342149s 0.608602s 1.813922s tempo_GPU_criar_buffer 0.288812s 0.292572s 0.312226s tempo_GPU_offload_enviar 0.000826s 0.001253s 0.004878s tempo_kernel 0.001099s 0.003102s 0.011870s tempo_GPU_offload_receber 0.000024s 0.000022s 0.000035s tempo_GPU_total 0.290761s 0.296949s 0.329009s speedup 1.17674 2.04952 5.51329 */
d25c9ce6250f94e383e34ad1bb7edce62e86189c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/TensorUtils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <THH/THHAtomics.cuh> #include "../nms/cuda_helpers.h" template <typename T> __global__ void RoIPoolForward( const int nthreads, const T* input, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const T* rois, T* output, int* argmax_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; int roi_start_w = round(offset_rois[1] * spatial_scale); int roi_start_h = round(offset_rois[2] * spatial_scale); int roi_end_w = round(offset_rois[3] * spatial_scale); int roi_end_h = round(offset_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || 
(wend <= wstart); // Define an empty pooling region to be zero T maxval = is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; const T* offset_input = input + (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * width + w; if (offset_input[input_index] > maxval) { maxval = offset_input[input_index]; maxidx = input_index; } } } output[index] = maxval; argmax_data[index] = maxidx; } } template <typename T> __global__ void RoIPoolBackward( const int nthreads, const T* grad_output, const int* argmax_data, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, T* grad_input, const T* rois, const int n_stride, const int c_stride, const int h_stride, const int w_stride) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; T* grad_input_offset = grad_input + ((roi_batch_ind * channels + c) * height * width); int output_offset = n * n_stride + c * c_stride; const int* argmax_data_offset = argmax_data + (n * channels + c) * pooled_height * pooled_width; int argmax = argmax_data_offset[ph * pooled_width + pw]; if (argmax != -1) { atomicAdd( grad_input_offset + argmax, static_cast<T>( grad_output[output_offset + ph * h_stride + pw * w_stride])); } } } namespace detectron2 { std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda( const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width) { AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); 
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::CheckedFrom c = "ROIPool_forward_cuda"; at::checkAllSameGPU(c, {input_t, rois_t}); at::checkAllSameType(c, {input_t, rois_t}); at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device()); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); at::Tensor output = at::zeros( {num_rois, channels, pooled_height, pooled_width}, input.options()); at::Tensor argmax = at::zeros( {num_rois, channels, pooled_height, pooled_width}, input.options().dtype(at::kInt)); auto output_size = num_rois * pooled_height * pooled_width * channels; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min( ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); if (output.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(output, argmax); } auto input_ = input.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIPool_forward", [&] { hipLaunchKernelGGL(( RoIPoolForward<scalar_t>), dim3(grid), dim3(block), 0, stream, output_size, input_.data_ptr<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, rois_.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), argmax.data_ptr<int>()); }); AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(output, argmax); } at::Tensor ROIPool_backward_cuda( const at::Tensor& grad, const at::Tensor& rois, const at::Tensor& argmax, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width) { // Check if input tensors are CUDA tensors AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); AT_ASSERTM(argmax.is_cuda(), "argmax 
must be a CUDA tensor"); at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, argmax_t{argmax, "argmax", 3}; at::CheckedFrom c = "ROIPool_backward_cuda"; at::checkAllSameGPU(c, {grad_t, rois_t, argmax_t}); at::checkAllSameType(c, {grad_t, rois_t}); at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device()); auto num_rois = rois.size(0); at::Tensor grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min( ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return grad_input; } int n_stride = grad.stride(0); int c_stride = grad.stride(1); int h_stride = grad.stride(2); int w_stride = grad.stride(3); auto argmax_ = argmax.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIPool_backward", [&] { hipLaunchKernelGGL(( RoIPoolBackward<scalar_t>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad.data_ptr<scalar_t>(), argmax_.data_ptr<int>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, grad_input.data_ptr<scalar_t>(), rois_.data_ptr<scalar_t>(), n_stride, c_stride, h_stride, w_stride); }); AT_CUDA_CHECK(hipGetLastError()); return grad_input; } }
d25c9ce6250f94e383e34ad1bb7edce62e86189c.cu
#include <ATen/ATen.h> #include <ATen/TensorUtils.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <THC/THCAtomics.cuh> #include "../nms/cuda_helpers.h" template <typename T> __global__ void RoIPoolForward( const int nthreads, const T* input, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const T* rois, T* output, int* argmax_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; int roi_start_w = round(offset_rois[1] * spatial_scale); int roi_start_h = round(offset_rois[2] * spatial_scale); int roi_end_w = round(offset_rois[3] * spatial_scale); int roi_end_h = round(offset_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero T maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; const T* offset_input = input + (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * width + w; if (offset_input[input_index] > maxval) { maxval = offset_input[input_index]; maxidx = input_index; } } } output[index] = maxval; argmax_data[index] = maxidx; } } template <typename T> __global__ void RoIPoolBackward( const int nthreads, const T* grad_output, const int* argmax_data, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, T* grad_input, const T* rois, const int n_stride, const int c_stride, const int h_stride, const int w_stride) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; T* grad_input_offset = grad_input + ((roi_batch_ind * channels + c) * height * width); int output_offset = n * n_stride + c * c_stride; const int* argmax_data_offset = argmax_data + (n * channels + c) * pooled_height * pooled_width; int argmax = argmax_data_offset[ph * pooled_width + pw]; if (argmax != -1) { atomicAdd( grad_input_offset + argmax, static_cast<T>( grad_output[output_offset + ph * h_stride + pw * w_stride])); } } } namespace detectron2 { std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda( const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width) { AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); at::TensorArg input_t{input, "input", 1}, 
rois_t{rois, "rois", 2}; at::CheckedFrom c = "ROIPool_forward_cuda"; at::checkAllSameGPU(c, {input_t, rois_t}); at::checkAllSameType(c, {input_t, rois_t}); at::cuda::CUDAGuard device_guard(input.device()); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); at::Tensor output = at::zeros( {num_rois, channels, pooled_height, pooled_width}, input.options()); at::Tensor argmax = at::zeros( {num_rois, channels, pooled_height, pooled_width}, input.options().dtype(at::kInt)); auto output_size = num_rois * pooled_height * pooled_width * channels; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min( ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); if (output.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(output, argmax); } auto input_ = input.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIPool_forward", [&] { RoIPoolForward<scalar_t><<<grid, block, 0, stream>>>( output_size, input_.data_ptr<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, rois_.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), argmax.data_ptr<int>()); }); AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(output, argmax); } at::Tensor ROIPool_backward_cuda( const at::Tensor& grad, const at::Tensor& rois, const at::Tensor& argmax, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width) { // Check if input tensors are CUDA tensors AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); AT_ASSERTM(argmax.is_cuda(), "argmax must be a CUDA tensor"); at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, argmax_t{argmax, "argmax", 3}; at::CheckedFrom c = 
"ROIPool_backward_cuda"; at::checkAllSameGPU(c, {grad_t, rois_t, argmax_t}); at::checkAllSameType(c, {grad_t, rois_t}); at::cuda::CUDAGuard device_guard(grad.device()); auto num_rois = rois.size(0); at::Tensor grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min( ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return grad_input; } int n_stride = grad.stride(0); int c_stride = grad.stride(1); int h_stride = grad.stride(2); int w_stride = grad.stride(3); auto argmax_ = argmax.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIPool_backward", [&] { RoIPoolBackward<scalar_t><<<grid, block, 0, stream>>>( grad.numel(), grad.data_ptr<scalar_t>(), argmax_.data_ptr<int>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, grad_input.data_ptr<scalar_t>(), rois_.data_ptr<scalar_t>(), n_stride, c_stride, h_stride, w_stride); }); AT_CUDA_CHECK(cudaGetLastError()); return grad_input; } }
b89b6ed0557a47bc4408e37eda4b4043e2fdcf41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // User: g213@157.88.125.142 // ExecutionRequest[P:'LaBuena.cu',P:1,T:1,args:'',q:'cudalb'] // May 16 2019 14:01:02 #include "cputils.h" // Added by tablon /* Simplified simulation of fire extinguishing * * Computacion Paralela, Grado en Informatica (Universidad de Valladolid) * 2018/2019 * * v1.4 * * (c) 2019 Arturo Gonzalez Escribano * Cambios producidos por Rodrigo Beltran y Alvaro Villa */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> #include <cputils.h> #define accessMat( arr, exp1, exp2 ) arr[ (exp1) * columns + (exp2) ] #define RADIUS_TYPE_1 3 #define RADIUS_TYPE_2_3 9 #define THRESHOLD 0.1f /* Structure to store data of an extinguishing team */ typedef struct { int x,y; int type; int target; } Team; /* Structure to store data of a fire focal point */ typedef struct { int x,y; int start; int heat; int active; // States: 0 Not yet activated; 1 Active; 2 Deactivated by a team } focoPoint; /* Macro function to simplify accessing with two coordinates to a flattened array */ //Funcion que se encarga de inicializar las matrices que le pasamos de parametros. __global__ void inicializar(float *surface,int rows,int columns){ int x=threadIdx.y+blockDim.y*blockIdx.y; int y=threadIdx.x+blockDim.x*blockIdx.x; if (x >= rows || y>= columns){ return; } surface[x*columns+y]=0; } //Funcion que se encarga de actualizar los valores de la matriz. 
Calcula el calor //y lo introduce en la matriz principal __global__ void actualizar(float *surface, float *surfaceCopy,int rows, int columns){ int x=threadIdx.y+blockDim.y*blockIdx.y; int y=threadIdx.x+blockDim.x*blockIdx.x; if (x >= rows-1 || x==0 || y>= columns-1 || y==0) return; surface[x*columns+y]=( surfaceCopy[(x-1)*columns+y]+ surfaceCopy[(x+1)*columns+y]+ surfaceCopy[x*columns+y-1]+ surfaceCopy[x*columns+y+1])/4; } //Esta funcion se encarga de propagar el calor (actualizandolo) __global__ void propagar(float *surface,int i,int j, int columns , int heat){ surface[i*columns+j]=heat; } //Esta funcion se encarga de reducir la cantidad de calor (apaga) para los equipos 2 y 3 __global__ void apagarEquipo23(float *surface,int x,int y,int radius,int rows,int columns){ int i=x-radius+threadIdx.y; int j=y-radius+threadIdx.x; if(threadIdx.x>=19 || threadIdx.y >=19) return; if ( i<1 || i>=rows-1 || j<1 || j>=columns-1 ) return; // Out of the heated surface float dx = x - i; float dy = y - j; float distance = dx*dx + dy*dy ; if ( distance <= radius*radius ) { surface[i*columns+j]*=0.75; // Team efficiency factor } } //Esta funcion se encarga de reducir la cantidad de calor (apaga) para el equipo 1 __global__ void apagarEquipo1(float *surface,int x,int y,int radius,int rows,int columns){ int i=x-radius+threadIdx.y; int j=y-radius+threadIdx.x; if(threadIdx.x>=7 || threadIdx.y >=7) return; if ( i<1 || i>=rows-1 || j<1 || j>=columns-1 ) return; // Out of the heated surface float dx = x - i; float dy = y - j; float distance = dx*dx + dy*dy ; if ( distance <= radius*radius ) { surface[i*columns+j]*=0.75; // Team efficiency factor } } //Esta funcion es la indicada para mover los equipos a lo largo de la matriz para //que se arrimen al foco y apaguen el fuego __global__ void moverEquipos(Team *teams,focoPoint *focal, int num_teams,int num_focal){ int j; int id=threadIdx.x+blockDim.x*blockIdx.x; if(id>num_teams){ return; } float distance = FLT_MAX; int target = -1; for( j=0; 
j<num_focal; j++ ) { if ( focal[j].active != 1 ) continue; // Skip non-active focal points float dx = focal[j].x - teams[id].x; float dy = focal[j].y - teams[id].y; float local_distance = sqrtf( dx*dx + dy*dy ); if ( local_distance < distance ) { distance = local_distance; target = j; } } /* 4.3.2. Annotate target for the next stage */ teams[id].target = target; /* 4.3.3. No active focal point to choose, no movement */ if ( target == -1 ) return; /* 4.3.4. Move in the focal point direction */ if ( teams[id].type == 1 ) { // Type 1: Can move in diagonal if ( focal[target].x < teams[id].x ) teams[id].x--; if ( focal[target].x > teams[id].x ) teams[id].x++; if ( focal[target].y < teams[id].y ) teams[id].y--; if ( focal[target].y > teams[id].y ) teams[id].y++; } else if ( teams[id].type == 2 ) { // Type 2: First in horizontal direction, then in vertical direction if ( focal[target].y < teams[id].y ) teams[id].y--; else if ( focal[target].y > teams[id].y ) teams[id].y++; else if ( focal[target].x < teams[id].x ) teams[id].x--; else if ( focal[target].x > teams[id].x ) teams[id].x++; } else { // Type 3: First in vertical direction, then in horizontal direction if ( focal[target].x < teams[id].x ) teams[id].x--; else if ( focal[target].x > teams[id].x ) teams[id].x++; else if ( focal[target].y < teams[id].y ) teams[id].y--; else if ( focal[target].y > teams[id].y ) teams[id].y++; } } /* * Function: Print usage line in stderr */ void show_usage( char *program_name ) { fprintf(stderr,"Usage: %s <config_file> | <command_line_args>\n", program_name ); fprintf(stderr,"\t<config_file> ::= -f <file_name>\n"); fprintf(stderr,"\t<command_line_args> ::= <rows> <columns> <maxIter> <numTeams> [ <teamX> <teamY> <teamType> ... ] <numfocoPoints> [ <focalX> <focalY> <focalStart> <focalTemperature> ... 
]\n"); fprintf(stderr,"\n"); } #ifdef DEBUG /* * Function: Print the current state of the simulation */ void print_status( int iteration, int rows, int columns, float *surface, int num_teams, Team *teams, int num_focal, focoPoint *focal, float global_residual ) { /* * You don't need to optimize this function, it is only for pretty printing and debugging purposes. * It is not compiled in the production versions of the program. * Thus, it is never used when measuring times in the leaderboard */ int i,j; printf("Iteration: %d\n", iteration ); printf("+"); for( j=0; j<columns; j++ ) printf("---"); printf("+\n"); for( i=0; i<rows; i++ ) { printf("|"); for( j=0; j<columns; j++ ) { char symbol; if ( accessMat( surface, i, j ) >= 1000 ) symbol = '*'; else if ( accessMat( surface, i, j ) >= 100 ) symbol = '0' + (int)(accessMat( surface, i, j )/100); else if ( accessMat( surface, i, j ) >= 50 ) symbol = '+'; else if ( accessMat( surface, i, j ) >= 25 ) symbol = '.'; else symbol = '0'; int t; int flag_team = 0; for( t=0; t<num_teams; t++ ) if ( teams[t].x == i && teams[t].y == j ) { flag_team = 1; break; } if ( flag_team ) printf("[%c]", symbol ); else { int f; int flag_focal = 0; for( f=0; f<num_focal; f++ ) if ( focal[f].x == i && focal[f].y == j && focal[f].active == 1 ) { flag_focal = 1; break; } if ( flag_focal ) printf("(%c)", symbol ); else printf(" %c ", symbol ); } } printf("|\n"); } printf("+"); for( j=0; j<columns; j++ ) printf("---"); printf("+\n"); printf("Global residual: %f\n\n", global_residual); } #endif /* * MAIN PROGRAM */ int main(int argc, char *argv[]) { int i,j,t; // Simulation data int rows, columns, max_iter; float *surface, *surfaceCopy; int num_teams, num_focal; Team *teams; focoPoint *focal; /* 1. Read simulation arguments */ /* 1.1. Check minimum number of arguments */ if (argc<2) { fprintf(stderr,"-- Error in arguments: No arguments\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } int read_from_file = ! strcmp( argv[1], "-f" ); /* 1.2. 
Read configuration from file */ if ( read_from_file ) { /* 1.2.1. Open file */ if (argc<3) { fprintf(stderr,"-- Error in arguments: file-name argument missing\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } FILE *args = cp_abrir_fichero( argv[2] ); if ( args == NULL ) { fprintf(stderr,"-- Error in file: not found: %s\n", argv[1]); exit( EXIT_FAILURE ); } /* 1.2.2. Read surface and maximum number of iterations */ int ok; ok = fscanf(args, "%d %d %d", &rows, &columns, &max_iter); if ( ok != 3 ) { fprintf(stderr,"-- Error in file: reading rows, columns, max_iter from file: %s\n", argv[1]); exit( EXIT_FAILURE ); } surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); if ( surface == NULL || surfaceCopy == NULL ) { fprintf(stderr,"-- Error allocating: surface structures\n"); exit( EXIT_FAILURE ); } /* 1.2.3. Teams information */ ok = fscanf(args, "%d", &num_teams ); if ( ok != 1 ) { fprintf(stderr,"-- Error file, reading num_teams from file: %s\n", argv[1]); exit( EXIT_FAILURE ); } teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams ); if ( teams == NULL ) { fprintf(stderr,"-- Error allocating: %d teams\n", num_teams ); exit( EXIT_FAILURE ); } for( i=0; i<num_teams; i++ ) { ok = fscanf(args, "%d %d %d", &teams[i].x, &teams[i].y, &teams[i].type); if ( ok != 3 ) { fprintf(stderr,"-- Error in file: reading team %d from file: %s\n", i, argv[1]); exit( EXIT_FAILURE ); } } /* 1.2.4. 
foco points information */ ok = fscanf(args, "%d", &num_focal ); if ( ok != 1 ) { fprintf(stderr,"-- Error in file: reading num_focal from file: %s\n", argv[1]); exit( EXIT_FAILURE ); } focal = (focoPoint *)malloc( sizeof(focoPoint) * (size_t)num_focal ); if ( focal == NULL ) { fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal ); exit( EXIT_FAILURE ); } for( i=0; i<num_focal; i++ ) { ok = fscanf(args, "%d %d %d %d", &focal[i].x, &focal[i].y, &focal[i].start, &focal[i].heat); if ( ok != 4 ) { fprintf(stderr,"-- Error in file: reading focal point %d from file: %s\n", i, argv[1]); exit( EXIT_FAILURE ); } focal[i].active = 0; } } /* 1.3. Read configuration from arguments */ else { /* 1.3.1. Check minimum number of arguments */ if (argc<6) { fprintf(stderr, "-- Error in arguments: not enough arguments when reading configuration from the command line\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } /* 1.3.2. Surface and maximum number of iterations */ rows = atoi( argv[1] ); columns = atoi( argv[2] ); max_iter = atoi( argv[3] ); surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); /* 1.3.3. Teams information */ num_teams = atoi( argv[4] ); teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams ); if ( teams == NULL ) { fprintf(stderr,"-- Error allocating: %d teams\n", num_teams ); exit( EXIT_FAILURE ); } if ( argc < num_teams*3 + 5 ) { fprintf(stderr,"-- Error in arguments: not enough arguments for %d teams\n", num_teams ); exit( EXIT_FAILURE ); } for( i=0; i<num_teams; i++ ) { teams[i].x = atoi( argv[5+i*3] ); teams[i].y = atoi( argv[6+i*3] ); teams[i].type = atoi( argv[7+i*3] ); } /* 1.3.4. 
foco points information */ int focal_args = 5 + i*3; if ( argc < focal_args+1 ) { fprintf(stderr,"-- Error in arguments: not enough arguments for the number of focal points\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } num_focal = atoi( argv[focal_args] ); focal = (focoPoint *)malloc( sizeof(focoPoint) * (size_t)num_focal ); if ( teams == NULL ) { fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal ); exit( EXIT_FAILURE ); } if ( argc < focal_args + 1 + num_focal*4 ) { fprintf(stderr,"-- Error in arguments: not enough arguments for %d focal points\n", num_focal ); exit( EXIT_FAILURE ); } for( i=0; i<num_focal; i++ ) { focal[i].x = atoi( argv[focal_args+i*4+1] ); focal[i].y = atoi( argv[focal_args+i*4+2] ); focal[i].start = atoi( argv[focal_args+i*4+3] ); focal[i].heat = atoi( argv[focal_args+i*4+4] ); focal[i].active = 0; } /* 1.3.5. Sanity check: No extra arguments at the end of line */ if ( argc > focal_args+i*4+1 ) { fprintf(stderr,"-- Error in arguments: extra arguments at the end of the command line\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } } #ifdef DEBUG /* 1.4. Print arguments */ printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter); printf("Arguments, Teams: %d, foco points: %d\n", num_teams, num_focal ); for( i=0; i<num_teams; i++ ) { printf("\tTeam %d, position (%d,%d), type: %d\n", i, teams[i].x, teams[i].y, teams[i].type ); } for( i=0; i<num_focal; i++ ) { printf("\tfoco_point %d, position (%d,%d), start time: %d, temperature: %d\n", i, focal[i].x, focal[i].y, focal[i].start, focal[i].heat ); } #endif // DEBUG /* 2. 
Select GPU and start global timer */ hipSetDevice(0); hipDeviceSynchronize(); double ttotal = cp_Wtime(); /* * * START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT * */ float *Surface, *SurfaceCopy; focoPoint *foco; Team *equipo; hipMalloc((void **)&Surface,sizeof(float)*rows*columns); hipMalloc((void **) &SurfaceCopy,sizeof(float)*rows*columns); hipMalloc((void **) &foco,sizeof(focoPoint)*num_focal); hipMalloc((void **) &equipo,sizeof(Team)*num_teams); hipMemcpy(equipo,teams,sizeof(Team)*num_teams,hipMemcpyHostToDevice); //Establecemos los valores de los tamanos de bloque int tBlockX= 64; int tBlockY= 4; int tBlockTeams=256; int tamGrx, tamGry,tamGridTeams; tamGridTeams= num_teams/tBlockTeams; if (num_teams%tBlockTeams!=0) tamGridTeams++; tamGrx= columns/tBlockX; if (columns%tBlockX!=0) tamGrx++; tamGry= rows/tBlockY; if (rows%tBlockY!=0) tamGry++; //Establecemos los tamanos del grid y de bloque dim3 gridSize(tamGrx,tamGry); dim3 blockSize(tBlockX,tBlockY); //Aqui inicializamos las matrices principales y la copia llamando a la funcion que inicializa //asi que posteriormente ya nos ahorramos volver a inicializar. hipLaunchKernelGGL(( inicializar), dim3(blockSize),dim3(gridSize), 0, 0, Surface,rows,columns); hipLaunchKernelGGL(( inicializar), dim3(blockSize),dim3(gridSize), 0, 0, SurfaceCopy,rows,columns); /* 4. Simulation */ int iter; int flag_stability = 0; int first_activation = 0; for( iter=0; iter<max_iter && ! flag_stability; iter++ ) { /* 4.1. Activate focal points */ int num_deactivated = 0; for( i=0; i<num_focal; i++ ) { if ( focal[i].start == iter ) { focal[i].active = 1; if ( ! first_activation ) first_activation = 1; } // Count focal points already deactivated by a team if ( focal[i].active == 2 ) num_deactivated++; } /* 4.2. Propagate heat (10 steps per each team movement) */ float global_residual = 0.0f; int step; for( step=0; step<10; step++ ) { /* 4.2.1. 
Update heat on active focal points */ for( i=0; i<num_focal; i++ ) { if ( focal[i].active != 1 ) continue; int x = focal[i].x; int y = focal[i].y; hipLaunchKernelGGL(( propagar), dim3(1),dim3(1), 0, 0, Surface,x,y,columns,focal[i].heat); } /* 4.2.2. Copy values of the surface in ancillary structure (Skip borders) */ float *aux=Surface; Surface=SurfaceCopy; SurfaceCopy=aux; /* 4.2.3. Update surface values (skip borders) */ hipLaunchKernelGGL(( actualizar), dim3(gridSize),dim3(blockSize), 0, 0, Surface,SurfaceCopy,rows,columns); /* 4.2.4. Compute the maximum residual difference (absolute value) */ if(step==0 && num_deactivated==num_focal){ hipMemcpy(surface,Surface,sizeof(float)*rows*columns,hipMemcpyDeviceToHost); hipMemcpy(surfaceCopy,SurfaceCopy,sizeof(float)*rows*columns,hipMemcpyDeviceToHost); for( i=1; i<rows-1; i++ ) for( j=1; j<columns-1; j++ ) if ( fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) ) > global_residual ) { global_residual = fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) ); } } } /* If the global residual is lower than THRESHOLD, we have reached enough stability, stop simulation at the end of this iteration */ if( num_deactivated == num_focal && global_residual < THRESHOLD ) flag_stability = 1; /* 4.3. Move teams */ hipMemcpy(foco,focal,sizeof(focoPoint)*num_focal,hipMemcpyHostToDevice); hipLaunchKernelGGL(( moverEquipos), dim3(tamGridTeams),dim3(tBlockTeams), 0, 0, equipo,foco,num_teams,num_focal); hipMemcpy(teams,equipo,sizeof(Team)*num_teams,hipMemcpyDeviceToHost); /* 4.4. Team actions */ for( t=0; t<num_teams; t++ ) { /* 4.4.1. Deactivate the target focal point when it is reached */ int target = teams[t].target; if ( target != -1 && focal[target].x == teams[t].x && focal[target].y == teams[t].y && focal[target].active == 1 ) focal[target].active = 2; /* 4.4.2. 
Reduce heat in a circle around the team */ int radius; if ( teams[t].type == 1 ) {radius = RADIUS_TYPE_1; dim3 gridRed(1,1); dim3 blockRed(8,8); hipLaunchKernelGGL(( apagarEquipo1), dim3(gridRed),dim3(blockRed), 0, 0, Surface,teams[t].x,teams[t].y,radius,rows,columns); } else {radius = RADIUS_TYPE_2_3; dim3 gridRed(1,1); dim3 blockRed(32,32); hipLaunchKernelGGL(( apagarEquipo23), dim3(gridRed),dim3(blockRed), 0, 0, Surface,teams[t].x,teams[t].y,radius,rows,columns); } } #ifdef DEBUG /* 4.5. DEBUG: Print the current state of the simulation at the end of each iteration */ print_status( iter, rows, columns, surface, num_teams, teams, num_focal, focal, global_residual ); #endif // DEBUG } hipMemcpy(surface,Surface,sizeof(float)*rows*columns,hipMemcpyDeviceToHost); /* * * STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT * */ /* 5. Stop global time */ hipDeviceSynchronize(); ttotal = cp_Wtime() - ttotal; /* 6. Output for leaderboard */ printf("\n"); /* 6.1. Total computation time */ printf("Time: %lf\n", ttotal ); /* 6.2. Results: Number of iterations, position of teams, residual heat on the focal points */ printf("Result: %d", iter); /* for (i=0; i<num_teams; i++) printf(" %d %d", teams[i].x, teams[i].y ); */ for (i=0; i<num_focal; i++) printf(" %.6f", accessMat( surface, focal[i].x, focal[i].y ) ); printf("\n"); /* 7. Free resources */ free( teams ); free( focal ); free( surface ); free( surfaceCopy ); /* 8. End */ return 0; }
b89b6ed0557a47bc4408e37eda4b4043e2fdcf41.cu
// User: g213@157.88.125.142 // ExecutionRequest[P:'LaBuena.cu',P:1,T:1,args:'',q:'cudalb'] // May 16 2019 14:01:02 #include "cputils.h" // Added by tablon /* Simplified simulation of fire extinguishing * * Computacion Paralela, Grado en Informatica (Universidad de Valladolid) * 2018/2019 * * v1.4 * * (c) 2019 Arturo Gonzalez Escribano * Cambios producidos por Rodrigo Beltran y Alvaro Villa */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> #include <cputils.h> #define accessMat( arr, exp1, exp2 ) arr[ (exp1) * columns + (exp2) ] #define RADIUS_TYPE_1 3 #define RADIUS_TYPE_2_3 9 #define THRESHOLD 0.1f /* Structure to store data of an extinguishing team */ typedef struct { int x,y; int type; int target; } Team; /* Structure to store data of a fire focal point */ typedef struct { int x,y; int start; int heat; int active; // States: 0 Not yet activated; 1 Active; 2 Deactivated by a team } focoPoint; /* Macro function to simplify accessing with two coordinates to a flattened array */ //Funcion que se encarga de inicializar las matrices que le pasamos de parametros. __global__ void inicializar(float *surface,int rows,int columns){ int x=threadIdx.y+blockDim.y*blockIdx.y; int y=threadIdx.x+blockDim.x*blockIdx.x; if (x >= rows || y>= columns){ return; } surface[x*columns+y]=0; } //Funcion que se encarga de actualizar los valores de la matriz. 
Calcula el calor //y lo introduce en la matriz principal __global__ void actualizar(float *surface, float *surfaceCopy,int rows, int columns){ int x=threadIdx.y+blockDim.y*blockIdx.y; int y=threadIdx.x+blockDim.x*blockIdx.x; if (x >= rows-1 || x==0 || y>= columns-1 || y==0) return; surface[x*columns+y]=( surfaceCopy[(x-1)*columns+y]+ surfaceCopy[(x+1)*columns+y]+ surfaceCopy[x*columns+y-1]+ surfaceCopy[x*columns+y+1])/4; } //Esta funcion se encarga de propagar el calor (actualizandolo) __global__ void propagar(float *surface,int i,int j, int columns , int heat){ surface[i*columns+j]=heat; } //Esta funcion se encarga de reducir la cantidad de calor (apaga) para los equipos 2 y 3 __global__ void apagarEquipo23(float *surface,int x,int y,int radius,int rows,int columns){ int i=x-radius+threadIdx.y; int j=y-radius+threadIdx.x; if(threadIdx.x>=19 || threadIdx.y >=19) return; if ( i<1 || i>=rows-1 || j<1 || j>=columns-1 ) return; // Out of the heated surface float dx = x - i; float dy = y - j; float distance = dx*dx + dy*dy ; if ( distance <= radius*radius ) { surface[i*columns+j]*=0.75; // Team efficiency factor } } //Esta funcion se encarga de reducir la cantidad de calor (apaga) para el equipo 1 __global__ void apagarEquipo1(float *surface,int x,int y,int radius,int rows,int columns){ int i=x-radius+threadIdx.y; int j=y-radius+threadIdx.x; if(threadIdx.x>=7 || threadIdx.y >=7) return; if ( i<1 || i>=rows-1 || j<1 || j>=columns-1 ) return; // Out of the heated surface float dx = x - i; float dy = y - j; float distance = dx*dx + dy*dy ; if ( distance <= radius*radius ) { surface[i*columns+j]*=0.75; // Team efficiency factor } } //Esta funcion es la indicada para mover los equipos a lo largo de la matriz para //que se arrimen al foco y apaguen el fuego __global__ void moverEquipos(Team *teams,focoPoint *focal, int num_teams,int num_focal){ int j; int id=threadIdx.x+blockDim.x*blockIdx.x; if(id>num_teams){ return; } float distance = FLT_MAX; int target = -1; for( j=0; 
j<num_focal; j++ ) { if ( focal[j].active != 1 ) continue; // Skip non-active focal points float dx = focal[j].x - teams[id].x; float dy = focal[j].y - teams[id].y; float local_distance = sqrtf( dx*dx + dy*dy ); if ( local_distance < distance ) { distance = local_distance; target = j; } } /* 4.3.2. Annotate target for the next stage */ teams[id].target = target; /* 4.3.3. No active focal point to choose, no movement */ if ( target == -1 ) return; /* 4.3.4. Move in the focal point direction */ if ( teams[id].type == 1 ) { // Type 1: Can move in diagonal if ( focal[target].x < teams[id].x ) teams[id].x--; if ( focal[target].x > teams[id].x ) teams[id].x++; if ( focal[target].y < teams[id].y ) teams[id].y--; if ( focal[target].y > teams[id].y ) teams[id].y++; } else if ( teams[id].type == 2 ) { // Type 2: First in horizontal direction, then in vertical direction if ( focal[target].y < teams[id].y ) teams[id].y--; else if ( focal[target].y > teams[id].y ) teams[id].y++; else if ( focal[target].x < teams[id].x ) teams[id].x--; else if ( focal[target].x > teams[id].x ) teams[id].x++; } else { // Type 3: First in vertical direction, then in horizontal direction if ( focal[target].x < teams[id].x ) teams[id].x--; else if ( focal[target].x > teams[id].x ) teams[id].x++; else if ( focal[target].y < teams[id].y ) teams[id].y--; else if ( focal[target].y > teams[id].y ) teams[id].y++; } } /* * Function: Print usage line in stderr */ void show_usage( char *program_name ) { fprintf(stderr,"Usage: %s <config_file> | <command_line_args>\n", program_name ); fprintf(stderr,"\t<config_file> ::= -f <file_name>\n"); fprintf(stderr,"\t<command_line_args> ::= <rows> <columns> <maxIter> <numTeams> [ <teamX> <teamY> <teamType> ... ] <numfocoPoints> [ <focalX> <focalY> <focalStart> <focalTemperature> ... 
]\n"); fprintf(stderr,"\n"); } #ifdef DEBUG /* * Function: Print the current state of the simulation */ void print_status( int iteration, int rows, int columns, float *surface, int num_teams, Team *teams, int num_focal, focoPoint *focal, float global_residual ) { /* * You don't need to optimize this function, it is only for pretty printing and debugging purposes. * It is not compiled in the production versions of the program. * Thus, it is never used when measuring times in the leaderboard */ int i,j; printf("Iteration: %d\n", iteration ); printf("+"); for( j=0; j<columns; j++ ) printf("---"); printf("+\n"); for( i=0; i<rows; i++ ) { printf("|"); for( j=0; j<columns; j++ ) { char symbol; if ( accessMat( surface, i, j ) >= 1000 ) symbol = '*'; else if ( accessMat( surface, i, j ) >= 100 ) symbol = '0' + (int)(accessMat( surface, i, j )/100); else if ( accessMat( surface, i, j ) >= 50 ) symbol = '+'; else if ( accessMat( surface, i, j ) >= 25 ) symbol = '.'; else symbol = '0'; int t; int flag_team = 0; for( t=0; t<num_teams; t++ ) if ( teams[t].x == i && teams[t].y == j ) { flag_team = 1; break; } if ( flag_team ) printf("[%c]", symbol ); else { int f; int flag_focal = 0; for( f=0; f<num_focal; f++ ) if ( focal[f].x == i && focal[f].y == j && focal[f].active == 1 ) { flag_focal = 1; break; } if ( flag_focal ) printf("(%c)", symbol ); else printf(" %c ", symbol ); } } printf("|\n"); } printf("+"); for( j=0; j<columns; j++ ) printf("---"); printf("+\n"); printf("Global residual: %f\n\n", global_residual); } #endif /* * MAIN PROGRAM */ int main(int argc, char *argv[]) { int i,j,t; // Simulation data int rows, columns, max_iter; float *surface, *surfaceCopy; int num_teams, num_focal; Team *teams; focoPoint *focal; /* 1. Read simulation arguments */ /* 1.1. Check minimum number of arguments */ if (argc<2) { fprintf(stderr,"-- Error in arguments: No arguments\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } int read_from_file = ! strcmp( argv[1], "-f" ); /* 1.2. 
Read configuration from file */ if ( read_from_file ) { /* 1.2.1. Open file */ if (argc<3) { fprintf(stderr,"-- Error in arguments: file-name argument missing\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } FILE *args = cp_abrir_fichero( argv[2] ); if ( args == NULL ) { fprintf(stderr,"-- Error in file: not found: %s\n", argv[1]); exit( EXIT_FAILURE ); } /* 1.2.2. Read surface and maximum number of iterations */ int ok; ok = fscanf(args, "%d %d %d", &rows, &columns, &max_iter); if ( ok != 3 ) { fprintf(stderr,"-- Error in file: reading rows, columns, max_iter from file: %s\n", argv[1]); exit( EXIT_FAILURE ); } surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); if ( surface == NULL || surfaceCopy == NULL ) { fprintf(stderr,"-- Error allocating: surface structures\n"); exit( EXIT_FAILURE ); } /* 1.2.3. Teams information */ ok = fscanf(args, "%d", &num_teams ); if ( ok != 1 ) { fprintf(stderr,"-- Error file, reading num_teams from file: %s\n", argv[1]); exit( EXIT_FAILURE ); } teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams ); if ( teams == NULL ) { fprintf(stderr,"-- Error allocating: %d teams\n", num_teams ); exit( EXIT_FAILURE ); } for( i=0; i<num_teams; i++ ) { ok = fscanf(args, "%d %d %d", &teams[i].x, &teams[i].y, &teams[i].type); if ( ok != 3 ) { fprintf(stderr,"-- Error in file: reading team %d from file: %s\n", i, argv[1]); exit( EXIT_FAILURE ); } } /* 1.2.4. 
foco points information */ ok = fscanf(args, "%d", &num_focal ); if ( ok != 1 ) { fprintf(stderr,"-- Error in file: reading num_focal from file: %s\n", argv[1]); exit( EXIT_FAILURE ); } focal = (focoPoint *)malloc( sizeof(focoPoint) * (size_t)num_focal ); if ( focal == NULL ) { fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal ); exit( EXIT_FAILURE ); } for( i=0; i<num_focal; i++ ) { ok = fscanf(args, "%d %d %d %d", &focal[i].x, &focal[i].y, &focal[i].start, &focal[i].heat); if ( ok != 4 ) { fprintf(stderr,"-- Error in file: reading focal point %d from file: %s\n", i, argv[1]); exit( EXIT_FAILURE ); } focal[i].active = 0; } } /* 1.3. Read configuration from arguments */ else { /* 1.3.1. Check minimum number of arguments */ if (argc<6) { fprintf(stderr, "-- Error in arguments: not enough arguments when reading configuration from the command line\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } /* 1.3.2. Surface and maximum number of iterations */ rows = atoi( argv[1] ); columns = atoi( argv[2] ); max_iter = atoi( argv[3] ); surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); /* 1.3.3. Teams information */ num_teams = atoi( argv[4] ); teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams ); if ( teams == NULL ) { fprintf(stderr,"-- Error allocating: %d teams\n", num_teams ); exit( EXIT_FAILURE ); } if ( argc < num_teams*3 + 5 ) { fprintf(stderr,"-- Error in arguments: not enough arguments for %d teams\n", num_teams ); exit( EXIT_FAILURE ); } for( i=0; i<num_teams; i++ ) { teams[i].x = atoi( argv[5+i*3] ); teams[i].y = atoi( argv[6+i*3] ); teams[i].type = atoi( argv[7+i*3] ); } /* 1.3.4. 
foco points information */ int focal_args = 5 + i*3; if ( argc < focal_args+1 ) { fprintf(stderr,"-- Error in arguments: not enough arguments for the number of focal points\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } num_focal = atoi( argv[focal_args] ); focal = (focoPoint *)malloc( sizeof(focoPoint) * (size_t)num_focal ); if ( teams == NULL ) { fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal ); exit( EXIT_FAILURE ); } if ( argc < focal_args + 1 + num_focal*4 ) { fprintf(stderr,"-- Error in arguments: not enough arguments for %d focal points\n", num_focal ); exit( EXIT_FAILURE ); } for( i=0; i<num_focal; i++ ) { focal[i].x = atoi( argv[focal_args+i*4+1] ); focal[i].y = atoi( argv[focal_args+i*4+2] ); focal[i].start = atoi( argv[focal_args+i*4+3] ); focal[i].heat = atoi( argv[focal_args+i*4+4] ); focal[i].active = 0; } /* 1.3.5. Sanity check: No extra arguments at the end of line */ if ( argc > focal_args+i*4+1 ) { fprintf(stderr,"-- Error in arguments: extra arguments at the end of the command line\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } } #ifdef DEBUG /* 1.4. Print arguments */ printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter); printf("Arguments, Teams: %d, foco points: %d\n", num_teams, num_focal ); for( i=0; i<num_teams; i++ ) { printf("\tTeam %d, position (%d,%d), type: %d\n", i, teams[i].x, teams[i].y, teams[i].type ); } for( i=0; i<num_focal; i++ ) { printf("\tfoco_point %d, position (%d,%d), start time: %d, temperature: %d\n", i, focal[i].x, focal[i].y, focal[i].start, focal[i].heat ); } #endif // DEBUG /* 2. 
Select GPU and start global timer */ cudaSetDevice(0); cudaDeviceSynchronize(); double ttotal = cp_Wtime(); /* * * START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT * */ float *Surface, *SurfaceCopy; focoPoint *foco; Team *equipo; cudaMalloc((void **)&Surface,sizeof(float)*rows*columns); cudaMalloc((void **) &SurfaceCopy,sizeof(float)*rows*columns); cudaMalloc((void **) &foco,sizeof(focoPoint)*num_focal); cudaMalloc((void **) &equipo,sizeof(Team)*num_teams); cudaMemcpy(equipo,teams,sizeof(Team)*num_teams,cudaMemcpyHostToDevice); //Establecemos los valores de los tamanos de bloque int tBlockX= 64; int tBlockY= 4; int tBlockTeams=256; int tamGrx, tamGry,tamGridTeams; tamGridTeams= num_teams/tBlockTeams; if (num_teams%tBlockTeams!=0) tamGridTeams++; tamGrx= columns/tBlockX; if (columns%tBlockX!=0) tamGrx++; tamGry= rows/tBlockY; if (rows%tBlockY!=0) tamGry++; //Establecemos los tamanos del grid y de bloque dim3 gridSize(tamGrx,tamGry); dim3 blockSize(tBlockX,tBlockY); //Aqui inicializamos las matrices principales y la copia llamando a la funcion que inicializa //asi que posteriormente ya nos ahorramos volver a inicializar. inicializar<<<blockSize,gridSize>>>(Surface,rows,columns); inicializar<<<blockSize,gridSize>>>(SurfaceCopy,rows,columns); /* 4. Simulation */ int iter; int flag_stability = 0; int first_activation = 0; for( iter=0; iter<max_iter && ! flag_stability; iter++ ) { /* 4.1. Activate focal points */ int num_deactivated = 0; for( i=0; i<num_focal; i++ ) { if ( focal[i].start == iter ) { focal[i].active = 1; if ( ! first_activation ) first_activation = 1; } // Count focal points already deactivated by a team if ( focal[i].active == 2 ) num_deactivated++; } /* 4.2. Propagate heat (10 steps per each team movement) */ float global_residual = 0.0f; int step; for( step=0; step<10; step++ ) { /* 4.2.1. 
Update heat on active focal points */ for( i=0; i<num_focal; i++ ) { if ( focal[i].active != 1 ) continue; int x = focal[i].x; int y = focal[i].y; propagar<<<1,1>>>(Surface,x,y,columns,focal[i].heat); } /* 4.2.2. Copy values of the surface in ancillary structure (Skip borders) */ float *aux=Surface; Surface=SurfaceCopy; SurfaceCopy=aux; /* 4.2.3. Update surface values (skip borders) */ actualizar<<<gridSize,blockSize>>>(Surface,SurfaceCopy,rows,columns); /* 4.2.4. Compute the maximum residual difference (absolute value) */ if(step==0 && num_deactivated==num_focal){ cudaMemcpy(surface,Surface,sizeof(float)*rows*columns,cudaMemcpyDeviceToHost); cudaMemcpy(surfaceCopy,SurfaceCopy,sizeof(float)*rows*columns,cudaMemcpyDeviceToHost); for( i=1; i<rows-1; i++ ) for( j=1; j<columns-1; j++ ) if ( fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) ) > global_residual ) { global_residual = fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) ); } } } /* If the global residual is lower than THRESHOLD, we have reached enough stability, stop simulation at the end of this iteration */ if( num_deactivated == num_focal && global_residual < THRESHOLD ) flag_stability = 1; /* 4.3. Move teams */ cudaMemcpy(foco,focal,sizeof(focoPoint)*num_focal,cudaMemcpyHostToDevice); moverEquipos<<<tamGridTeams,tBlockTeams>>>(equipo,foco,num_teams,num_focal); cudaMemcpy(teams,equipo,sizeof(Team)*num_teams,cudaMemcpyDeviceToHost); /* 4.4. Team actions */ for( t=0; t<num_teams; t++ ) { /* 4.4.1. Deactivate the target focal point when it is reached */ int target = teams[t].target; if ( target != -1 && focal[target].x == teams[t].x && focal[target].y == teams[t].y && focal[target].active == 1 ) focal[target].active = 2; /* 4.4.2. 
Reduce heat in a circle around the team */ int radius; if ( teams[t].type == 1 ) {radius = RADIUS_TYPE_1; dim3 gridRed(1,1); dim3 blockRed(8,8); apagarEquipo1<<<gridRed,blockRed>>>(Surface,teams[t].x,teams[t].y,radius,rows,columns); } else {radius = RADIUS_TYPE_2_3; dim3 gridRed(1,1); dim3 blockRed(32,32); apagarEquipo23<<<gridRed,blockRed>>>(Surface,teams[t].x,teams[t].y,radius,rows,columns); } } #ifdef DEBUG /* 4.5. DEBUG: Print the current state of the simulation at the end of each iteration */ print_status( iter, rows, columns, surface, num_teams, teams, num_focal, focal, global_residual ); #endif // DEBUG } cudaMemcpy(surface,Surface,sizeof(float)*rows*columns,cudaMemcpyDeviceToHost); /* * * STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT * */ /* 5. Stop global time */ cudaDeviceSynchronize(); ttotal = cp_Wtime() - ttotal; /* 6. Output for leaderboard */ printf("\n"); /* 6.1. Total computation time */ printf("Time: %lf\n", ttotal ); /* 6.2. Results: Number of iterations, position of teams, residual heat on the focal points */ printf("Result: %d", iter); /* for (i=0; i<num_teams; i++) printf(" %d %d", teams[i].x, teams[i].y ); */ for (i=0; i<num_focal; i++) printf(" %.6f", accessMat( surface, focal[i].x, focal[i].y ) ); printf("\n"); /* 7. Free resources */ free( teams ); free( focal ); free( surface ); free( surfaceCopy ); /* 8. End */ return 0; }
d41f3feefe65e48781cdc1c9f70f6a0f81fe800d.hip
// !!! This is a file automatically generated by hipify!!! /* * Global Switches */ #define SAC_DO_CHECK 0 #define SAC_DO_CHECK_TYPE 0 #define SAC_DO_CHECK_GPU 0 #define SAC_DO_CHECK_BOUNDARY 0 #define SAC_DO_CHECK_MALLOC 0 #define SAC_DO_CHECK_ERRNO 0 #define SAC_DO_CHECK_HEAP 0 #define SAC_DO_CHECK_DISTMEM 0 #define SAC_DO_CHECK_DISTMEMPHM 0 #define SAC_DO_PHM 0 #define SAC_DO_APS 0 #define SAC_DO_DAO 0 #define SAC_DO_MSCA 0 #define SAC_DO_PROFILE 0 #define SAC_DO_PROFILE_WITH 0 #define SAC_DO_PROFILE_FUN 0 #define SAC_DO_PROFILE_INL 0 #define SAC_DO_PROFILE_LIB 0 #define SAC_DO_PROFILE_DISTMEM 0 #define SAC_DO_TRACE 0 #define SAC_DO_TRACE_REF 0 #define SAC_DO_TRACE_MEM 0 #define SAC_DO_TRACE_PRF 0 #define SAC_DO_TRACE_FUN 0 #define SAC_DO_TRACE_WL 0 #define SAC_DO_TRACE_AA 0 #define SAC_DO_TRACE_MT 0 #define SAC_DO_TRACE_RTSPEC 0 #define SAC_DO_TRACE_DISTMEM 0 #define SAC_DO_CACHESIM 0 #define SAC_DO_CACHESIM_ADV 0 #define SAC_DO_CACHESIM_GLOBAL 1 #define SAC_DO_CACHESIM_FILE 0 #define SAC_DO_CACHESIM_PIPE 0 #define SAC_DO_CACHESIM_IMDT 1 #define SAC_DO_MULTITHREAD 0 #define SAC_DO_MT_PTHREAD 0 #define SAC_DO_MT_LPEL 0 #define SAC_DO_MT_OMP 0 #define SAC_DO_DISTMEM 0 #define SAC_DO_DISTMEM_GASNET 0 #define SAC_DO_DISTMEM_GPI 0 #define SAC_DO_DISTMEM_MPI 0 #define SAC_DO_DISTMEM_ARMCI 0 #define SAC_DO_DISTMEM_ALLOC_CACHE_OUTSIDE_DSM 0 #define SAC_DO_DISTMEM_PTR_DESC 0 #define SAC_DO_DISTMEM_PTR_CACHE 0 #define SAC_DO_THREADS_STATIC 1 #define SAC_DO_FP 0 #define SAC_DO_MT_CREATE_JOIN 0 #define SAC_DEBUG_RC 0 /* * Global Settings */ #define SAC_FORCE_DESC_SIZE -1 /* * MUTC Backend Specific Switches */ #define SAC_MUTC_FUNAP_AS_CREATE 0 #define SAC_MUTC_THREAD_MALLOC 0 #define SAC_MUTC_DISABLE_THREAD_MEM 0 #define SAC_MUTC_BENCH 0 #define SAC_MUTC_MACROS 0 #define SAC_MUTC_RC_PLACES 1 #define SAC_MUTC_RC_INDIRECT 0 #define SAC_MUTC_SEQ_DATA_PARALLEL 0 #define SAC_MUTC_FORCE_SPAWN_FLAGS #define SAC_CUDA_MACROS 1 #define SAC_OMP_MACROS 0 #define SAC_DO_COMPILE_MODULE 0 
#define SAC_C_EXTERN extern "C" /* * Global Settings */ #ifndef NULL # ifdef __cplusplus # define NULL 0 # else # define NULL (void*) 0 # endif #endif #define SAC_SET_TMPDIR "/tmp" #define SAC_SET_INITIAL_MASTER_HEAPSIZE 1048576 #define SAC_SET_INITIAL_WORKER_HEAPSIZE 65536 #define SAC_SET_INITIAL_UNIFIED_HEAPSIZE 0 #ifndef SAC_SET_RTSPEC_THREADS #define SAC_SET_RTSPEC_THREADS 1 #endif #ifndef SAC_SET_MTMODE #define SAC_SET_MTMODE 0 #endif #define SAC_SET_CPU_BIND_STRATEGY 0 #define SAC_SET_BARRIER_TYPE 0 #define SAC_SET_SMART_DECISIONS 0 #define SAC_SET_SMART_FILENAME "default" #define SAC_SET_SMART_ARCH "(null)" #define SAC_SET_SMART_PERIOD 500 #ifndef SAC_SET_THREADS_MAX #define SAC_SET_THREADS_MAX 128 #endif #ifndef SAC_SET_THREADS #define SAC_SET_THREADS 1 #endif #ifndef SAC_OMP_ACTIVE_LEVEL #define SAC_OMP_ACTIVE_LEVEL 1 #endif #ifndef SAC_SET_MASTERCLASS #define SAC_SET_MASTERCLASS 0 #endif #define SAC_SET_NUM_SCHEDULERS 0 #define SAC_SET_CACHE_1_SIZE -1 #define SAC_SET_CACHE_1_LINE 4 #define SAC_SET_CACHE_1_ASSOC 1 #define SAC_SET_CACHE_1_WRITEPOL SAC_CS_default #define SAC_SET_CACHE_1_MSCA_FACTOR 0.00 #define SAC_SET_CACHE_2_SIZE -1 #define SAC_SET_CACHE_2_LINE 4 #define SAC_SET_CACHE_2_ASSOC 1 #define SAC_SET_CACHE_2_WRITEPOL SAC_CS_default #define SAC_SET_CACHE_2_MSCA_FACTOR 0.00 #define SAC_SET_CACHE_3_SIZE -1 #define SAC_SET_CACHE_3_LINE 4 #define SAC_SET_CACHE_3_ASSOC 1 #define SAC_SET_CACHE_3_WRITEPOL SAC_CS_default #define SAC_SET_CACHE_3_MSCA_FACTOR 0.00 #define SAC_SET_CACHESIM_HOST "" #define SAC_SET_CACHESIM_FILE "gconv2.cs" #define SAC_SET_CACHESIM_DIR "/tmp" #define SAC_SET_MAXFUN 0 #define SAC_SET_MAXFUNAP 1 #define SBLOCKSZ 16 #define LBLOCKSZ 256 /* * Includes */ #include "sac.h" #if SAC_OMP_MACROS #include "omp.h" #endif #if SAC_CUDA_MACROS #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #endif /* * SAC-Program gconv2.sac : */ /* Additional headers for external function declarations */ #include <stdlib.h> /* * 
type definitions */ SAC_ND_TYPEDEF((SACt_sacprelude_p__SACarg, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_Random__Random, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_ComplexBasics__complex, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, )))))))))), double) SAC_ND_TYPEDEF((SACt_Complex__complex, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (DOU, )))))))))), double) SAC_ND_TYPEDEF((SACt_World__World, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_C99Benchmarking__C99Benchmarking, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_String__string, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_Interval__Interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_C99Benchmarking__Interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_Interval__Interval) SAC_ND_TYPEDEF((SACt_MTClock__MTClock, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_Terminal__Terminal, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_TermFile__TermFile, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) /* * Global Definitions */ SAC_MT_DEFINE() SAC_PF_DEFINE() SAC_HM_DEFINE() /* * prototypes for externals (FUNDECS) */ SAC_C_EXTERN /* * ND_FUN_DECL( SACrandom, (int, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2, in_nodesc, int, (SACl_MIN, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in_nodesc, int, (SACl_MAX, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACrandom, SAC_ND_TYPE_NT( (int, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
))))))))))), SAC_ND_PARAM_in_nodesc( (SACl_MIN, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in_nodesc( (SACl_MAX, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( to_string, , 3, out, SACt_String__string, (SAC_arg_1, (SCL, (HID, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), in, unsigned char, (SACl_A, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in_nodesc, int, (SACl_LENGTH, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( to_string, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (HID, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), SACt_String__string), SAC_ND_PARAM_in( (SACl_A, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in_nodesc( (SACl_LENGTH, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); /* * prototypes for locals (FUNDEFS) */ SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__partitionSlicer__i_S__i_S__i_S__i_S__i_S, , 9, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_4, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_min, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_max, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_axis, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_lb, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ub, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__partitionSlicer__i_S__i_S__i_S__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, 
(INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_4, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_min, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_max, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_axis, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_lb, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ub, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__partitionIntersectMax__i_S__i_S, , 3, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_PWLbound1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ivmin, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__partitionIntersectMax__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_PWLbound1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ivmin, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__isPartitionIntersectNull__i_S__i_S__i_S__i_S, , 5, out, bool, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idxmin, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idxmax, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound1, 
(AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound2, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__isPartitionIntersectNull__i_S__i_S__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idxmin, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idxmax, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound2, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__i_S, , 3, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__f_S, , 3, out, float, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, float, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, 
(NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__d_S, , 3, out, double, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, double, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__bl_S, , 3, out, bool, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__c_S, , 3, out, unsigned char, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, 
unsigned char, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__b_S, , 3, out, byte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, byte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__s_S, , 3, out, short, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, short, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * 
ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__l_S, , 3, out, long, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, long, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ll_S, , 3, out, longlong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, longlong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ub_S, , 3, out, ubyte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ubyte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, 
)))))))))), ubyte), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__us_S, , 3, out, ushort, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ushort, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ui_S, , 3, out, uint, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, uint, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ul_S, , 3, out, ulong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulong, (SACl_array, 
(AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ull_S, , 3, out, ulonglong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulonglong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__partitionIntersectMin__i_S__i_S, , 3, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_PWLbound2, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ivmax, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__partitionIntersectMin__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_PWLbound2, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ivmax, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( 
SACwf_sacprelude_p__adjustLacFunParams__bl_S__i_S__i_S, , 4, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_iv, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__adjustLacFunParams__bl_S__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_iv, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN__main, , 1, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN__main, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__prod__i_S, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_v, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__prod__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_v, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__adjustLacFunParamsReshape__bl_S__i_S__i_S__i_S, , 5, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, 
(FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_iv, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_shp, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__adjustLacFunParamsReshape__bl_S__i_S__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_iv, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_shp, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__i_S__i_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__f_S__f_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, float, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__f_S__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), 
SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__d_S__d_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, double, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, double, (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__d_S__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__bl_S__bl_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__bl_S__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__c_S__c_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, unsigned char, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, unsigned char, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__c_S__c_S, 
void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__b_S__b_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, byte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, byte, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__b_S__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__s_S__s_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, short, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, short, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__s_S__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__l_S__l_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, long, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, long, (SACl_B, (AUD, (NHD, (NUQ, 
(INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__l_S__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__ll_S__ll_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, longlong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, longlong, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__ll_S__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__ub_S__ub_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ubyte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, ubyte, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__ub_S__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__us_S__us_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ushort, 
(SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, ushort, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__us_S__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__ui_S__ui_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, uint, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, uint, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__ui_S__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__ul_S__ul_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ulong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, ulong, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__ul_S__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__ull_S__ull_S, , 
3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ulonglong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, ulonglong, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__ull_S__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__gridFiller__i_S__i_S__i_S__i_S__i_S, , 8, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_lb, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ub, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_wdth, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_dim, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_maxwidth, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__gridFiller__i_S__i_S__i_S__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_lb, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ub, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, 
(NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_wdth, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_dim, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_maxwidth, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__isPartitionIntersect1Part__i_S__i_S__i_S__i_S, , 5, out, bool, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idxmin, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idxmax, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound2, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__isPartitionIntersect1Part__i_S__i_S__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idxmin, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idxmax, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound2, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__i_S, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, 
)))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__d_S, , 2, out, double, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, double, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__f_S, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__c_S, , 2, out, unsigned char, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, unsigned char, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__bl_S, , 2, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, 
(FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__b_S, , 2, out, byte, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, byte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__s_S, , 2, out, short, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, short, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__l_S, , 2, out, long, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, long, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__ll_S, , 2, out, longlong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, longlong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( 
SACwf__MAIN_CLsacprelude_p__zero__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__ub_S, , 2, out, ubyte, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, ubyte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__us_S, , 2, out, ushort, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, ushort, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__ui_S, , 2, out, uint, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, uint, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__ul_S, , 2, out, ulong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, 
ulong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__ull_S, , 2, out, ulonglong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, ulonglong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__partitionMax__i_S__i_S, , 3, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_x, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_y, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__partitionMax__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_x, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_y, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__partitionMin__i_S__i_S, , 3, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_x, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_y, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( 
SACwf_sacprelude_p__partitionMin__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_x, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_y, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___PL_PL__i_S, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_a, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___PL_PL__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_a, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__i_S, , 3, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__f_S, , 3, out, float, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, float, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, 
(FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__d_S, , 3, out, double, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, double, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__bl_S, , 3, out, bool, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__c_S, , 3, out, unsigned char, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, unsigned char, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, 
(FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__b_S, , 3, out, byte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, byte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__s_S, , 3, out, short, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, short, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__l_S, , 3, out, long, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, 
int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, long, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__ll_S, , 3, out, longlong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, longlong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__ub_S, , 3, out, ubyte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ubyte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * 
ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__us_S, , 3, out, ushort, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ushort, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__ui_S, , 3, out, uint, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, uint, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__ul_S, , 3, out, ulong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, 
)))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__ull_S, , 3, out, ulonglong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulonglong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__i_S, , 3, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__f_S, , 3, out, float, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, float, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( 
SACwf_sacprelude_p___selVxADistmemLocal__i_S__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__d_S, , 3, out, double, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, double, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__bl_S, , 3, out, bool, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__c_S, , 3, out, unsigned char, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, 
(FPM, (NOT, (NDI, (UCH, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, unsigned char, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__b_S, , 3, out, byte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, byte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__s_S, , 3, out, short, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, short, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), 
SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__l_S, , 3, out, long, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, long, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ll_S, , 3, out, longlong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, longlong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ub_S, , 3, out, ubyte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ubyte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( 
SACwf_sacprelude_p___selVxADistmemLocal__i_S__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__us_S, , 3, out, ushort, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ushort, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ui_S, , 3, out, uint, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, uint, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ul_S, , 3, out, ulong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, 
(NOT, (NDI, (ULO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ull_S, , 3, out, ulonglong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulonglong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf__MAIN__main, , 1, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf__MAIN__main, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf__MAIN_CL_INIT__init, , 0) */ SAC_ND_DECL_FUN2( SACf__MAIN_CL_INIT__init, void, void); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_ScalarArith___PL__f__f, , 3, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, 
)))))))))), in, float, (SACl_B, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_ScalarArith___PL__f__f, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_B, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__ull_S, , 2, out, ulonglong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, ulonglong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__ul_S, , 2, out, ulong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, ulong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__ui_S, , 2, out, uint, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, uint, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); 
/* NOTE(review): machine-generated C emitted by the sac2c (Single Assignment C)
 * compiler — do not hand-edit; regenerate from the SAC sources instead.
 * The run below declares (SAC_ND_DECL_FUN2 expands to an extern prototype) the
 * monomorphic instances of the sacprelude `zero` builtin, one per scalar base
 * type: ushort, ubyte, longlong, long, short, byte, bool, unsigned char,
 * float, double, int — each taking one `in` array argument (SACl_A) and
 * producing one `out` scalar (SAC_arg_1) — followed by the object-init
 * declaration for SACf_World_CL_INIT__init_TheWorld (one `inout` object).
 * Each declaration is preceded by a generator comment (ND_FUN_DECL ...) that
 * restates the argument descriptor tuples, e.g. (SCL,(NHD,(NUQ,...))) —
 * presumably shape-class / hiddenness / uniqueness annotations used by the
 * SAC runtime ICM macros; confirm against the sac2c runtime headers. */
SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__us_S, , 2, out, ushort, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, ushort, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__ub_S, , 2, out, ubyte, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, ubyte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__ll_S, , 2, out, longlong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, longlong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__l_S, , 2, out, long, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, long, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, 
(NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__s_S, , 2, out, short, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, short, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__b_S, , 2, out, byte, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, byte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__bl_S, , 2, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__c_S, , 2, out, unsigned char, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, unsigned char, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_A, 
(AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__f_S, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__d_S, , 2, out, double, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, double, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__i_S, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_World_CL_INIT__init_TheWorld__SACt_World__World, , 1, inout, SACt_World__World, (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_World_CL_INIT__init_TheWorld__SACt_World__World, void, SAC_ND_PARAM_inout( (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_World__World)); 
SAC_C_EXTERN /* * ND_FUN_DECL( SACf_C99Benchmarking_CL_INIT__init_TheBenchmarkObject__SACt_C99Benchmarking__C99Benchmarking, , 1, inout, SACt_C99Benchmarking__C99Benchmarking, (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_C99Benchmarking_CL_INIT__init_TheBenchmarkObject__SACt_C99Benchmarking__C99Benchmarking, void, SAC_ND_PARAM_inout( (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_C99Benchmarking__C99Benchmarking)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_C99Benchmarking__getInterval__SACt_String__string__i__i, , 4, out, SACt_Interval__Interval, (SAC_arg_1, (SCL, (HID, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), in, SACt_String__string, (SACl_interval_name, (SCL, (HID, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), in, int, (SACl_interval_number, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_unit_time, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_C99Benchmarking__getInterval__SACt_String__string__i__i, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (HID, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), SACt_Interval__Interval), SAC_ND_PARAM_in( (SACl_interval_name, (SCL, (HID, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), SACt_String__string), SAC_ND_PARAM_in( (SACl_interval_number, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_unit_time, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_MTClock_CL_INIT__init_TheMTClock__SACt_MTClock__MTClock, , 1, inout, SACt_MTClock__MTClock, (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_MTClock_CL_INIT__init_TheMTClock__SACt_MTClock__MTClock, void, SAC_ND_PARAM_inout( (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 
SACt_MTClock__MTClock)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_C99Benchmarking__start__SACt_C99Benchmarking__Interval, , 1, inout, SACt_Interval__Interval, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_C99Benchmarking__start__SACt_C99Benchmarking__Interval, void, SAC_ND_PARAM_inout( (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_Interval__Interval)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_C99Benchmarking__end__SACt_C99Benchmarking__Interval, , 1, inout, SACt_Interval__Interval, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_C99Benchmarking__end__SACt_C99Benchmarking__Interval, void, SAC_ND_PARAM_inout( (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_Interval__Interval)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_Terminal_CL_INIT__init_TheTerminal__SACt_Terminal__Terminal, , 1, inout, SACt_Terminal__Terminal, (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_Terminal_CL_INIT__init_TheTerminal__SACt_Terminal__Terminal, void, SAC_ND_PARAM_inout( (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_Terminal__Terminal)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_TermFile_CL_INIT__init_stdout__SACt_TermFile__TermFile, , 1, inout, SACt_TermFile__TermFile, (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_TermFile_CL_INIT__init_stdout__SACt_TermFile__TermFile, void, SAC_ND_PARAM_inout( (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_TermFile__TermFile)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_C99Benchmarking__printResult__SACt_C99Benchmarking__Interval, , 1, inout, SACt_Interval__Interval, (SACl_int1, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( 
SACf_C99Benchmarking__printResult__SACt_C99Benchmarking__Interval, void, SAC_ND_PARAM_inout( (SACl_int1, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_Interval__Interval)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__ull_S, , 3, out, ulonglong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulonglong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__ul_S, , 3, out, ulong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__ui_S, , 3, out, uint, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, uint, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( 
SACf_sacprelude_p__sel__i_X__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__us_S, , 3, out, ushort, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ushort, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__ub_S, , 3, out, ubyte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ubyte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__ll_S, , 3, out, longlong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, 
(INT, )))))))))), in, longlong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__l_S, , 3, out, long, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, long, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__s_S, , 3, out, short, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, short, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__b_S, , 3, out, byte, (SAC_arg_1, (AUD, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, byte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__c_S, , 3, out, unsigned char, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, unsigned char, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__bl_S, , 3, out, bool, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, 
(GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__d_S, , 3, out, double, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, double, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__f_S, , 3, out, float, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, float, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__i_S, , 3, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idx, (AKD, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__ull_S, , 3, out, ulonglong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulonglong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__ull_S, , 3, out, ulonglong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulonglong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__ul_S, , 3, out, ulong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulong, (SACl_array, (AUD, (NHD, (NUQ, (INT, 
(GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__ul_S, , 3, out, ulong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__ui_S, , 3, out, uint, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, uint, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__ui_S, , 3, out, 
uint, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, uint, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__us_S, , 3, out, ushort, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ushort, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__us_S, , 3, out, ushort, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ushort, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, 
(NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__ub_S, , 3, out, ubyte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ubyte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__ub_S, , 3, out, ubyte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ubyte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__ll_S, , 3, out, longlong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, longlong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( 
SACf_sacprelude_p___selVxADistmemLocal__i_1__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__ll_S, , 3, out, longlong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, longlong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__l_S, , 3, out, long, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, long, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__l_S, , 3, out, long, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, 
(FPM, (NOT, (NDI, (LON, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, long, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__s_S, , 3, out, short, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, short, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__s_S, , 3, out, short, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, short, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, 
(AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__b_S, , 3, out, byte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, byte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__b_S, , 3, out, byte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, byte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__c_S, , 3, out, unsigned char, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, unsigned char, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__c_S, void, 
SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__c_S, , 3, out, unsigned char, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, unsigned char, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__bl_S, , 3, out, bool, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__bl_S, , 3, out, bool, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, 
)))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__d_S, , 3, out, double, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, double, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__d_S, , 3, out, double, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, double, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, 
(FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__f_S, , 3, out, float, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, float, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__f_S, , 3, out, float, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, float, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__i_S, , 3, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__i_S, , 3, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__ull_S, , 3, out, ulonglong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulonglong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__ul_S, , 3, out, ulong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, 
(INT, )))))))))), in, ulong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__ui_S, , 3, out, uint, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, uint, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__us_S, , 3, out, ushort, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ushort, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( 
SACf_sacprelude_p___selSxADistmemLocal__i__ub_S, , 3, out, ubyte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ubyte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__ll_S, , 3, out, longlong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, longlong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__l_S, , 3, out, long, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, long, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( 
(SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__s_S, , 3, out, short, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, short, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__b_S, , 3, out, byte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, byte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__c_S, , 3, out, unsigned char, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, unsigned char, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, 
(UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__bl_S, , 3, out, bool, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__d_S, , 3, out, double, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, double, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__f_S, , 3, out, float, (SAC_arg_1, 
(AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, float, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__i_S, , 3, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__ull_S__ull_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ulonglong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, ulonglong, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__ull_S__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_B, (AUD, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__ul_S__ul_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ulong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, ulong, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__ul_S__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__ui_S__ui_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, uint, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, uint, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__ui_S__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__us_S__us_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ushort, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, ushort, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__us_S__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__ub_S__ub_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ubyte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, ubyte, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__ub_S__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__ll_S__ll_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, longlong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, longlong, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__ll_S__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__l_S__l_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, long, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, long, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__l_S__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, 
(SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__s_S__s_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, short, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, short, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__s_S__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__b_S__b_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, byte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, byte, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__b_S__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__c_S__c_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, unsigned char, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, unsigned char, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, 
))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__c_S__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__bl_S__bl_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__bl_S__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__d_S__d_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, double, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, double, (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__d_S__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__f_S__f_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, float, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, 
(NOT, (NDI, (FLO, )))))))))), in, float, (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__f_S__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__i_S__i_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__adjustLacFunParams__bl_S__i_S__i_X, , 4, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_iv, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__adjustLacFunParams__bl_S__i_S__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( 
(SACl_iv, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__adjustLacFunParamsReshape__bl_S__i_S__i_X__i_X, , 5, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_iv, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_shp, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__adjustLacFunParamsReshape__bl_S__i_S__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_iv, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_shp, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__isPartitionIntersectNull__i_X__i_X__i_X__i_X, , 5, out, bool, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idxmin, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idxmax, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__isPartitionIntersectNull__i_X__i_X__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idxmin, (AKD, (NHD, (NUQ, 
(INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idxmax, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__isPartitionIntersectNull__i__i__i_X__i_X, , 5, out, bool, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idxmin, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idxmax, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__isPartitionIntersectNull__i__i__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idxmin, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idxmax, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__partitionMin__i__i, , 3, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_x, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_y, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__partitionMin__i__i, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, 
)))))))))), int), SAC_ND_PARAM_in( (SACl_x, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_y, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__partitionMax__i__i, , 3, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_x, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_y, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__partitionMax__i__i, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_x, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_y, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__isPartitionIntersect1Part__i_X__i_X__i_X__i_X, , 5, out, bool, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idxmin, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idxmax, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__isPartitionIntersect1Part__i_X__i_X__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idxmin, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idxmax, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound2, (AKD, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__isPartitionIntersect1Part__i__i__i_X__i_X, , 5, out, bool, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idxmin, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idxmax, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__isPartitionIntersect1Part__i__i__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idxmin, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idxmax, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__partitionIntersectMax__i_X__i_X, , 3, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_PWLbound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ivmin, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__partitionIntersectMax__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_PWLbound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ivmin, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( 
SACf_sacprelude_p__partitionIntersectMax__i_X__i, , 3, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_PWLbound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ivmin, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__partitionIntersectMax__i_X__i, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_PWLbound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ivmin, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__partitionIntersectMin__i_X__i_X, , 3, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_PWLbound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ivmax, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__partitionIntersectMin__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_PWLbound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ivmax, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__partitionIntersectMin__i_X__i, , 3, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_PWLbound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ivmax, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__partitionIntersectMin__i_X__i, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), 
SAC_ND_PARAM_in( (SACl_PWLbound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ivmax, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__prod__i_X, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_v, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__prod__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_v, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__partitionSlicer__i_X__i_X__i__i_X__i_X, , 9, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_4, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_min, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_max, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_axis, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_lb, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ub, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__partitionSlicer__i_X__i_X__i__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( 
(SAC_arg_4, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_min, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_max, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_axis, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_lb, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ub, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__gridFiller__i_X__i_X__i_X__i__i_X, , 8, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_lb, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ub, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_wdth, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_dim, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_maxwidth, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__gridFiller__i_X__i_X__i_X__i__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_lb, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ub, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_wdth, (AKD, (NHD, 
(NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_dim, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_maxwidth, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___PL_PL__i, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_a, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___PL_PL__i, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_a, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf__MAIN_CLsacprelude_p__zero__f_1, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf__MAIN_CLsacprelude_p__zero__f_1, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1, , 4, out, float, (SAC_arg_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), 
SAC_ND_PARAM_in( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf__MAIN_CLsacprelude_p__zero__f_X, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf__MAIN_CLsacprelude_p__zero__f_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf__MAIN_CLsacprelude_p__zero__f_X_X_X_X_X, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf__MAIN_CLsacprelude_p__zero__f_X_X_X_X_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___ST__i__i, , 3, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_a, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_b, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___ST__i__i, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_a, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_b, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__and__bl__bl, 
, 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_a, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_b, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__and__bl__bl, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_a, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_b, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * CUDA_GLOBALFUN_DECL( SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X, 14, inout, float, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), -7, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, float, (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, in, float, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, 
(NDI, (OTH, )))))))))), -7, in, int, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 1) */ __global__ void SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X(SAC_CUDA_PARAM_inout( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, 
)))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_CUDA_PARAM_in( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), int), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SIZE((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, 
)))))))))))); ; SAC_C_EXTERN /* * CUDA_GLOBALFUN_DECL( SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, inout, float, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0) */ __global__ void SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f(SAC_CUDA_PARAM_in( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_CUDA_PARAM_inout( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, 
(NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); ; SAC_C_EXTERN /* * CUDA_GLOBALFUN_DECL( SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, inout, float, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, 
(INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0) */ __global__ void SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f(SAC_CUDA_PARAM_in( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_CUDA_PARAM_inout( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), int 
SAC_ND_A_MIRROR_DIM((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); ; /* * global objects */ /* * ND_OBJDEF_EXTERN( (RandomGen, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_Random__Random, 0) */ SAC_ND_DECL__DATA( (RandomGen, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_Random__Random, extern) SAC_ND_DECL__DESC( (RandomGen, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), extern) SAC_NOTHING() /* * ND_OBJDEF_EXTERN( (SACo_World__TheWorld, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_World__World, 0) */ SAC_ND_DECL__DATA( (SACo_World__TheWorld, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_World__World, extern) SAC_ND_DECL__DESC( (SACo_World__TheWorld, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), extern) SAC_NOTHING() /* * ND_OBJDEF_EXTERN( 
(SACo_C99Benchmarking__TheBenchmarkObject, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_C99Benchmarking__C99Benchmarking, 0) */ SAC_ND_DECL__DATA( (SACo_C99Benchmarking__TheBenchmarkObject, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_C99Benchmarking__C99Benchmarking, extern) SAC_ND_DECL__DESC( (SACo_C99Benchmarking__TheBenchmarkObject, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), extern) SAC_NOTHING() /* * ND_OBJDEF_EXTERN( (SACo_MTClock__TheMTClock, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_MTClock__MTClock, 0) */ SAC_ND_DECL__DATA( (SACo_MTClock__TheMTClock, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_MTClock__MTClock, extern) SAC_ND_DECL__DESC( (SACo_MTClock__TheMTClock, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), extern) SAC_NOTHING() /* * ND_OBJDEF_EXTERN( (SACo_Terminal__TheTerminal, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_Terminal__Terminal, 0) */ SAC_ND_DECL__DATA( (SACo_Terminal__TheTerminal, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_Terminal__Terminal, extern) SAC_ND_DECL__DESC( (SACo_Terminal__TheTerminal, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), extern) SAC_NOTHING() /* * ND_OBJDEF_EXTERN( (SACo_TermFile__stdout, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_TermFile__TermFile, 0) */ SAC_ND_DECL__DATA( (SACo_TermFile__stdout, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_TermFile__TermFile, extern) SAC_ND_DECL__DESC( (SACo_TermFile__stdout, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), extern) SAC_NOTHING() /* * function definitions (FUNDEFS) */ /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN::SACwf__MAIN__main(...) 
[ wrapper ] ****************************************************************************/
/*
 * NOTE(review): machine-generated sac2c wrapper function — do not hand-edit the
 * macro calls below; regenerate from the SaC source instead. What the visible
 * macros show: the wrapper declares a local scalar-int result slot
 * (SACp_cwc_544) via SAC_ND_DECL__DATA/__DESC, applies the specialised
 * instance SACf__MAIN__main through SAC_ND_FUNAP2, and hands the result back
 * through the out-parameter SAC_arg_1 via SAC_ND_RET_out (wrapper functions
 * are declared void; results travel exclusively through out parameters).
 * The exact expansion of each SAC_* macro is defined externally in the SaC
 * runtime headers — TODO confirm against the matching sac2c/sac.h version.
 */
/* * ND_FUN_DEF_BEGIN( SACwf__MAIN__main, , 1, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN__main, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf__MAIN__main, , 1, out, int, SAC_SET_NT_USG( FAG, (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))) */ SAC_ND_FUNAP2( SACf__MAIN__main, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)) /* * ND_REFRESH__MIRROR( (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN__main, , 1, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__i_S(...) 
[ wrapper ] ****************************************************************************/
/*
 * NOTE(review): machine-generated sac2c wrapper — do not hand-edit; regenerate
 * from the SaC source. Visible structure: takes an int array argument SACl_A
 * (AUD = dimensionality unknown at compile time, per the descriptor reads
 * below), mirrors its size and dim out of the runtime descriptor
 * (SAC_ND_A_DESC_SIZE / SAC_ND_A_DESC_DIM), then dispatches to the
 * specialised instance SACf_sacprelude_p__zero__i_S and returns its scalar
 * int result (SACp_cwc_716) through out-parameter SAC_arg_1. Macro
 * expansions live in the external SaC runtime headers — TODO confirm
 * against the matching sac2c version.
 */
/* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__i_S, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__i_S, , 2, out, int, SAC_SET_NT_USG( FAG, (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__i_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_ND_ARG_in( 
SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))), int)) /* * ND_REFRESH__MIRROR( (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__i_S, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__d_S(...) 
[ wrapper ] ****************************************************************************/
/* [review] Machine-generated SAC wrapper for sacprelude_p::zero on double
 * scalars. Structurally identical to the int variant: mirrors SACl_A's
 * descriptor size/dim, calls SACf_sacprelude_p__zero__d_S, and returns its
 * out-value through SAC_arg_1. Generated code -- do not hand-edit. */
 /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__d_S, , 2, out, double, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, double, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, )))))))))), double, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, )))))))))), double, ) SAC_ND_DECL__DESC( (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__d_S, , 2, out, double, SAC_SET_NT_USG( FAG, (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, ))))))))))), in, double, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__d_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, ))))))))))), 
double), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))), double)) /* * ND_REFRESH__MIRROR( (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__d_S, , 2, out, double, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, double, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__f_S(...) 
[ wrapper ] ****************************************************************************/
/* [review] Machine-generated SAC wrapper for sacprelude_p::zero on float
 * scalars. Unlike the other type variants, the callee has been inlined here:
 * the wrapper takes dim(A) via SAC_ND_PRF_DIM_A__DATA, releases A, compares
 * the dimension against the constant 5 (generated SAC_ND_PRF_EQ predicate),
 * and assigns one of two float constants -- both 0.0f -- to the result, so
 * either branch yields 0.0f. SAC_ASSURE_TYPE_LINE references source line 106
 * of the original SAC program. Generated code -- do not hand-edit. */
 /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__f_S, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { SAC_ND_DECL_CONST__DATA((SACp_pinl_9392__emal_8357__cwc_719__SSA1_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0.0f) SAC_ND_DECL_CONST__DATA((SACp_pinl_9391__emal_8356__cwc_733, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0.0f) /* * ND_DECL( (SACp_emal_8355__cwc_721, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8355__cwc_721, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_8355__cwc_721, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_8354__cwc_735, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), bool, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8354__cwc_735, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), bool, ) SAC_ND_DECL__DESC( (SACp_emal_8354__cwc_735, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, 
(NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))); SAC_INIT_LOCAL_MEM() SAC_ND_PRF_DIM_A__DATA((SACp_emal_8355__cwc_721, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -2) SAC_ND_DEC_RC_FREE((SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, ) SAC_ND_PRF_SxS__DATA((SACp_emal_8354__cwc_735, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), SAC_ND_PRF_EQ, SAC_ND_READ((SACp_emal_8355__cwc_721, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 5) SAC_ND_FREE((SACp_emal_8355__cwc_721, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) if (SAC_ND_GETVAR((SACp_emal_8354__cwc_735, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), SACp_emal_8354__cwc_735)) { SAC_ND_FREE((SACp_emal_8354__cwc_735, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), ) /* * ND_ASSIGN( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_pinl_9391__emal_8356__cwc_733, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 106, "Assignment with incompatible types found!"); SAC_NOOP() SAC_NOOP() SAC_NOOP() SAC_ND_ASSIGN__DATA( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 
(SACp_pinl_9391__emal_8356__cwc_733, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) } else { SAC_ND_FREE((SACp_emal_8354__cwc_735, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), ) /* * ND_ASSIGN( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_pinl_9392__emal_8357__cwc_719__SSA1_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 106, "Assignment with incompatible types found!"); SAC_NOOP() SAC_NOOP() SAC_NOOP() SAC_ND_ASSIGN__DATA( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_pinl_9392__emal_8357__cwc_719__SSA1_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) } /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__f_S, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__c_S(...) 
[ wrapper ] ****************************************************************************/
/* [review] Machine-generated SAC wrapper for sacprelude_p::zero on unsigned
 * char scalars. Mirrors SACl_A's descriptor size/dim, calls
 * SACf_sacprelude_p__zero__c_S, and returns its out-value through SAC_arg_1.
 * Generated code -- do not hand-edit. */
 /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__c_S, , 2, out, unsigned char, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, unsigned char, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_737, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), unsigned char, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_737, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), unsigned char, ) SAC_ND_DECL__DESC( (SACp_cwc_737, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__c_S, , 2, out, unsigned char, SAC_SET_NT_USG( FAG, (SACp_cwc_737, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))), in, unsigned char, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__c_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_737, (SCL, (NHD, 
(NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))), unsigned char), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))), unsigned char)) /* * ND_REFRESH__MIRROR( (SACp_cwc_737, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), (SACp_cwc_737, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), (SACp_cwc_737, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__c_S, , 2, out, unsigned char, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, unsigned char, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__bl_S(...) 
[ wrapper ] ****************************************************************************/
/* [review] Machine-generated SAC wrapper for sacprelude_p::zero on bool
 * scalars. Mirrors SACl_A's descriptor size/dim, calls
 * SACf_sacprelude_p__zero__bl_S, and returns its out-value through SAC_arg_1.
 * Generated code -- do not hand-edit. */
 /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__bl_S, , 2, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), bool, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), bool, ) SAC_ND_DECL__DESC( (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__bl_S, , 2, out, bool, SAC_SET_NT_USG( FAG, (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, ))))))))))), in, bool, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__bl_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, ))))))))))), bool), 
SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))), bool)) /* * ND_REFRESH__MIRROR( (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__bl_S, , 2, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__b_S(...) 
[ wrapper ] ****************************************************************************/
/* [review] Machine-generated SAC wrapper for sacprelude_p::zero on byte
 * scalars. Mirrors SACl_A's descriptor size/dim, calls
 * SACf_sacprelude_p__zero__b_S, and returns its out-value through SAC_arg_1.
 * Generated code -- do not hand-edit. */
 /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__b_S, , 2, out, byte, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, byte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, )))))))))), byte, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, )))))))))), byte, ) SAC_ND_DECL__DESC( (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__b_S, , 2, out, byte, SAC_SET_NT_USG( FAG, (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, ))))))))))), in, byte, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__b_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, ))))))))))), byte), 
SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))), byte)) /* * ND_REFRESH__MIRROR( (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__b_S, , 2, out, byte, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, byte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__s_S(...) 
[ wrapper ] ****************************************************************************/
/* [review] Machine-generated SAC wrapper for sacprelude_p::zero on short
 * scalars. Mirrors SACl_A's descriptor size/dim, calls
 * SACf_sacprelude_p__zero__s_S, and returns its out-value through SAC_arg_1.
 * Generated code -- do not hand-edit. */
 /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__s_S, , 2, out, short, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, short, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, )))))))))), short, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, )))))))))), short, ) SAC_ND_DECL__DESC( (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__s_S, , 2, out, short, SAC_SET_NT_USG( FAG, (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, ))))))))))), in, short, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__s_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, ))))))))))), short), 
SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))), short)) /* * ND_REFRESH__MIRROR( (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__s_S, , 2, out, short, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, short, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__l_S(...) 
[ wrapper ] ****************************************************************************/
/* [review] Machine-generated SAC wrapper for sacprelude_p::zero on long
 * scalars. Mirrors SACl_A's descriptor size/dim, calls
 * SACf_sacprelude_p__zero__l_S, and returns its out-value through SAC_arg_1.
 * Generated code -- do not hand-edit. */
 /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__l_S, , 2, out, long, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, long, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_745, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LON, )))))))))), long, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_745, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LON, )))))))))), long, ) SAC_ND_DECL__DESC( (SACp_cwc_745, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LON, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__l_S, , 2, out, long, SAC_SET_NT_USG( FAG, (SACp_cwc_745, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LON, ))))))))))), in, long, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__l_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_745, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LON, ))))))))))), long), 
SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))), long)) /* * ND_REFRESH__MIRROR( (SACp_cwc_745, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LON, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), (SACp_cwc_745, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), (SACp_cwc_745, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LON, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__l_S, , 2, out, long, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, long, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__ll_S(...) 
[ wrapper ] ****************************************************************************/
/* [review] Machine-generated SAC wrapper for sacprelude_p::zero on longlong
 * scalars. Mirrors SACl_A's descriptor size/dim, calls
 * SACf_sacprelude_p__zero__ll_S, and returns its out-value through SAC_arg_1.
 * Generated code -- do not hand-edit. */
 /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__ll_S, , 2, out, longlong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, longlong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_747, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LLO, )))))))))), longlong, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_747, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LLO, )))))))))), longlong, ) SAC_ND_DECL__DESC( (SACp_cwc_747, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__ll_S, , 2, out, longlong, SAC_SET_NT_USG( FAG, (SACp_cwc_747, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LLO, ))))))))))), in, longlong, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__ll_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_747, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(LLO, ))))))))))), longlong), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))), longlong)) /* * ND_REFRESH__MIRROR( (SACp_cwc_747, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LLO, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), (SACp_cwc_747, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), (SACp_cwc_747, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LLO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__ll_S, , 2, out, longlong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, longlong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__ub_S(...) 
[ wrapper ] ****************************************************************************/
/* [review] Machine-generated SAC wrapper for sacprelude_p::zero on ubyte
 * scalars. Mirrors SACl_A's descriptor size/dim, calls
 * SACf_sacprelude_p__zero__ub_S, and returns its out-value through SAC_arg_1.
 * Generated code -- do not hand-edit. */
 /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__ub_S, , 2, out, ubyte, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, ubyte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_749, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UBY, )))))))))), ubyte, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_749, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UBY, )))))))))), ubyte, ) SAC_ND_DECL__DESC( (SACp_cwc_749, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UBY, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__ub_S, , 2, out, ubyte, SAC_SET_NT_USG( FAG, (SACp_cwc_749, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UBY, ))))))))))), in, ubyte, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__ub_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_749, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UBY, ))))))))))), 
ubyte), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))), ubyte)) /* * ND_REFRESH__MIRROR( (SACp_cwc_749, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UBY, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), (SACp_cwc_749, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), (SACp_cwc_749, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UBY, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__ub_S, , 2, out, ubyte, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, ubyte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__us_S(...) 
[ wrapper ] ****************************************************************************/
/* [review] Machine-generated SAC wrapper for sacprelude_p::zero on ushort
 * scalars. Mirrors SACl_A's descriptor size/dim, calls
 * SACf_sacprelude_p__zero__us_S, and returns its out-value through SAC_arg_1.
 * Generated code -- do not hand-edit. */
 /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__us_S, , 2, out, ushort, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, ushort, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_751, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (USH, )))))))))), ushort, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_751, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (USH, )))))))))), ushort, ) SAC_ND_DECL__DESC( (SACp_cwc_751, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (USH, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__us_S, , 2, out, ushort, SAC_SET_NT_USG( FAG, (SACp_cwc_751, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (USH, ))))))))))), in, ushort, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__us_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_751, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (USH, 
))))))))))), ushort), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))), ushort)) /* * ND_REFRESH__MIRROR( (SACp_cwc_751, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (USH, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), (SACp_cwc_751, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), (SACp_cwc_751, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (USH, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__us_S, , 2, out, ushort, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, ushort, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__ui_S(...) 
[ wrapper ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__ui_S, , 2, out, uint, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, uint, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_753, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UIN, )))))))))), uint, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_753, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UIN, )))))))))), uint, ) SAC_ND_DECL__DESC( (SACp_cwc_753, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UIN, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__ui_S, , 2, out, uint, SAC_SET_NT_USG( FAG, (SACp_cwc_753, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UIN, ))))))))))), in, uint, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__ui_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_753, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UIN, ))))))))))), uint), 
SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))), uint)) /* * ND_REFRESH__MIRROR( (SACp_cwc_753, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UIN, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), (SACp_cwc_753, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), (SACp_cwc_753, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UIN, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__ui_S, , 2, out, uint, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, uint, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__ul_S(...) 
[ wrapper ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__ul_S, , 2, out, ulong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, ulong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_755, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULO, )))))))))), ulong, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_755, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULO, )))))))))), ulong, ) SAC_ND_DECL__DESC( (SACp_cwc_755, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULO, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__ul_S, , 2, out, ulong, SAC_SET_NT_USG( FAG, (SACp_cwc_755, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULO, ))))))))))), in, ulong, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__ul_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_755, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULO, ))))))))))), 
ulong), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))), ulong)) /* * ND_REFRESH__MIRROR( (SACp_cwc_755, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULO, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), (SACp_cwc_755, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), (SACp_cwc_755, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__ul_S, , 2, out, ulong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, ulong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__ull_S(...) 
[ wrapper ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__ull_S, , 2, out, ulonglong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, ulonglong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_757, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULL, )))))))))), ulonglong, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_757, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULL, )))))))))), ulonglong, ) SAC_ND_DECL__DESC( (SACp_cwc_757, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULL, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__ull_S, , 2, out, ulonglong, SAC_SET_NT_USG( FAG, (SACp_cwc_757, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULL, ))))))))))), in, ulonglong, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__ull_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_757, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (ULL, ))))))))))), ulonglong), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))), ulonglong)) /* * ND_REFRESH__MIRROR( (SACp_cwc_757, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULL, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), (SACp_cwc_757, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), (SACp_cwc_757, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULL, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__ull_S, , 2, out, ulonglong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, ulonglong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * WITH-loop Count: 0 * _MAIN::SACf__MAIN__main(...) 
[ body ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACf__MAIN__main, , 1, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACf__MAIN__main, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), unsigned char, -3) */ SAC_ND_DECL__DATA( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), unsigned char, ) SAC_ND_DECL__DESC( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0); int SAC_ND_A_MIRROR_SIZE( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) = 1; /* * ND_DECL( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, -7) */ SAC_ND_DECL__DATA( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, 
)))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 5; /* * ND_DECL( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, -7) */ SAC_ND_DECL__DATA( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 5; SAC_ND_DECL_CONST__DATA((SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, 
(NON, (NOT, (NDI, (INT, )))))))))), int, 2147483647) /* * ND_DECL( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() SAC_ND_DECL_CONST__DATA((SACp_emal_8395__cnstass_8337_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8394__cnstass_8336_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8393__cnstass_8335_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8392__cnstass_8334_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8391__cnstass_8333_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8390__cnstass_8332_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 7) SAC_ND_DECL_CONST__DATA((SACp_emal_8389__cnstass_8331_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 7) SAC_ND_DECL_CONST__DATA((SACp_emal_8388__cnstass_8330_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 32) SAC_ND_DECL_CONST__DATA((SACp_emal_8387__cnstass_8329_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 32) SAC_ND_DECL_CONST__DATA((SACp_emal_8386__cnstass_8328_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 32) /* * ND_DECL( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, 5, 32, 32, 32, 7, 7) */ SAC_ND_DECL__DATA( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, 
(GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) = 7; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) = 7; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 1605632; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 5; /* * ND_DECL( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 5, 32, 32, 32, 7, 7) */ SAC_ND_DECL__DATA( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, 
)))))))))), 3) = 7; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = 7; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 1605632; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 5; /* * ND_DECL( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() SAC_ND_DECL_CONST__DATA((SACp_emal_8380__cnstass_8327_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8379__cnstass_8326_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8378__cnstass_8325_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8377__cnstass_8324_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8376__cnstass_8323_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8375__cnstass_8322_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 7) SAC_ND_DECL_CONST__DATA((SACp_emal_8374__cnstass_8321_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 7) SAC_ND_DECL_CONST__DATA((SACp_emal_8373__cnstass_8320_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 32) SAC_ND_DECL_CONST__DATA((SACp_emal_8372__cnstass_8319_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 32) 
SAC_ND_DECL_CONST__DATA((SACp_emal_8371__cnstass_8318_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 32) /* * ND_DECL( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, 5, 32, 32, 32, 7, 7) */ SAC_ND_DECL__DATA( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) = 7; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) = 7; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 1605632; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 5; /* * ND_DECL( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 5, 32, 32, 32, 7, 7) */ SAC_ND_DECL__DATA( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = 32; const 
int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = 7; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = 7; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 1605632; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 5; SAC_ND_DECL_CONST__DATA((SACp_emal_8367__flat_471, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 2) SAC_ND_DECL_CONST__DATA((SACp_emal_8366__flat_469, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 4) /* * ND_DECL( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), unsigned char, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), unsigned char, ) SAC_ND_DECL__DESC( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, 
(NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 1, 1) */ SAC_ND_DECL__DATA( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = 1; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 1; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_8360__ivesli_8021, (SCL, (NHD, 
(NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_8358__pinl_1816__flat_252, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8358__pinl_1816__flat_252, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_8358__pinl_1816__flat_252, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( 
(SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, -7) */ SAC_ND_DECL__DATA( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 5; /* * ND_DECL( (SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * 
ND_DECL( (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_Interval__Interval, 0) */ SAC_ND_DECL__DATA( (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_Interval__Interval, ) SAC_ND_DECL__DESC( (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_String__string, 0) */ SAC_ND_DECL__DATA( (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_String__string, ) SAC_ND_DECL__DESC( (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf__MAIN_CL_INIT__init, , 0) */ SAC_ND_FUNAP2( SACf__MAIN_CL_INIT__init, ) /* * ND_FUN_AP( SACrandom, SACp_flat_444, 2, in_nodesc, int, SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in_nodesc, int, SAC_SET_NT_USG( FAG, (SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))) */ SACp_flat_444 = SACrandom( SAC_ND_ARG_in_nodesc( SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, 
(NDI, (INT, ))))))))))), int), SAC_ND_ARG_in_nodesc( SAC_SET_NT_USG( FAG, (SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)); SAC_ND_ALLOC__DESC((SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) /* * ND_REFRESH__MIRROR( (SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_NOOP() SAC_ND_SET__RC((SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) SAC_ND_ALLOC_BEGIN((SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 306, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) SAC_ND_PRF_S__DATA((SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_TOF, SAC_ND_READ((SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_DEC_RC_FREE((SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, ) SAC_ND_PRF_SxS__DATA((SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_DIV, SAC_ND_READ((SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0), 2147483648.0f) SAC_CUDA_ALLOC_BEGIN((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 5, 
32, 32, 32, 7, 7) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) == 5), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) == 7), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) == 7), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_CUDA_ALLOC_END((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) SAC_ND_ALLOC_BEGIN((SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 222, "Assignment with incompatible types found!"); SAC_NOOP() 
SAC_ND_ALLOC_END((SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, 
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * CUDA_GRID_BLOCK( 15, SAC_ND_READ( (SACp_emal_8390__cnstass_8332_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8389__cnstass_8331_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8388__cnstass_8330_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8387__cnstass_8329_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( 
(SACp_emal_8386__cnstass_8328_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8395__cnstass_8337_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8394__cnstass_8336_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8393__cnstass_8335_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8392__cnstass_8334_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8391__cnstass_8333_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, 0, 0, 0, 0) */ { dim3 grid((SAC_ND_READ( (SACp_emal_8387__cnstass_8329_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8392__cnstass_8334_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_8386__cnstass_8328_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8391__cnstass_8333_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0))); if (grid.x > 2147483647 || grid.y > 65535 || grid.z > 65535) { SAC_RuntimeError("CUDA XYZ grid dimension exceeds compute compatibilities max value: 2147483647 x 65535 x 65535"); } dim3 block((SAC_ND_READ( (SACp_emal_8390__cnstass_8332_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8395__cnstass_8337_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_8389__cnstass_8331_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8394__cnstass_8336_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_8388__cnstass_8330_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( 
(SACp_emal_8393__cnstass_8335_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0))); if (block.x > 2147483647 || block.y > 65535 || block.z > 65535) { SAC_RuntimeError("CUDA XYZ block dimension exceeds compute compatibilities max value: 2147483647 x 65535 x 65535"); } /* * CUDA_GLOBALFUN_AP( SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), inout, float, 5, SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8390__cnstass_8332_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8389__cnstass_8331_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8388__cnstass_8330_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8387__cnstass_8329_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8386__cnstass_8328_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8395__cnstass_8337_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8394__cnstass_8336_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8393__cnstass_8335_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8392__cnstass_8334_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8391__cnstass_8333_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, )))))))))))) */ hipLaunchKernelGGL(( SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f), dim3(grid), dim3(block), 0, 0, SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), float), SAC_CUDA_ARG_inout( SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), float), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 0), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 1), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 2), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 3), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 4), SAC_ND_A_MIRROR_SIZE(SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_ND_A_MIRROR_DIM(SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8390__cnstass_8332_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8389__cnstass_8331_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8388__cnstass_8330_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8387__cnstass_8329_ub, (SCL, 
(NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8386__cnstass_8328_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8395__cnstass_8337_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8394__cnstass_8336_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8393__cnstass_8335_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8392__cnstass_8334_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8391__cnstass_8333_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)); SAC_CUDA_GET_LAST_KERNEL_ERROR(); } /* * ND_REFRESH__MIRROR( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 5) */ SAC_NOOP() SAC_ND_FREE((SACp_emal_8390__cnstass_8332_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8389__cnstass_8331_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8388__cnstass_8330_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8387__cnstass_8329_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8386__cnstass_8328_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8395__cnstass_8337_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8394__cnstass_8336_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8393__cnstass_8335_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8392__cnstass_8334_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8391__cnstass_8333_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_ND_FREE((SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_ALLOC_BEGIN((SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 5, 32, 32, 32, 7, 7) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 5), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, 
(NON, (NOT, (NDI, (FLO, )))))))))), 2) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) == 7), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) == 7), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) /* * CUDA_MEM_TRANSFER( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, hipMemcpyDeviceToHost) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_ND_A_SIZE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), 222, "hipMemcpy: Destionation and source arrays should have equal sizes!"); SAC_CUDA_MEM_TRANSFER((SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, hipMemcpyDeviceToHost) SAC_CUDA_FREE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) /* * ND_FUN_AP( SACrandom, SACp_flat_457, 2, in_nodesc, int, SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in_nodesc, int, SAC_SET_NT_USG( FAG, (SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))) */ SACp_flat_457 = SACrandom( SAC_ND_ARG_in_nodesc( SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, 
(NDI, (INT, ))))))))))), int), SAC_ND_ARG_in_nodesc( SAC_SET_NT_USG( FAG, (SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)); SAC_ND_ALLOC__DESC((SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) /* * ND_REFRESH__MIRROR( (SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_NOOP() SAC_ND_SET__RC((SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) SAC_ND_ALLOC_BEGIN((SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 306, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) SAC_ND_PRF_S__DATA((SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_TOF, SAC_ND_READ((SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_DEC_RC_FREE((SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, ) SAC_ND_PRF_SxS__DATA((SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_DIV, SAC_ND_READ((SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0), 2147483648.0f) SAC_CUDA_ALLOC_BEGIN((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 5, 
32, 32, 32, 7, 7) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) == 5), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) == 7), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) == 7), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_CUDA_ALLOC_END((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) SAC_ND_ALLOC_BEGIN((SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 222, "Assignment with incompatible types found!"); SAC_NOOP() 
SAC_ND_ALLOC_END((SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, 
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * CUDA_GRID_BLOCK( 15, SAC_ND_READ( (SACp_emal_8375__cnstass_8322_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8374__cnstass_8321_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8373__cnstass_8320_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8372__cnstass_8319_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( 
(SACp_emal_8371__cnstass_8318_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8380__cnstass_8327_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8379__cnstass_8326_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8378__cnstass_8325_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8377__cnstass_8324_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8376__cnstass_8323_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, 0, 0, 0, 0) */ { dim3 grid((SAC_ND_READ( (SACp_emal_8372__cnstass_8319_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8377__cnstass_8324_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_8371__cnstass_8318_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8376__cnstass_8323_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0))); if (grid.x > 2147483647 || grid.y > 65535 || grid.z > 65535) { SAC_RuntimeError("CUDA XYZ grid dimension exceeds compute compatibilities max value: 2147483647 x 65535 x 65535"); } dim3 block((SAC_ND_READ( (SACp_emal_8375__cnstass_8322_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8380__cnstass_8327_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_8374__cnstass_8321_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8379__cnstass_8326_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_8373__cnstass_8320_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( 
(SACp_emal_8378__cnstass_8325_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0))); if (block.x > 2147483647 || block.y > 65535 || block.z > 65535) { SAC_RuntimeError("CUDA XYZ block dimension exceeds compute compatibilities max value: 2147483647 x 65535 x 65535"); } /* * CUDA_GLOBALFUN_AP( SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), inout, float, 5, SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8375__cnstass_8322_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8374__cnstass_8321_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8373__cnstass_8320_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8372__cnstass_8319_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8371__cnstass_8318_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8380__cnstass_8327_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8379__cnstass_8326_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8378__cnstass_8325_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8377__cnstass_8324_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8376__cnstass_8323_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, )))))))))))) */ hipLaunchKernelGGL(( SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f), dim3(grid), dim3(block), 0, 0, SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), float), SAC_CUDA_ARG_inout( SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), float), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 0), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 1), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 2), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 3), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 4), SAC_ND_A_MIRROR_SIZE(SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_ND_A_MIRROR_DIM(SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8375__cnstass_8322_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8374__cnstass_8321_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8373__cnstass_8320_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8372__cnstass_8319_ub, (SCL, 
(NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8371__cnstass_8318_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8380__cnstass_8327_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8379__cnstass_8326_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8378__cnstass_8325_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8377__cnstass_8324_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8376__cnstass_8323_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)); SAC_CUDA_GET_LAST_KERNEL_ERROR(); } /* * ND_REFRESH__MIRROR( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 5) */ SAC_NOOP() SAC_ND_FREE((SACp_emal_8375__cnstass_8322_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8374__cnstass_8321_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8373__cnstass_8320_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8372__cnstass_8319_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8371__cnstass_8318_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8380__cnstass_8327_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8379__cnstass_8326_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8378__cnstass_8325_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8377__cnstass_8324_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8376__cnstass_8323_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_ND_FREE((SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_ALLOC_BEGIN((SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 5, 32, 32, 32, 7, 7) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 5), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, 
(GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) == 7), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) == 7), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) /* * CUDA_MEM_TRANSFER( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, hipMemcpyDeviceToHost) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_ND_A_SIZE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), 222, "hipMemcpy: Destionation and source arrays should have equal sizes!"); SAC_CUDA_MEM_TRANSFER((SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, hipMemcpyDeviceToHost) SAC_CUDA_FREE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) SAC_ND_ALLOC_BEGIN((SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 1, 1, unsigned char) /* * ND_SET__SHAPE_arr( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) == 1), 1, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE 
((SAC_ND_A_SHAPE( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0) == 5), 1, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 1, 1, unsigned char) SAC_ND_CREATE__STRING__DATA((SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), "work") /* * ND_ASSIGN( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), -3, (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 1, ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) == 1), 51, "Assignment with incompatible types found!"); SAC_ND_A_DESC( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) = SAC_ND_A_DESC( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0); SAC_ND_A_MIRROR_SIZE( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) = SAC_ND_A_SIZE( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0); SAC_ND_A_DESC_SIZE( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) = SAC_ND_A_SIZE( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, 
(INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))); SAC_ND_ASSIGN__DATA( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), ) /* * ND_FUN_AP( to_string, , 3, out, SACt_String__string, SAC_SET_NT_USG( FAG, (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), in, unsigned char, SAC_SET_NT_USG( FAG, (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))), in_nodesc, int, SAC_SET_NT_USG( FAG, (SACp_emal_8366__flat_469, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))) */ SAC_ND_FUNAP2( to_string, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), SACt_String__string), SAC_ND_ARG_in( SAC_SET_NT_USG( FAG, (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))), unsigned char), SAC_ND_ARG_in_nodesc( SAC_SET_NT_USG( FAG, (SACp_emal_8366__flat_469, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)) /* * ND_REFRESH__MIRROR( (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) */ SAC_NOOP() SAC_ND_DEC_RC_FREE((SACp_emal_8366__flat_469, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, ) /* * ND_FUN_AP( SACf_C99Benchmarking__getInterval__SACt_String__string__i__i, , 4, out, SACt_Interval__Interval, SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), in, SACt_String__string, SAC_SET_NT_USG( FAG, (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), in, int, SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, SAC_SET_NT_USG( FAG, (SACp_emal_8367__flat_471, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))))) */ SAC_ND_FUNAP2( SACf_C99Benchmarking__getInterval__SACt_String__string__i__i, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), SACt_Interval__Interval), SAC_ND_ARG_in( SAC_SET_NT_USG( FAG, (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), SACt_String__string), SAC_ND_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_ND_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8367__flat_471, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)) /* * ND_REFRESH__MIRROR( (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_AP( SACf_C99Benchmarking__start__SACt_C99Benchmarking__Interval, , 1, inout, SACt_Interval__Interval, SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))) */ SAC_ND_FUNAP2( SACf_C99Benchmarking__start__SACt_C99Benchmarking__Interval, SAC_ND_ARG_inout( SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), SACt_Interval__Interval)) /* * ND_FUN_AP( SACrandom, SACp_flat_475, 2, in_nodesc, int, SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in_nodesc, int, SAC_SET_NT_USG( FAG, (SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))) */ SACp_flat_475 = SACrandom( SAC_ND_ARG_in_nodesc( SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_ND_ARG_in_nodesc( SAC_SET_NT_USG( FAG, (SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)); SAC_ND_ALLOC__DESC((SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, 
(NDI, (INT, )))))))))), 0) /* * ND_REFRESH__MIRROR( (SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_NOOP() SAC_ND_SET__RC((SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) SAC_ND_DEC_RC_FREE((SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, ) SAC_ND_ALLOC_BEGIN((SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 306, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) SAC_ND_PRF_S__DATA((SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_TOF, SAC_ND_READ((SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_DEC_RC_FREE((SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, ) SAC_ND_PRF_SxS__DATA((SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_DIV, SAC_ND_READ((SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0), 2147483648.0f) SAC_ND_ALLOC_BEGIN((SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 1, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 1) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 1), 54, 
"Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) == 1), 54, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 1, float) /* * ND_CREATE__ARRAY__DATA( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 1, (SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) */ SAC_ND_WRITE_COPY( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, SAC_ND_READ( (SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) /* * ND_ASSIGN( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 5, ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 5), 54, "Assignment with incompatible types found!"); SAC_ND_A_DESC( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, 
(NON, (NOT, (NDI, (FLO, )))))))))), 1); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_ND_A_MIRROR_SIZE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_SIZE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = 
SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_ND_A_DESC_SIZE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_SIZE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ND_ASSIGN__DATA( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) /* * ND_ASSIGN( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 5, ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 5), 54, "Assignment with incompatible types found!"); SAC_ND_A_DESC( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, 
(NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_ND_A_MIRROR_SIZE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_SIZE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_ND_A_DESC_SIZE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_SIZE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ND_ASSIGN__DATA( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_8383_I, (AKS, 
(NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) /* * ND_FUN_AP( SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1, , 4, out, float, SAC_SET_NT_USG( FAG, (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), in, float, SAC_SET_NT_USG( FAG, (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), in, float, SAC_SET_NT_USG( FAG, (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), in, float, SAC_SET_NT_USG( FAG, (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))))) */ SAC_ND_FUNAP2( SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), float), SAC_ND_ARG_in( SAC_SET_NT_USG( FAG, (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), float), SAC_ND_ARG_in( SAC_SET_NT_USG( FAG, (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), float), SAC_ND_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), float)) /* * ND_REFRESH__MIRROR( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7) */ SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, 
(FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_ND_A_MIRROR_SIZE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ND_ALLOC_BEGIN((SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 54, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 54, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_PRF_SHAPE_A__DATA( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7) */ SAC_TR_PRF_PRINT( ("ND_PRF_SHAPE_A__DATA( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7)")) SAC_ND_WRITE_COPY( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_A_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0), ); SAC_ND_WRITE_COPY( 
(SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, SAC_ND_A_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1), ); SAC_ND_WRITE_COPY( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2, SAC_ND_A_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2), ); SAC_ND_WRITE_COPY( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, SAC_ND_A_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3), ); SAC_ND_WRITE_COPY( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4, SAC_ND_A_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4), ); /* * ND_FUN_AP( SACf_C99Benchmarking__end__SACt_C99Benchmarking__Interval, , 1, inout, SACt_Interval__Interval, SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))) */ SAC_ND_FUNAP2( SACf_C99Benchmarking__end__SACt_C99Benchmarking__Interval, SAC_ND_ARG_inout( SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), SACt_Interval__Interval)) /* * ND_FUN_AP( SACf_C99Benchmarking__printResult__SACt_C99Benchmarking__Interval, , 1, inout, SACt_Interval__Interval, SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))) */ SAC_ND_FUNAP2( SACf_C99Benchmarking__printResult__SACt_C99Benchmarking__Interval, SAC_ND_ARG_inout( SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), SACt_Interval__Interval)) SAC_ND_DEC_RC_FREE((SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, ) /* * ND_IDXS2OFFSET_id( (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, 
(SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_DEC_RC_FREE((SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), 1, ) SAC_ND_FREE((SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_ND_FREE((SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_PRF_S__DATA((SACp_emal_8358__pinl_1816__flat_252, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_TOI, SAC_ND_READ((SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0)) SAC_ND_FREE((SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 
(SACp_emal_8358__pinl_1816__flat_252, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), (SACp_emal_8358__pinl_1816__flat_252, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACf__MAIN__main, , 1, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * WITH-loop Count: 0 * _MAIN:_INIT::SACf__MAIN_CL_INIT__init(...) [ body ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACf__MAIN_CL_INIT__init, , 0) */ SAC_ND_DEF_FUN_BEGIN2( SACf__MAIN_CL_INIT__init, void, void) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_World_CL_INIT__init_TheWorld__SACt_World__World, , 1, inout, SACt_World__World, SAC_SET_NT_USG( TFA, (SACo_World__TheWorld, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, )))))))))) */ SAC_ND_FUNAP2( SACf_World_CL_INIT__init_TheWorld__SACt_World__World, SAC_ND_ARG_inout( SAC_SET_NT_USG( TFA, (SACo_World__TheWorld, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, ))))))))), SACt_World__World)) /* * ND_FUN_AP( SACf_C99Benchmarking_CL_INIT__init_TheBenchmarkObject__SACt_C99Benchmarking__C99Benchmarking, , 1, inout, SACt_C99Benchmarking__C99Benchmarking, SAC_SET_NT_USG( TFA, (SACo_C99Benchmarking__TheBenchmarkObject, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, )))))))))) */ SAC_ND_FUNAP2( SACf_C99Benchmarking_CL_INIT__init_TheBenchmarkObject__SACt_C99Benchmarking__C99Benchmarking, SAC_ND_ARG_inout( SAC_SET_NT_USG( TFA, (SACo_C99Benchmarking__TheBenchmarkObject, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, ))))))))), SACt_C99Benchmarking__C99Benchmarking)) /* * ND_FUN_AP( SACf_MTClock_CL_INIT__init_TheMTClock__SACt_MTClock__MTClock, , 
1, inout, SACt_MTClock__MTClock, SAC_SET_NT_USG( TFA, (SACo_MTClock__TheMTClock, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, )))))))))) */ SAC_ND_FUNAP2( SACf_MTClock_CL_INIT__init_TheMTClock__SACt_MTClock__MTClock, SAC_ND_ARG_inout( SAC_SET_NT_USG( TFA, (SACo_MTClock__TheMTClock, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, ))))))))), SACt_MTClock__MTClock)) /* * ND_FUN_AP( SACf_Terminal_CL_INIT__init_TheTerminal__SACt_Terminal__Terminal, , 1, inout, SACt_Terminal__Terminal, SAC_SET_NT_USG( TFA, (SACo_Terminal__TheTerminal, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, )))))))))) */ SAC_ND_FUNAP2( SACf_Terminal_CL_INIT__init_TheTerminal__SACt_Terminal__Terminal, SAC_ND_ARG_inout( SAC_SET_NT_USG( TFA, (SACo_Terminal__TheTerminal, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, ))))))))), SACt_Terminal__Terminal)) /* * ND_FUN_AP( SACf_TermFile_CL_INIT__init_stdout__SACt_TermFile__TermFile, , 1, inout, SACt_TermFile__TermFile, SAC_SET_NT_USG( TFA, (SACo_TermFile__stdout, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, )))))))))) */ SAC_ND_FUNAP2( SACf_TermFile_CL_INIT__init_stdout__SACt_TermFile__TermFile, SAC_ND_ARG_inout( SAC_SET_NT_USG( TFA, (SACo_TermFile__stdout, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, ))))))))), SACt_TermFile__TermFile)) /* * ND_FUN_RET( , 0) */ return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACf__MAIN_CL_INIT__init, , 0) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACf__MAIN_CLsacprelude_p__zero__f_1(...) 
[ body ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACf__MAIN_CLsacprelude_p__zero__f_1, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACf__MAIN_CLsacprelude_p__zero__f_1, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { SAC_ND_DECL_CONST__DATA((SACp_emal_9281__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0.0f) /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, 1) */ const int SAC_ND_A_MIRROR_SHAPE( (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0) = 1; const int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = 1; const int SAC_ND_A_MIRROR_DIM( (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = 1; SAC_INIT_LOCAL_MEM() SAC_ND_DEC_RC_FREE((SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, ) /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9281__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9281__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACf__MAIN_CLsacprelude_p__zero__f_1, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKS, (NHD, (NUQ, 
(FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * WITH-loop Count: 0 * _MAIN::SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1(...) [ body ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1, , 4, out, float, (SAC_arg_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { SAC_ND_DECL_CONST__DATA((SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1) /* * ND_DECL( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( 
(SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; SAC_ND_DECL_CONST__DATA((SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 2) SAC_ND_DECL_CONST__DATA((SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 3) SAC_ND_DECL_CONST__DATA((SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 4) /* * ND_DECL( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9348__pinl_2037__flat_68, 
(SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) 
SAC_NOTHING() /* * ND_DECL( (SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, 
(INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9334__pinl_2271_z, 
(SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9324__wlbsc_1562_sc_e, 
(SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) 
SAC_NOTHING() /* * ND_DECL( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() SAC_ND_DECL_CONST__DATA((SACp_emal_9315__flat_67, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0.0f) /* * ND_DECL( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( 
(SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, -5) */ SAC_ND_DECL__DATA( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 3; /* * ND_DECL( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, -7) */ SAC_ND_DECL__DATA( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9296_O, 
(AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 5; /* * ND_DECL( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, -7) */ SAC_ND_DECL__DATA( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( 
(SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 5; /* * ND_DECL( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 1; SAC_ND_DECL_CONST__DATA((SACp_emal_9292__cnstass_8342_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_9291__cnstass_8341_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_9290__cnstass_8340_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_9289__cnstass_8339_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) 
SAC_ND_DECL_CONST__DATA((SACp_emal_9288__cnstass_8338_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) /* * ND_DECL( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, -7) */ SAC_ND_DECL__DATA( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 5; /* * ND_DECL( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, -7) */ SAC_ND_DECL__DATA( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, 
(NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 5; /* * ND_DECL( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( 
(SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 3) */ SAC_ND_DECL__DATA( (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 3; const int SAC_ND_A_MIRROR_SIZE( (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 3; const int SAC_ND_A_MIRROR_DIM( (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ 
SAC_ND_DECL__DATA( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 3) */ SAC_ND_DECL__DATA( (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 3; const int SAC_ND_A_MIRROR_SIZE( (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 3; const int SAC_ND_A_MIRROR_DIM( (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( 
(SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const 
int SAC_ND_A_MIRROR_DIM( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL__MIRROR_PARAM( (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, 1) */ const int SAC_ND_A_MIRROR_SHAPE( (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0) = 1; const int SAC_ND_A_MIRROR_SIZE( (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = 1; const int SAC_ND_A_MIRROR_DIM( (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = 1; /* * ND_DECL__MIRROR_PARAM( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7) */ int SAC_ND_A_MIRROR_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, 
(NOT, (NDI, (FLO, ))))))))))) = 5; /* * ND_DECL__MIRROR_PARAM( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7) */ int SAC_ND_A_MIRROR_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = 5; SAC_INIT_LOCAL_MEM() SAC_ND_ALLOC_BEGIN((SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 19, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( 
(SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 19, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_PRF_SHAPE_A__DATA( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7) */ SAC_TR_PRF_PRINT( ("ND_PRF_SHAPE_A__DATA( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7)")) SAC_ND_WRITE_COPY( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_A_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0), ); SAC_ND_WRITE_COPY( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, SAC_ND_A_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1), ); SAC_ND_WRITE_COPY( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2, SAC_ND_A_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2), ); SAC_ND_WRITE_COPY( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, SAC_ND_A_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3), ); SAC_ND_WRITE_COPY( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4, SAC_ND_A_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4), ); SAC_ND_ALLOC_BEGIN((SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9354__isaa_4937_I, 
(AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 19, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 19, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_PRF_SHAPE_A__DATA( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7) */ SAC_TR_PRF_PRINT( ("ND_PRF_SHAPE_A__DATA( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7)")) SAC_ND_WRITE_COPY( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0), ); SAC_ND_WRITE_COPY( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1), ); SAC_ND_WRITE_COPY( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2, SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2), ); SAC_ND_WRITE_COPY( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3), ); SAC_ND_WRITE_COPY( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 4, SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4), ); /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 
(SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 149, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 149, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 149, 
"1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 149, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_PRF_SxS__DATA((SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_SUB, SAC_ND_READ((SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 
SAC_ND_READ((SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 149, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_PRF_SxS__DATA((SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_SUB, SAC_ND_READ((SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_PRF_SxS__DATA((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_MAX, 0, SAC_ND_READ((SACp_emal_9347__pinl_2046__flat_302, 
(SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_ALLOC_BEGIN((SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 197, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 197, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) SAC_ND_COPY__DATA((SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, 
(NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 197, "2nd argument of _idx_modarray_AxSxS_ is not a scalar!"); { int SAC_idx; SAC_idx = SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_ND_WRITE_COPY( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_idx, SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) , ) } SAC_ND_PRF_SxS__DATA((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_MAX, 0, SAC_ND_READ((SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_ALLOC_BEGIN((SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 197, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 197, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) SAC_ND_COPY__DATA((SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * 
ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 197, "2nd argument of _idx_modarray_AxSxS_ is not a scalar!"); { int SAC_idx; SAC_idx = SAC_ND_READ( (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_ND_WRITE_COPY( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_idx, SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) , ) } SAC_ND_PRF_SxS__DATA((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_MAX, 0, SAC_ND_READ((SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_ALLOC_BEGIN((SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, 
(AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 197, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 197, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) SAC_ND_COPY__DATA((SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 
197, "2nd argument of _idx_modarray_AxSxS_ is not a scalar!"); { int SAC_idx; SAC_idx = SAC_ND_READ( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_ND_WRITE_COPY( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_idx, SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) , ) } SAC_ND_PRF_SxS__DATA((SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_MAX, 0, SAC_ND_READ((SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_ALLOC_BEGIN((SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 197, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 197, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) SAC_ND_COPY__DATA((SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 
(SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 197, "2nd argument of _idx_modarray_AxSxS_ is not a scalar!"); { int SAC_idx; SAC_idx = SAC_ND_READ( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_ND_WRITE_COPY( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_idx, SAC_ND_READ( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) , ) } SAC_ND_PRF_SxS__DATA((SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_MAX, 0, SAC_ND_READ((SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, 
(NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 
0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, 
(NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, 
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) 
SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * 
ND_PRF_IDX_SEL__DATA( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_ALLOC_BEGIN((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 26, "Assignment with incompatible types found!"); SAC_NOOP() 
SAC_ND_ALLOC_END((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 26, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_PRF_SxS__DATA((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_MIN, SAC_ND_READ((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_ALLOC_BEGIN((SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) /* * ND_COPY__SHAPE( 
(SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 5), 25, "Assignment with incompatible types found!"); { int SAC_size = 1; SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4); SAC_ND_A_DESC_SIZE( 
(SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_MIRROR_SIZE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_size * SAC_ND_A_SIZE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_MIRROR_SIZE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) >= 0), 25, "Array with size <0 found!"); } SAC_ND_ALLOC_END((SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) SAC_ND_ALLOC_BEGIN((SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_o, (SCL, (NHD, 
(NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_PF_BEGIN_WITH(modarray) /* * WL_SCHEDULE__BEGIN( 5) */ { int SAC_WL_MT_SCHEDULE_START( 0); int SAC_WL_MT_SCHEDULE_STOP( 0); int SAC_WL_MT_SCHEDULE_START( 1); int SAC_WL_MT_SCHEDULE_STOP( 1); int SAC_WL_MT_SCHEDULE_START( 2); int SAC_WL_MT_SCHEDULE_STOP( 2); int SAC_WL_MT_SCHEDULE_START( 3); int SAC_WL_MT_SCHEDULE_STOP( 3); int SAC_WL_MT_SCHEDULE_START( 4); int SAC_WL_MT_SCHEDULE_STOP( 4); /* * 
WL_DECLARE_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5) */ int SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); /* * WL_DEFINE_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5) */ { int SAC_i; SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = 1 * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = 1 * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, 
(AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = 1 * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = 1 * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = 1; } /* * MT_SCHEDULER_BEGIN( 0, 5, 0, SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, 0, 0, SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_WL_MT_SCHEDULE_START( 0) = 0; SAC_WL_MT_SCHEDULE_STOP( 0) = SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 1) = SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_STOP( 1) = SAC_ND_READ( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 2) = 0; SAC_WL_MT_SCHEDULE_STOP( 2) = SAC_ND_READ( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 0); SAC_WL_MT_SCHEDULE_START( 3) = 0; SAC_WL_MT_SCHEDULE_STOP( 3) = SAC_ND_READ( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 4) = 0; SAC_WL_MT_SCHEDULE_STOP( 4) = SAC_ND_READ( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * WL_INIT_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_WL_MT_SCHEDULE_START( 0) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) + SAC_WL_MT_SCHEDULE_START( 1) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) + SAC_WL_MT_SCHEDULE_START( 2) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) + SAC_WL_MT_SCHEDULE_START( 3) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) + SAC_WL_MT_SCHEDULE_START( 4) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(1, 
(SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) /* * WL_SET_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, 5, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_WL_MT_SCHEDULE_START( 4) ) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_ND_ALLOC_BEGIN((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, 
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 69, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * WL_ASSIGN( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, 
)))))))))), 0, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == (SAC_ND_A_DIM( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) - SAC_ND_A_SIZE( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))))), 26, "WL expression with illegal dimension found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4)), 26, "WL expression with illegal size found!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_WL_INC_OFFSET((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) SAC_WL_GRID_UNROLL_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 
SAC_ND_READ((SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 
(SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) /* * MT_SCHEDULER_END( 0, 5, 0, SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, 0, 0, SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ /* * MT_SCHEDULER_BEGIN( 0, 5, 0, 0, SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, 0, SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_WL_MT_SCHEDULE_START( 0) = 0; SAC_WL_MT_SCHEDULE_STOP( 0) = SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 1) = 0; SAC_WL_MT_SCHEDULE_STOP( 1) = SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 2) = SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_STOP( 2) = SAC_ND_READ( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 3) = 0; SAC_WL_MT_SCHEDULE_STOP( 3) = SAC_ND_READ( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 4) = 0; SAC_WL_MT_SCHEDULE_STOP( 4) = SAC_ND_READ( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * WL_INIT_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_WL_MT_SCHEDULE_START( 0) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) + SAC_WL_MT_SCHEDULE_START( 1) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) + SAC_WL_MT_SCHEDULE_START( 2) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) + SAC_WL_MT_SCHEDULE_START( 3) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) + SAC_WL_MT_SCHEDULE_START( 4) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 
SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) /* * WL_SET_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 3, 5, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_WL_MT_SCHEDULE_START( 4) ) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, 
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_ND_ALLOC_BEGIN((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, 
(NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 69, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 
SAC_ND_READ( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * WL_ASSIGN( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == (SAC_ND_A_DIM( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) - SAC_ND_A_SIZE( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))))), 26, "WL expression with illegal dimension found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4)), 26, "WL expression with illegal size found!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_WL_INC_OFFSET((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) SAC_WL_GRID_UNROLL_END(4, (SACp_flat_19, 
(AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) /* * MT_SCHEDULER_END( 0, 5, 0, 0, SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, 0, SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ /* * MT_SCHEDULER_BEGIN( 0, 5, 0, 0, 0, SAC_ND_READ( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ 
SAC_WL_MT_SCHEDULE_START( 0) = 0; SAC_WL_MT_SCHEDULE_STOP( 0) = SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 1) = 0; SAC_WL_MT_SCHEDULE_STOP( 1) = SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 2) = 0; SAC_WL_MT_SCHEDULE_STOP( 2) = SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 3) = SAC_ND_READ( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_STOP( 3) = SAC_ND_READ( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 4) = 0; SAC_WL_MT_SCHEDULE_STOP( 4) = SAC_ND_READ( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * WL_INIT_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_WL_MT_SCHEDULE_START( 0) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) + SAC_WL_MT_SCHEDULE_START( 1) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) + SAC_WL_MT_SCHEDULE_START( 2) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) + SAC_WL_MT_SCHEDULE_START( 3) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) + SAC_WL_MT_SCHEDULE_START( 4) * 
SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 
0), 1) SAC_WL_GRID_UNROLL_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) /* * WL_SET_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, 5, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_WL_MT_SCHEDULE_START( 4) ) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_ND_ALLOC_BEGIN((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, 
(NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, ))))))))))) == 0), 69, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * WL_ASSIGN( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == (SAC_ND_A_DIM( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) - SAC_ND_A_SIZE( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))))), 26, "WL expression with illegal dimension found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4)), 26, "WL expression with illegal size found!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, 
(GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_WL_INC_OFFSET((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) SAC_WL_GRID_UNROLL_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) /* * MT_SCHEDULER_END( 0, 5, 0, 0, 0, SAC_ND_READ( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ /* * MT_SCHEDULER_BEGIN( 0, 5, 0, 0, 0, 0, SAC_ND_READ( (SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, 
(NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_WL_MT_SCHEDULE_START( 0) = 0; SAC_WL_MT_SCHEDULE_STOP( 0) = SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 1) = 0; SAC_WL_MT_SCHEDULE_STOP( 1) = SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 2) = 0; SAC_WL_MT_SCHEDULE_STOP( 2) = SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 3) = 0; SAC_WL_MT_SCHEDULE_STOP( 3) = SAC_ND_READ( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 4) = SAC_ND_READ( (SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_STOP( 4) = SAC_ND_READ( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * WL_INIT_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_WL_MT_SCHEDULE_START( 0) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) + SAC_WL_MT_SCHEDULE_START( 1) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) + SAC_WL_MT_SCHEDULE_START( 2) * SAC_WL_SHAPE_FACTOR( 
(SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) + SAC_WL_MT_SCHEDULE_START( 3) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) + SAC_WL_MT_SCHEDULE_START( 4) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, 
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) /* * WL_SET_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, 5, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_WL_MT_SCHEDULE_START( 4) ) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, 
(AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_ND_ALLOC_BEGIN((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( 
(SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, 
(AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 69, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * WL_ASSIGN( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == (SAC_ND_A_DIM( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) - SAC_ND_A_SIZE( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))))), 26, "WL expression with illegal dimension found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4)), 26, "WL expression with illegal size found!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( 
(SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_WL_INC_OFFSET((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) SAC_WL_GRID_UNROLL_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 
SAC_ND_READ((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) /* * MT_SCHEDULER_END( 0, 5, 0, 0, 0, 0, SAC_ND_READ( (SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ /* * MT_SCHEDULER_BEGIN( 0, 5, 0, 0, 0, 0, 0, SAC_ND_READ( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( 
(SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_WL_MT_SCHEDULE_START( 0) = 0; SAC_WL_MT_SCHEDULE_STOP( 0) = SAC_ND_READ( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 1) = 0; SAC_WL_MT_SCHEDULE_STOP( 1) = SAC_ND_READ( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 2) = 0; SAC_WL_MT_SCHEDULE_STOP( 2) = SAC_ND_READ( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 3) = 0; SAC_WL_MT_SCHEDULE_STOP( 3) = SAC_ND_READ( (SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 4) = 0; SAC_WL_MT_SCHEDULE_STOP( 4) = SAC_ND_READ( (SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * WL_INIT_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_WL_MT_SCHEDULE_START( 0) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) + 
SAC_WL_MT_SCHEDULE_START( 1) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) + SAC_WL_MT_SCHEDULE_START( 2) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) + SAC_WL_MT_SCHEDULE_START( 3) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) + SAC_WL_MT_SCHEDULE_START( 4) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) /* * WL_SET_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, 5, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ 
SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_WL_MT_SCHEDULE_START( 4) ) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_ND_INC_RC((SACp_emal_9315__flat_67, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) SAC_ND_ALLOC_BEGIN((SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 
(SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, 
(SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 3, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3, SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 3), 26, "Assignment with incompatible types found!"); { int SAC_size = 1; SAC_size *= SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_size *= SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_size *= SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_ND_A_DESC_SIZE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_MIRROR_SIZE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_size; SAC_ASSURE_TYPE_LINE ((SAC_ND_A_MIRROR_SIZE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) >= 0), 26, "Array with size <0 found!"); } SAC_ND_ALLOC_END((SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 3, 
float) SAC_ND_ALLOC_BEGIN((SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 26, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 26, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 26, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 26, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, 
(INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_PF_BEGIN_WITH(genarray) /* * WL_SCHEDULE__BEGIN( 3) */ { int SAC_WL_MT_SCHEDULE_START( 0); int SAC_WL_MT_SCHEDULE_STOP( 0); int SAC_WL_MT_SCHEDULE_START( 1); int SAC_WL_MT_SCHEDULE_STOP( 1); int SAC_WL_MT_SCHEDULE_START( 2); int SAC_WL_MT_SCHEDULE_STOP( 2); /* * WL_DECLARE_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -5, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) */ int SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); /* * WL_DEFINE_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -5, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) */ { int SAC_i; SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = 1 * SAC_ND_A_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_A_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = 1 * SAC_ND_A_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = 1; } /* * MT_SCHEDULER_BEGIN( 0, 3, 0, 0, 0, SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( 
(SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_WL_MT_SCHEDULE_START( 0) = 0; SAC_WL_MT_SCHEDULE_STOP( 0) = SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 1) = 0; SAC_WL_MT_SCHEDULE_STOP( 1) = SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 2) = 0; SAC_WL_MT_SCHEDULE_STOP( 2) = SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * WL_INIT_OFFSET( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -5, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) */ SAC_ND_WRITE( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_WL_MT_SCHEDULE_START( 0) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) + SAC_WL_MT_SCHEDULE_START( 1) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) + SAC_WL_MT_SCHEDULE_START( 2) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_WL_STRIDE_LOOP0_BEGIN(0, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(0, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, 
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(1, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(1, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) /* * WL_SET_OFFSET( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 3, (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -5, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_A_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * ( SAC_ND_A_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_READ( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_WL_MT_SCHEDULE_START( 2) ) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_WL_STRIDE_LOOP0_BEGIN(2, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(2, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_ND_ALLOC_BEGIN((SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, 
(NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_PRF_SxS__DATA((SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_ADD, SAC_ND_READ((SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_ALLOC_BEGIN((SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 152, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( 
(SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_ALLOC_BEGIN((SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, 
(NDI, (INT, )))))))))), 5, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_PRF_SxS__DATA((SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_ADD, SAC_ND_READ((SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_ALLOC_BEGIN((SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 152, 
"Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_PRF_SxS__DATA((SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_MUL, SAC_ND_READ((SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0), SAC_ND_READ((SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0)) SAC_ND_FREE((SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) /* * WL_ASSIGN( 
(SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -5, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == (SAC_ND_A_DIM( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) - SAC_ND_A_SIZE( (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))))), 26, "WL expression with illegal dimension found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2)), 26, "WL expression with illegal size found!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_WL_INC_OFFSET((SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) SAC_WL_GRID_UNROLL_END(2, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(2, 
(SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(1, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(1, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(0, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(0, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) /* * MT_SCHEDULER_END( 0, 3, 0, 0, 0, SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ /* * WL_SCHEDULE__END( 3) */ } SAC_PF_END_WITH(genarray) SAC_ND_LABEL(_comp_9407_SAC_label) SAC_ND_FREE((SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, 
(NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_ALLOC_BEGIN((SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 629, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 629, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 629, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_ASSIGN( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9315__flat_67, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 629, "Assignment with incompatible types found!"); SAC_NOOP() SAC_NOOP() SAC_NOOP() SAC_ND_ASSIGN__DATA( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9315__flat_67, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_PF_BEGIN_WITH(fold) /* * WL_SCHEDULE__BEGIN( 3) */ { int SAC_WL_MT_SCHEDULE_START( 0); int SAC_WL_MT_SCHEDULE_STOP( 0); int SAC_WL_MT_SCHEDULE_START( 1); int SAC_WL_MT_SCHEDULE_STOP( 1); int SAC_WL_MT_SCHEDULE_START( 2); int SAC_WL_MT_SCHEDULE_STOP( 2); /* * MT_SCHEDULER_BEGIN( 0, 3, 0, 0, 0, SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_WL_MT_SCHEDULE_START( 0) = 0; SAC_WL_MT_SCHEDULE_STOP( 0) = SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 1) = 0; SAC_WL_MT_SCHEDULE_STOP( 1) = SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 2) = 0; SAC_WL_MT_SCHEDULE_STOP( 2) = SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_STRIDE_LOOP0_BEGIN(0, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2090__eat_515, 
(SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(0, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(1, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(1, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(2, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(2, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_NOOP() SAC_ND_ALLOC_BEGIN((SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 629, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9299__ivesli_8124, (SCL, 
(NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_arr( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_ND_WRITE( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) * ( SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) * SAC_ND_READ( (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 629, "Assignment with incompatible types found!"); 
SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -5, (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -5, (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 629, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_PRF_SxS__DATA((SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_ADD, SAC_ND_READ((SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0), SAC_ND_READ((SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0)) SAC_ND_DEC_RC_FREE((SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, ) /* * ND_ASSIGN( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, 
(FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 117, "Assignment with incompatible types found!"); SAC_NOOP() SAC_NOOP() SAC_NOOP() SAC_ND_ASSIGN__DATA( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) /* * WL_FOLD( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_TR_WL_PRINT( ("index vector [%d, %d, %d] -- fold", SAC_ND_READ( (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0))); /* fold operation */ SAC_WL_GRID_UNROLL_END(2, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(2, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(1, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(1, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(0, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(0, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) /* * MT_SCHEDULER_END( 0, 3, 0, 0, 0, SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ /* * WL_SCHEDULE__END( 3) */ } SAC_PF_END_WITH(fold) SAC_ND_LABEL(_comp_9408_SAC_label) SAC_ND_FREE((SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_ND_FREE((SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * 
WL_ASSIGN( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == (SAC_ND_A_DIM( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) - SAC_ND_A_SIZE( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))))), 1, "WL expression with illegal dimension found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4)), 1, "WL expression with illegal size found!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_DEC_RC_FREE((SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, ) SAC_WL_INC_OFFSET((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) SAC_WL_GRID_UNROLL_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 0, SAC_ND_READ((SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) /* * MT_SCHEDULER_END( 0, 5, 0, 0, 0, 0, 0, SAC_ND_READ( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ /* * WL_SCHEDULE__END( 5) */ } SAC_PF_END_WITH(modarray) SAC_ND_LABEL(_comp_9409_SAC_label) SAC_ND_FREE((SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) 
SAC_ND_FREE((SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9315__flat_67, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_ND_FREE((SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), ) SAC_ND_DEC_RC_FREE((SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, ) SAC_ND_DEC_RC_FREE((SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_DEC_RC_FREE((SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, ) SAC_ND_FREE((SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_CUDA_ALLOC_BEGIN((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) /* * ND_COPY__SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), -7, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9294__iwlmem_8316_dev, 
(AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) == 5), 152, "Assignment with incompatible types found!"); { int SAC_size = 1; SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) = SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) = SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) = SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) = SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_ND_A_DESC_SIZE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, 
(NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = SAC_ND_A_MIRROR_SIZE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = SAC_size * SAC_ND_A_SIZE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_MIRROR_SIZE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) >= 0), 152, "Array with size <0 found!"); } SAC_CUDA_ALLOC_END((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) /* * CUDA_MEM_TRANSFER( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, hipMemcpyHostToDevice) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) == SAC_ND_A_SIZE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))))), 152, "hipMemcpy: Destionation and source arrays should have equal sizes!"); SAC_CUDA_MEM_TRANSFER((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, hipMemcpyHostToDevice) SAC_ND_FREE((SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_CUDA_ALLOC_BEGIN((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) == 1), 19, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE 
((SAC_ND_A_SHAPE( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) == 5), 19, "Assignment with incompatible types found!"); SAC_NOOP() SAC_CUDA_ALLOC_END((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 1, int) /* * CUDA_MEM_TRANSFER( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, hipMemcpyHostToDevice) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) == SAC_ND_A_SIZE( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))), 19, "hipMemcpy: Destionation and source arrays should have equal sizes!"); SAC_CUDA_MEM_TRANSFER((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, hipMemcpyHostToDevice) SAC_ND_FREE((SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_CUDA_ALLOC_BEGIN((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 5, SAC_ND_READ( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, 
(NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) == 5), 28, "Assignment with incompatible types found!"); { int SAC_size = 1; SAC_size *= SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = SAC_ND_READ( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_size *= SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) = SAC_ND_READ( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_size *= SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) = SAC_ND_READ( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_size *= SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) = SAC_ND_READ( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_size *= SAC_ND_A_MIRROR_SHAPE( 
(SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) = SAC_ND_READ( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_ND_A_DESC_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = SAC_ND_A_MIRROR_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = SAC_size; SAC_ASSURE_TYPE_LINE ((SAC_ND_A_MIRROR_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) >= 0), 28, "Array with size <0 found!"); } SAC_CUDA_ALLOC_END((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) SAC_ND_ALLOC_BEGIN((SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 28, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 28, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) SAC_ND_ALLOC_BEGIN((SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 28, 
"Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 28, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 28, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 28, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE 
((SAC_ND_A_DIM( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 28, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 28, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * CUDA_GRID_BLOCK( 15, SAC_ND_READ( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9292__cnstass_8342_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9291__cnstass_8341_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9290__cnstass_8340_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9289__cnstass_8339_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( 
(SACp_emal_9288__cnstass_8338_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, 0, 0, 0, 0) */ { dim3 grid((SAC_ND_READ( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_9289__cnstass_8339_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_9288__cnstass_8338_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0))); if (grid.x > 2147483647 || grid.y > 65535 || grid.z > 65535) { SAC_RuntimeError("CUDA XYZ grid dimension exceeds compute compatibilities max value: 2147483647 x 65535 x 65535"); } dim3 block((SAC_ND_READ( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_9292__cnstass_8342_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_9291__cnstass_8341_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_9290__cnstass_8340_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0))); if (block.x > 2147483647 || block.y > 65535 || block.z > 65535) { SAC_RuntimeError("CUDA XYZ block dimension exceeds compute compatibilities max value: 2147483647 x 65535 x 65535"); } /* * CUDA_GLOBALFUN_AP( SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X, 14, inout, float, -7, SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, 
))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9292__cnstass_8342_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9291__cnstass_8341_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9290__cnstass_8340_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9289__cnstass_8339_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9288__cnstass_8338_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, float, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), in, float, -7, SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), in, int, 1, SAC_SET_NT_USG( FAG, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))) */ hipLaunchKernelGGL(( SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X), dim3(grid), dim3(block), 0, 0, SAC_CUDA_ARG_inout( 
SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), float), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 0), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 1), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 2), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 3), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 4), SAC_ND_A_MIRROR_SIZE(SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_ND_A_MIRROR_DIM(SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, 
(SACp_emal_9292__cnstass_8342_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9291__cnstass_8341_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9290__cnstass_8340_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9289__cnstass_8339_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9288__cnstass_8338_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), float), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), float), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 0), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 1), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 2), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 3), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 4), SAC_ND_A_MIRROR_SIZE(SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_ND_A_MIRROR_DIM(SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, 
(OTH, )))))))))))), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), int), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 0), SAC_ND_A_MIRROR_SIZE(SAC_SET_NT_USG( FAG, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_ND_A_MIRROR_DIM(SAC_SET_NT_USG( FAG, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))))); SAC_CUDA_GET_LAST_KERNEL_ERROR(); } /* * ND_REFRESH__MIRROR( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), -7) */ SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, 
(NOT, (NDI, (OTH, )))))))))), 4); SAC_ND_A_MIRROR_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))); SAC_ND_FREE((SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9292__cnstass_8342_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9291__cnstass_8341_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9290__cnstass_8340_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9289__cnstass_8339_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9288__cnstass_8338_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_w__SSA0_1, (SCL, (NHD, 
(NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_ND_FREE((SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_CUDA_FREE((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) SAC_CUDA_FREE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) SAC_ND_ALLOC_BEGIN((SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) /* * ND_COPY__SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), -7) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 5), 28, "Assignment with incompatible types found!"); { int SAC_size = 1; SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, 
(NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4); SAC_ND_A_DESC_SIZE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_MIRROR_SIZE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_size * SAC_ND_A_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_MIRROR_SIZE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) >= 0), 28, "Array with size <0 found!"); } SAC_ND_ALLOC_END((SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) /* * CUDA_MEM_TRANSFER( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, hipMemcpyDeviceToHost) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_ND_A_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, 
(FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), 28, "hipMemcpy: Destionation and source arrays should have equal sizes!"); SAC_CUDA_MEM_TRANSFER((SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, hipMemcpyDeviceToHost) SAC_CUDA_FREE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1, , 4, out, float, (SAC_arg_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACf__MAIN_CLsacprelude_p__zero__f_X(...) 
[ body ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACf__MAIN_CLsacprelude_p__zero__f_X, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACf__MAIN_CLsacprelude_p__zero__f_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { SAC_ND_DECL_CONST__DATA((SACp_emal_9357__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0.0f) /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -3) */ int SAC_ND_A_MIRROR_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = 1; SAC_INIT_LOCAL_MEM() SAC_ND_DEC_RC_FREE((SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, ) /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9357__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9357__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* *
 * NOTE(review): machine-generated output of the SaC (Single Assignment C)
 * compiler backend -- every SAC_ND_* / SAC_HM_* / SAC_MT_* identifier is a
 * code-generation macro supplied by the SAC runtime headers.  Do not
 * hand-edit; regenerate from the .sac source instead.
 * The function above and the one below are "zero" wrappers: each declares a
 * constant float 0.0f (SAC_ND_DECL_CONST__DATA), drops one reference count
 * on its array argument A (SAC_ND_DEC_RC_FREE), and returns the constant
 * through the out parameter (SAC_ND_RET_out).  The variant above mirrors a
 * single shape component and sets MIRROR_DIM == 1; the variant below
 * mirrors five shape components and sets MIRROR_DIM == 5.  Presumably both
 * implement sacprelude's zero() for float arrays of the respective ranks --
 * TODO confirm against the SAC prelude sources.
ND_FUN_DEF_END( SACf__MAIN_CLsacprelude_p__zero__f_X, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACf__MAIN_CLsacprelude_p__zero__f_X_X_X_X_X(...) [ body ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACf__MAIN_CLsacprelude_p__zero__f_X_X_X_X_X, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACf__MAIN_CLsacprelude_p__zero__f_X_X_X_X_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { SAC_ND_DECL_CONST__DATA((SACp_emal_9358__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0.0f) /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7) */ int SAC_ND_A_MIRROR_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2); int
SAC_ND_A_MIRROR_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = 5; SAC_INIT_LOCAL_MEM() SAC_ND_DEC_RC_FREE((SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, ) /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9358__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9358__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACf__MAIN_CLsacprelude_p__zero__f_X_X_X_X_X, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * WITH-loop Count: 0 * _MAIN::SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X(...)
[ body ] ****************************************************************************/ /* * CUDA_GLOBALFUN_DEF_BEGIN( SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X, 14, inout, float, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), -7, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, float, (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, in, float, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), -7, in, int, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 1) */ __global__ void SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X(SAC_CUDA_PARAM_inout( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 0), int 
SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_CUDA_PARAM_in( (SACp_emal_9294__iwlmem_8316_dev, (AKD, 
(NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), int), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SIZE((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))))){ { /* * ND_DECL( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * 
ND_DECL( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * CUDA_DECL_KERNEL_ARRAY( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ int SAC_ND_A_FIELD( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))[5]; SAC_ND_DECL__DESC( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; SAC_INIT_LOCAL_MEM() /* * CUDA_WLIDS( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 4, (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_X, SACp_step_4, SACp_width_4, SACp_lb_4, SACp_ub_4) SAC_ND_WRITE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) = SAC_ND_READ( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 3, (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_Y, SACp_step_3, SACp_width_3, SACp_lb_3, SACp_ub_3) SAC_ND_WRITE( 
(SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) = SAC_ND_READ( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 2, (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_Z, SACp_step_2, SACp_width_2, SACp_lb_2, SACp_ub_2) SAC_ND_WRITE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) = SAC_ND_READ( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 1, (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, BLOCKIDX_X, SACp_step_1, SACp_width_1, SACp_lb_1, SACp_ub_1) SAC_ND_WRITE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) = SAC_ND_READ( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 0, (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, BLOCKIDX_Y, SACp_step_0, SACp_width_0, SACp_lb_0, SACp_ub_0) SAC_ND_WRITE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_ND_READ( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * ND_ARRAY_IDXS2OFFSET_id( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, )))))))))), 5, (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_WRITE( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 4) * ( SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 3) * ( SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 2) * ( SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 1) * SAC_ND_READ( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); /* * ND_IDXS2OFFSET_id( (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, 
(NON, (NOT, (NDI, (INT, )))))))))), (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 1) * SAC_ND_READ( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 152, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, 
(FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), -7, (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), -7, (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), SAC_ND_READ( (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_PRF_SxS__DATA((SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_MUL, SAC_ND_READ((SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0), SAC_ND_READ((SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0)) /* * CUDA_WL_ASSIGN( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9283__iwlmem_8317_dev, 
(AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), -7, (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE_READ_COPY( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SAC_ND_READ( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_ND_FREE((SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOOP() SAC_CLEANUP_LOCAL_MEM() } /* * CUDA_GLOBALFUN_DEF_END( SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X, 14, inout, float, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), -7, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, 
(SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, float, (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, in, float, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), -7, in, int, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 1) */ } /**************************************************************************** * WITH-loop Count: 0 * _MAIN::SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f(...) [ body ] ****************************************************************************/ /* * CUDA_GLOBALFUN_DEF_BEGIN( SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, inout, float, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, 
(INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0) */ __global__ void SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f(SAC_CUDA_PARAM_in( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_CUDA_PARAM_inout( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), 
/* NOTE(review): SAC-compiler-generated CUDA kernel - do not edit by hand. The remaining scalar parameters below are the per-dimension lower and upper bounds (SACp_lb_n, SACp_ub_n) of the 5-dimensional with-loop index space. */
SAC_CUDA_PARAM_in( (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)){ { /* * ND_DECL( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, 
 * (review) note: scalar data and descriptor declarations for each with-loop index variable, one pair per dimension
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * CUDA_DECL_KERNEL_ARRAY( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ int SAC_ND_A_FIELD( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))[5]; SAC_ND_DECL__DESC( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; SAC_INIT_LOCAL_MEM() /* * CUDA_WLIDS( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 4, (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_X, SACp_step_4, SACp_width_4, SACp_lb_4, SACp_ub_4) SAC_ND_WRITE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
/* NOTE(review): each SAC_CUDA_WLIDS_HD below derives one component of the 5-D index vector SACp_pinl_1773_iv from a CUDA built-in id (THREADIDX_X, THREADIDX_Y, THREADIDX_Z, BLOCKIDX_X, BLOCKIDX_Y) together with the matching step, width, lb and ub scalars. */
)))))))))), 4) = SAC_ND_READ( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 3, (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_Y, SACp_step_3, SACp_width_3, SACp_lb_3, SACp_ub_3) SAC_ND_WRITE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) = SAC_ND_READ( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 2, (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_Z, SACp_step_2, SACp_width_2, SACp_lb_2, SACp_ub_2) SAC_ND_WRITE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) = SAC_ND_READ( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 1, (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, BLOCKIDX_X, SACp_step_1, SACp_width_1, SACp_lb_1, SACp_ub_1) SAC_ND_WRITE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) = SAC_ND_READ( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, 
 * (review) dimension mapping visible above and below: dim4<-THREADIDX_X, dim3<-THREADIDX_Y, dim2<-THREADIDX_Z, dim1<-BLOCKIDX_X, dim0<-BLOCKIDX_Y
(INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 0, (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, BLOCKIDX_Y, SACp_step_0, SACp_width_0, SACp_lb_0, SACp_ub_0) SAC_ND_WRITE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_ND_READ( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * ND_IDXS2OFFSET_arr( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, 32, 32, 32, 7, 7) */ SAC_ND_WRITE( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( 7 * ( 7 * ( 32 * ( 32 * SAC_ND_READ( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); /* * CUDA_WL_ASSIGN( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_8369__iwlmem_8314_dev, (AKS, 
 * (review) the expression above linearises the 5-D index against the static shape 32x32x32x7x7; the SAC_ND_WRITE_READ_COPY below stores the scalar with-loop body value at that flat offset
(NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE_READ_COPY( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SAC_ND_READ( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOOP() SAC_CLEANUP_LOCAL_MEM() } /* * CUDA_GLOBALFUN_DEF_END( SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, inout, float, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, 
(INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0) */ } /**************************************************************************** * WITH-loop Count: 0 * _MAIN::SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f(...) [ body ] ****************************************************************************/ /* * CUDA_GLOBALFUN_DEF_BEGIN( SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, inout, float, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0) */ __global__ void 
SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f(SAC_CUDA_PARAM_in( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_CUDA_PARAM_inout( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_2, (SCL, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)){ { /* * ND_DECL( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1745__eat_510, (SCL, (NHD, 
(NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * CUDA_DECL_KERNEL_ARRAY( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ int SAC_ND_A_FIELD( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))[5]; SAC_ND_DECL__DESC( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; SAC_INIT_LOCAL_MEM() /* * CUDA_WLIDS( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 4, (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_X, SACp_step_4, SACp_width_4, SACp_lb_4, SACp_ub_4) SAC_ND_WRITE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) = SAC_ND_READ( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 3, (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ 
SAC_CUDA_WLIDS_HD( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_Y, SACp_step_3, SACp_width_3, SACp_lb_3, SACp_ub_3) SAC_ND_WRITE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) = SAC_ND_READ( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 2, (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_Z, SACp_step_2, SACp_width_2, SACp_lb_2, SACp_ub_2) SAC_ND_WRITE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) = SAC_ND_READ( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 1, (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, BLOCKIDX_X, SACp_step_1, SACp_width_1, SACp_lb_1, SACp_ub_1) SAC_ND_WRITE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) = SAC_ND_READ( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 0, (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, BLOCKIDX_Y, SACp_step_0, SACp_width_0, SACp_lb_0, SACp_ub_0) SAC_ND_WRITE( 
(SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_ND_READ( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * ND_IDXS2OFFSET_arr( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, 32, 32, 32, 7, 7) */ SAC_ND_WRITE( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( 7 * ( 7 * ( 32 * ( 32 * SAC_ND_READ( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); /* * CUDA_WL_ASSIGN( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE_READ_COPY( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SAC_ND_READ( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOOP() SAC_CLEANUP_LOCAL_MEM() } /* * CUDA_GLOBALFUN_DEF_END( SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, inout, float, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0) */ }
/*
 * stubs for SACARGfreeDataUdt and SACARGcopyDataUdt
 */
extern "C" void SACARGfreeDataUdt( int, void *);
extern "C" void *SACARGcopyDataUdt( int, int, void *);

/* Stub: intentionally does nothing. NOTE(review): presumably this program
 * declares no user-defined argument types, so there is never a UDT payload
 * to release — confirm against the sac2c SACARG interface. */
void SACARGfreeDataUdt( int size, void *data) {}

/* Stub: ignores all arguments and always yields NULL (no UDT copying is
 * performed by this generated program). */
void *SACARGcopyDataUdt( int type, int size, void *data)
{
  return ((void *) 0x0);
}

/* Process entry point generated by the sac2c compiler.
 *
 * Sequence: declare the scalar int result variable, bring up the SAC
 * runtime subsystems, invoke the compiled SAC main function
 * (SACf__MAIN__main) with SAC_res as its out-argument, tear the runtime
 * back down, and return SAC_res as the process exit code.
 *
 * NOTE(review): the SAC_MT_* / SAC_PF_* / SAC_CS_* / SAC_HM_* macros are
 * defined in sac.h and are expected to expand to no-ops when the
 * corresponding SAC_DO_* feature switches are 0 — verify against the
 * sac.h shipped with this compiler version. */
int main( int __argc, char *__argv[])
{
  /* Result of the SAC program: a scalar int plus its SAC descriptor. */
  SAC_ND_DECL__DATA( (SAC_res, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, )
  SAC_ND_DECL__DESC( (SAC_res, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), )
  SAC_NOTHING()

  /* Runtime subsystem setup: multithreading, profiling, heap manager,
   * cache simulator, and the command line made visible to the program. */
  SAC_MT_SETUP_INITIAL();
  SAC_PF_SETUP();
  SAC_HM_SETUP();
  SAC_MT_SETUP();
  SAC_CS_SETUP();
  SAC_COMMANDLINE_SET( __argc, __argv);

  /* Run the compiled SAC main function; its result lands in SAC_res. */
  SAC_INVOKE_MAIN_FUN( SACf__MAIN__main, SAC_ND_ARG_out( (SAC_res, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int));

  /* Teardown and end-of-run reporting. */
  SAC_DISTMEM_BARRIER();
  SAC_PF_PRINT();
  SAC_CS_FINALIZE();
  SAC_MT_FINALIZE();
  SAC_HM_PRINT();

  /* Exit code is the value computed by the SAC program. */
  return( SAC_ND_READ( (SAC_res, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0));
}
d41f3feefe65e48781cdc1c9f70f6a0f81fe800d.cu
/* * Global Switches */ #define SAC_DO_CHECK 0 #define SAC_DO_CHECK_TYPE 0 #define SAC_DO_CHECK_GPU 0 #define SAC_DO_CHECK_BOUNDARY 0 #define SAC_DO_CHECK_MALLOC 0 #define SAC_DO_CHECK_ERRNO 0 #define SAC_DO_CHECK_HEAP 0 #define SAC_DO_CHECK_DISTMEM 0 #define SAC_DO_CHECK_DISTMEMPHM 0 #define SAC_DO_PHM 0 #define SAC_DO_APS 0 #define SAC_DO_DAO 0 #define SAC_DO_MSCA 0 #define SAC_DO_PROFILE 0 #define SAC_DO_PROFILE_WITH 0 #define SAC_DO_PROFILE_FUN 0 #define SAC_DO_PROFILE_INL 0 #define SAC_DO_PROFILE_LIB 0 #define SAC_DO_PROFILE_DISTMEM 0 #define SAC_DO_TRACE 0 #define SAC_DO_TRACE_REF 0 #define SAC_DO_TRACE_MEM 0 #define SAC_DO_TRACE_PRF 0 #define SAC_DO_TRACE_FUN 0 #define SAC_DO_TRACE_WL 0 #define SAC_DO_TRACE_AA 0 #define SAC_DO_TRACE_MT 0 #define SAC_DO_TRACE_RTSPEC 0 #define SAC_DO_TRACE_DISTMEM 0 #define SAC_DO_CACHESIM 0 #define SAC_DO_CACHESIM_ADV 0 #define SAC_DO_CACHESIM_GLOBAL 1 #define SAC_DO_CACHESIM_FILE 0 #define SAC_DO_CACHESIM_PIPE 0 #define SAC_DO_CACHESIM_IMDT 1 #define SAC_DO_MULTITHREAD 0 #define SAC_DO_MT_PTHREAD 0 #define SAC_DO_MT_LPEL 0 #define SAC_DO_MT_OMP 0 #define SAC_DO_DISTMEM 0 #define SAC_DO_DISTMEM_GASNET 0 #define SAC_DO_DISTMEM_GPI 0 #define SAC_DO_DISTMEM_MPI 0 #define SAC_DO_DISTMEM_ARMCI 0 #define SAC_DO_DISTMEM_ALLOC_CACHE_OUTSIDE_DSM 0 #define SAC_DO_DISTMEM_PTR_DESC 0 #define SAC_DO_DISTMEM_PTR_CACHE 0 #define SAC_DO_THREADS_STATIC 1 #define SAC_DO_FP 0 #define SAC_DO_MT_CREATE_JOIN 0 #define SAC_DEBUG_RC 0 /* * Global Settings */ #define SAC_FORCE_DESC_SIZE -1 /* * MUTC Backend Specific Switches */ #define SAC_MUTC_FUNAP_AS_CREATE 0 #define SAC_MUTC_THREAD_MALLOC 0 #define SAC_MUTC_DISABLE_THREAD_MEM 0 #define SAC_MUTC_BENCH 0 #define SAC_MUTC_MACROS 0 #define SAC_MUTC_RC_PLACES 1 #define SAC_MUTC_RC_INDIRECT 0 #define SAC_MUTC_SEQ_DATA_PARALLEL 0 #define SAC_MUTC_FORCE_SPAWN_FLAGS #define SAC_CUDA_MACROS 1 #define SAC_OMP_MACROS 0 #define SAC_DO_COMPILE_MODULE 0 #define SAC_C_EXTERN extern "C" /* * Global Settings */ 
#ifndef NULL # ifdef __cplusplus # define NULL 0 # else # define NULL (void*) 0 # endif #endif #define SAC_SET_TMPDIR "/tmp" #define SAC_SET_INITIAL_MASTER_HEAPSIZE 1048576 #define SAC_SET_INITIAL_WORKER_HEAPSIZE 65536 #define SAC_SET_INITIAL_UNIFIED_HEAPSIZE 0 #ifndef SAC_SET_RTSPEC_THREADS #define SAC_SET_RTSPEC_THREADS 1 #endif #ifndef SAC_SET_MTMODE #define SAC_SET_MTMODE 0 #endif #define SAC_SET_CPU_BIND_STRATEGY 0 #define SAC_SET_BARRIER_TYPE 0 #define SAC_SET_SMART_DECISIONS 0 #define SAC_SET_SMART_FILENAME "default" #define SAC_SET_SMART_ARCH "(null)" #define SAC_SET_SMART_PERIOD 500 #ifndef SAC_SET_THREADS_MAX #define SAC_SET_THREADS_MAX 128 #endif #ifndef SAC_SET_THREADS #define SAC_SET_THREADS 1 #endif #ifndef SAC_OMP_ACTIVE_LEVEL #define SAC_OMP_ACTIVE_LEVEL 1 #endif #ifndef SAC_SET_MASTERCLASS #define SAC_SET_MASTERCLASS 0 #endif #define SAC_SET_NUM_SCHEDULERS 0 #define SAC_SET_CACHE_1_SIZE -1 #define SAC_SET_CACHE_1_LINE 4 #define SAC_SET_CACHE_1_ASSOC 1 #define SAC_SET_CACHE_1_WRITEPOL SAC_CS_default #define SAC_SET_CACHE_1_MSCA_FACTOR 0.00 #define SAC_SET_CACHE_2_SIZE -1 #define SAC_SET_CACHE_2_LINE 4 #define SAC_SET_CACHE_2_ASSOC 1 #define SAC_SET_CACHE_2_WRITEPOL SAC_CS_default #define SAC_SET_CACHE_2_MSCA_FACTOR 0.00 #define SAC_SET_CACHE_3_SIZE -1 #define SAC_SET_CACHE_3_LINE 4 #define SAC_SET_CACHE_3_ASSOC 1 #define SAC_SET_CACHE_3_WRITEPOL SAC_CS_default #define SAC_SET_CACHE_3_MSCA_FACTOR 0.00 #define SAC_SET_CACHESIM_HOST "" #define SAC_SET_CACHESIM_FILE "gconv2.cs" #define SAC_SET_CACHESIM_DIR "/tmp" #define SAC_SET_MAXFUN 0 #define SAC_SET_MAXFUNAP 1 #define SBLOCKSZ 16 #define LBLOCKSZ 256 /* * Includes */ #include "sac.h" #if SAC_OMP_MACROS #include "omp.h" #endif #if SAC_CUDA_MACROS #include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #endif /* * SAC-Program gconv2.sac : */ /* Additional headers for external function declarations */ #include <stdlib.h> /* * type definitions */ SAC_ND_TYPEDEF((SACt_sacprelude_p__SACarg, (SCL, 
(HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_Random__Random, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_ComplexBasics__complex, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, )))))))))), double) SAC_ND_TYPEDEF((SACt_Complex__complex, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (DOU, )))))))))), double) SAC_ND_TYPEDEF((SACt_World__World, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_C99Benchmarking__C99Benchmarking, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_String__string, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_Interval__Interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_C99Benchmarking__Interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_Interval__Interval) SAC_ND_TYPEDEF((SACt_MTClock__MTClock, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_Terminal__Terminal, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) SAC_ND_TYPEDEF((SACt_TermFile__TermFile, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SAC_hidden) /* * Global Definitions */ SAC_MT_DEFINE() SAC_PF_DEFINE() SAC_HM_DEFINE() /* * prototypes for externals (FUNDECS) */ SAC_C_EXTERN /* * ND_FUN_DECL( SACrandom, (int, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2, in_nodesc, int, (SACl_MIN, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in_nodesc, int, (SACl_MAX, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACrandom, SAC_ND_TYPE_NT( (int, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), SAC_ND_PARAM_in_nodesc( (SACl_MIN, (SCL, (NHD, (NUQ, (INT, (GLO, 
(FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in_nodesc( (SACl_MAX, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( to_string, , 3, out, SACt_String__string, (SAC_arg_1, (SCL, (HID, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), in, unsigned char, (SACl_A, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in_nodesc, int, (SACl_LENGTH, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( to_string, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (HID, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), SACt_String__string), SAC_ND_PARAM_in( (SACl_A, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in_nodesc( (SACl_LENGTH, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); /* * prototypes for locals (FUNDEFS) */ SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__partitionSlicer__i_S__i_S__i_S__i_S__i_S, , 9, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_4, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_min, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_max, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_axis, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_lb, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ub, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__partitionSlicer__i_S__i_S__i_S__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( 
(SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_4, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_min, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_max, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_axis, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_lb, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ub, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__partitionIntersectMax__i_S__i_S, , 3, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_PWLbound1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ivmin, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__partitionIntersectMax__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_PWLbound1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ivmin, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__isPartitionIntersectNull__i_S__i_S__i_S__i_S, , 5, out, bool, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idxmin, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idxmax, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, 
int, (SACl_bound2, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__isPartitionIntersectNull__i_S__i_S__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idxmin, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idxmax, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound2, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__i_S, , 3, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__f_S, , 3, out, float, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, float, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_idx, (AUD, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__d_S, , 3, out, double, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, double, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__bl_S, , 3, out, bool, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__c_S, , 3, out, unsigned char, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, unsigned char, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, 
(NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__b_S, , 3, out, byte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, byte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__s_S, , 3, out, short, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, short, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__l_S, , 3, out, 
long, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, long, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ll_S, , 3, out, longlong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, longlong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ub_S, , 3, out, ubyte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ubyte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, 
(NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__us_S, , 3, out, ushort, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ushort, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ui_S, , 3, out, uint, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, uint, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ul_S, , 3, out, ulong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( 
SACwf_sacprelude_p___selSxADistmemLocal__i_S__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ull_S, , 3, out, ulonglong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulonglong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selSxADistmemLocal__i_S__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__partitionIntersectMin__i_S__i_S, , 3, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_PWLbound2, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ivmax, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__partitionIntersectMin__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_PWLbound2, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ivmax, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__adjustLacFunParams__bl_S__i_S__i_S, , 4, out, int, (SAC_arg_1, (AUD, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_iv, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__adjustLacFunParams__bl_S__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_iv, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN__main, , 1, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN__main, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__prod__i_S, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_v, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__prod__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_v, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__adjustLacFunParamsReshape__bl_S__i_S__i_S__i_S, , 5, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_iv, (AUD, (NHD, (NUQ, (INT, (GLO, 
(FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_shp, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__adjustLacFunParamsReshape__bl_S__i_S__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_iv, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_shp, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__i_S__i_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__f_S__f_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, float, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__f_S__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), 
SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__d_S__d_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, double, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, double, (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__d_S__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__bl_S__bl_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__bl_S__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__c_S__c_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, unsigned char, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, unsigned char, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__c_S__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, 
)))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__b_S__b_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, byte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, byte, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__b_S__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__s_S__s_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, short, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, short, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__s_S__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__l_S__l_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, long, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, long, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( 
SACwf_sacprelude_p__eq__l_S__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__ll_S__ll_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, longlong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, longlong, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__ll_S__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__ub_S__ub_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ubyte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, ubyte, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__ub_S__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__us_S__us_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ushort, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, 
)))))))))), in, ushort, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__us_S__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__ui_S__ui_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, uint, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, uint, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__ui_S__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__ul_S__ul_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ulong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, ulong, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__ul_S__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__eq__ull_S__ull_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, 
(NOT, (NDI, (BOO, )))))))))), in, ulonglong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, ulonglong, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__eq__ull_S__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__gridFiller__i_S__i_S__i_S__i_S__i_S, , 8, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_lb, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ub, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_wdth, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_dim, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_maxwidth, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__gridFiller__i_S__i_S__i_S__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_lb, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ub, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_wdth, 
(AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_dim, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_maxwidth, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__isPartitionIntersect1Part__i_S__i_S__i_S__i_S, , 5, out, bool, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idxmin, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idxmax, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound2, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__isPartitionIntersect1Part__i_S__i_S__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idxmin, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idxmax, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound2, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__i_S, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( 
SACwf__MAIN_CLsacprelude_p__zero__d_S, , 2, out, double, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, double, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__f_S, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__c_S, , 2, out, unsigned char, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, unsigned char, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__bl_S, , 2, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), 
SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__b_S, , 2, out, byte, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, byte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__s_S, , 2, out, short, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, short, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__l_S, , 2, out, long, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, long, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__ll_S, , 2, out, longlong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, longlong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__ll_S, void, 
SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__ub_S, , 2, out, ubyte, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, ubyte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__us_S, , 2, out, ushort, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, ushort, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__ui_S, , 2, out, uint, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, uint, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__ul_S, , 2, out, ulong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, ulong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, 
(FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf__MAIN_CLsacprelude_p__zero__ull_S, , 2, out, ulonglong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, ulonglong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf__MAIN_CLsacprelude_p__zero__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__partitionMax__i_S__i_S, , 3, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_x, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_y, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__partitionMax__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_x, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_y, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__partitionMin__i_S__i_S, , 3, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_x, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_y, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__partitionMin__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_x, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_y, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___PL_PL__i_S, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_a, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___PL_PL__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_a, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__i_S, , 3, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__f_S, , 3, out, float, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, float, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_idx, 
(AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__d_S, , 3, out, double, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, double, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__bl_S, , 3, out, bool, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__c_S, , 3, out, unsigned char, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, unsigned char, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( 
SACwf_sacprelude_p__sel__i_S__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__b_S, , 3, out, byte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, byte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__s_S, , 3, out, short, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, short, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__l_S, , 3, out, long, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, 
(NDI, (INT, )))))))))), in, long, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__ll_S, , 3, out, longlong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, longlong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__ub_S, , 3, out, ubyte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ubyte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__us_S, , 3, out, ushort, 
(SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ushort, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__ui_S, , 3, out, uint, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, uint, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__ul_S, , 3, out, ulong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, 
(FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p__sel__i_S__ull_S, , 3, out, ulonglong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulonglong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p__sel__i_S__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__i_S, , 3, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__f_S, , 3, out, float, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, float, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, 
(FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__d_S, , 3, out, double, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, double, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__bl_S, , 3, out, bool, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__c_S, , 3, out, unsigned char, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, 
)))))))))), in, unsigned char, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__b_S, , 3, out, byte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, byte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__s_S, , 3, out, short, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, short, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); 
SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__l_S, , 3, out, long, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, long, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ll_S, , 3, out, longlong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, longlong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ub_S, , 3, out, ubyte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ubyte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, 
(NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__us_S, , 3, out, ushort, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ushort, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ui_S, , 3, out, uint, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, uint, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ul_S, , 3, out, ulong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulong, 
(SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ull_S, , 3, out, ulonglong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, int, (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulonglong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACwf_sacprelude_p___selVxADistmemLocal__i_S__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_idx, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf__MAIN__main, , 1, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf__MAIN__main, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf__MAIN_CL_INIT__init, , 0) */ SAC_ND_DECL_FUN2( SACf__MAIN_CL_INIT__init, void, void); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_ScalarArith___PL__f__f, , 3, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_B, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( 
SACf_ScalarArith___PL__f__f, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_B, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__ull_S, , 2, out, ulonglong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, ulonglong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__ul_S, , 2, out, ulong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, ulong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__ui_S, , 2, out, uint, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, uint, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__us_S, , 2, out, ushort, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, 
(NOT, (NDI, (USH, )))))))))), in, ushort, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__ub_S, , 2, out, ubyte, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, ubyte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__ll_S, , 2, out, longlong, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, longlong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__l_S, , 2, out, long, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, long, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__s_S, , 2, out, short, (SAC_arg_1, 
(SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, short, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__b_S, , 2, out, byte, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, byte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__bl_S, , 2, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__c_S, , 2, out, unsigned char, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, unsigned char, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( 
SACf_sacprelude_p__zero__f_S, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__d_S, , 2, out, double, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, double, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__zero__i_S, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__zero__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_World_CL_INIT__init_TheWorld__SACt_World__World, , 1, inout, SACt_World__World, (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_World_CL_INIT__init_TheWorld__SACt_World__World, void, SAC_ND_PARAM_inout( (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_World__World)); SAC_C_EXTERN /* * ND_FUN_DECL( 
SACf_C99Benchmarking_CL_INIT__init_TheBenchmarkObject__SACt_C99Benchmarking__C99Benchmarking, , 1, inout, SACt_C99Benchmarking__C99Benchmarking, (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_C99Benchmarking_CL_INIT__init_TheBenchmarkObject__SACt_C99Benchmarking__C99Benchmarking, void, SAC_ND_PARAM_inout( (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_C99Benchmarking__C99Benchmarking)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_C99Benchmarking__getInterval__SACt_String__string__i__i, , 4, out, SACt_Interval__Interval, (SAC_arg_1, (SCL, (HID, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), in, SACt_String__string, (SACl_interval_name, (SCL, (HID, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), in, int, (SACl_interval_number, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_unit_time, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_C99Benchmarking__getInterval__SACt_String__string__i__i, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (HID, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), SACt_Interval__Interval), SAC_ND_PARAM_in( (SACl_interval_name, (SCL, (HID, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), SACt_String__string), SAC_ND_PARAM_in( (SACl_interval_number, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_unit_time, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_MTClock_CL_INIT__init_TheMTClock__SACt_MTClock__MTClock, , 1, inout, SACt_MTClock__MTClock, (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_MTClock_CL_INIT__init_TheMTClock__SACt_MTClock__MTClock, void, SAC_ND_PARAM_inout( (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_MTClock__MTClock)); SAC_C_EXTERN 
/* * ND_FUN_DECL( SACf_C99Benchmarking__start__SACt_C99Benchmarking__Interval, , 1, inout, SACt_Interval__Interval, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_C99Benchmarking__start__SACt_C99Benchmarking__Interval, void, SAC_ND_PARAM_inout( (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_Interval__Interval)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_C99Benchmarking__end__SACt_C99Benchmarking__Interval, , 1, inout, SACt_Interval__Interval, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_C99Benchmarking__end__SACt_C99Benchmarking__Interval, void, SAC_ND_PARAM_inout( (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_Interval__Interval)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_Terminal_CL_INIT__init_TheTerminal__SACt_Terminal__Terminal, , 1, inout, SACt_Terminal__Terminal, (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_Terminal_CL_INIT__init_TheTerminal__SACt_Terminal__Terminal, void, SAC_ND_PARAM_inout( (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_Terminal__Terminal)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_TermFile_CL_INIT__init_stdout__SACt_TermFile__TermFile, , 1, inout, SACt_TermFile__TermFile, (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_TermFile_CL_INIT__init_stdout__SACt_TermFile__TermFile, void, SAC_ND_PARAM_inout( (SACp_OI_object, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_TermFile__TermFile)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_C99Benchmarking__printResult__SACt_C99Benchmarking__Interval, , 1, inout, SACt_Interval__Interval, (SACl_int1, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_DECL_FUN2( 
SACf_C99Benchmarking__printResult__SACt_C99Benchmarking__Interval, void, SAC_ND_PARAM_inout( (SACl_int1, (SCL, (HID, (NUQ, (INT, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SACt_Interval__Interval)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__ull_S, , 3, out, ulonglong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulonglong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__ul_S, , 3, out, ulong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__ui_S, , 3, out, uint, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, uint, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( 
SACf_sacprelude_p__sel__i_X__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__us_S, , 3, out, ushort, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ushort, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__ub_S, , 3, out, ubyte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ubyte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__ll_S, , 3, out, longlong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, 
(INT, )))))))))), in, longlong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__l_S, , 3, out, long, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, long, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__s_S, , 3, out, short, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, short, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__b_S, , 3, out, byte, (SAC_arg_1, (AUD, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, byte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__c_S, , 3, out, unsigned char, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, unsigned char, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__bl_S, , 3, out, bool, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, 
(GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__d_S, , 3, out, double, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, double, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__f_S, , 3, out, float, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, float, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__sel__i_X__i_S, , 3, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__sel__i_X__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idx, (AKD, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__ull_S, , 3, out, ulonglong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulonglong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__ull_S, , 3, out, ulonglong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulonglong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__ul_S, , 3, out, ulong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulong, (SACl_array, (AUD, (NHD, (NUQ, (INT, 
(GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__ul_S, , 3, out, ulong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__ui_S, , 3, out, uint, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, uint, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__ui_S, , 3, out, 
uint, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, uint, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__us_S, , 3, out, ushort, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ushort, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__us_S, , 3, out, ushort, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ushort, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, 
(NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__ub_S, , 3, out, ubyte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ubyte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__ub_S, , 3, out, ubyte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ubyte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__ll_S, , 3, out, longlong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, longlong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( 
SACf_sacprelude_p___selVxADistmemLocal__i_1__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__ll_S, , 3, out, longlong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, longlong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__l_S, , 3, out, long, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, long, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__l_S, , 3, out, long, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, 
(FPM, (NOT, (NDI, (LON, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, long, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__s_S, , 3, out, short, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, short, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__s_S, , 3, out, short, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, short, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, 
(AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__b_S, , 3, out, byte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, byte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__b_S, , 3, out, byte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, byte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__c_S, , 3, out, unsigned char, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, unsigned char, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__c_S, void, 
SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__c_S, , 3, out, unsigned char, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, unsigned char, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__bl_S, , 3, out, bool, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__bl_S, , 3, out, bool, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, 
)))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__d_S, , 3, out, double, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, double, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__d_S, , 3, out, double, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, double, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, 
(FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__f_S, , 3, out, float, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, float, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__f_S, , 3, out, float, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, float, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_1__i_S, , 3, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_1__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idx, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selVxADistmemLocal__i_X__i_S, , 3, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selVxADistmemLocal__i_X__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idx, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__ull_S, , 3, out, ulonglong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ulonglong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__ul_S, , 3, out, ulong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, 
(INT, )))))))))), in, ulong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__ui_S, , 3, out, uint, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, uint, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__us_S, , 3, out, ushort, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ushort, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( 
SACf_sacprelude_p___selSxADistmemLocal__i__ub_S, , 3, out, ubyte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, ubyte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__ll_S, , 3, out, longlong, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, longlong, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__l_S, , 3, out, long, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, long, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( 
(SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__s_S, , 3, out, short, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, short, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__b_S, , 3, out, byte, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, byte, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__c_S, , 3, out, unsigned char, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, unsigned char, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, 
(UCH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__bl_S, , 3, out, bool, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__d_S, , 3, out, double, (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, double, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__f_S, , 3, out, float, (SAC_arg_1, 
(AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, float, (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___selSxADistmemLocal__i__i_S, , 3, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___selSxADistmemLocal__i__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idx, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_array, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__ull_S__ull_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ulonglong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), in, ulonglong, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__ull_S__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_B, (AUD, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__ul_S__ul_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ulong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), in, ulong, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__ul_S__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__ui_S__ui_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, uint, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), in, uint, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__ui_S__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__us_S__us_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ushort, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), in, ushort, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__us_S__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__ub_S__ub_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, ubyte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), in, ubyte, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__ub_S__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__ll_S__ll_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, longlong, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), in, longlong, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__ll_S__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__l_S__l_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, long, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), in, long, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__l_S__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, 
(SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__s_S__s_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, short, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, short, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__s_S__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__b_S__b_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, byte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, byte, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__b_S__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__c_S__c_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, unsigned char, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, unsigned char, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, 
))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__c_S__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__bl_S__bl_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__bl_S__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__d_S__d_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, double, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, double, (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__d_S__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__f_S__f_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, float, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, 
(NOT, (NDI, (FLO, )))))))))), in, float, (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__f_S__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__eq__i_S__i_S, , 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__eq__i_S__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_B, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__adjustLacFunParams__bl_S__i_S__i_X, , 4, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_iv, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__adjustLacFunParams__bl_S__i_S__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( 
(SACl_iv, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__adjustLacFunParamsReshape__bl_S__i_S__i_X__i_X, , 5, out, int, (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, bool, (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_iv, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_shp, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__adjustLacFunParamsReshape__bl_S__i_S__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_p, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_i, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_iv, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_shp, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__isPartitionIntersectNull__i_X__i_X__i_X__i_X, , 5, out, bool, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idxmin, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idxmax, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__isPartitionIntersectNull__i_X__i_X__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idxmin, (AKD, (NHD, (NUQ, 
(INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idxmax, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__isPartitionIntersectNull__i__i__i_X__i_X, , 5, out, bool, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idxmin, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idxmax, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__isPartitionIntersectNull__i__i__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idxmin, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idxmax, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__partitionMin__i__i, , 3, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_x, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_y, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__partitionMin__i__i, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, 
)))))))))), int), SAC_ND_PARAM_in( (SACl_x, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_y, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__partitionMax__i__i, , 3, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_x, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_y, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__partitionMax__i__i, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_x, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_y, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__isPartitionIntersect1Part__i_X__i_X__i_X__i_X, , 5, out, bool, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idxmin, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idxmax, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__isPartitionIntersect1Part__i_X__i_X__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idxmin, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idxmax, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound2, (AKD, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__isPartitionIntersect1Part__i__i__i_X__i_X, , 5, out, bool, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, int, (SACl_idxmin, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_idxmax, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_bound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__isPartitionIntersect1Part__i__i__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_idxmin, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_idxmax, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_bound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__partitionIntersectMax__i_X__i_X, , 3, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_PWLbound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ivmin, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__partitionIntersectMax__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_PWLbound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ivmin, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( 
SACf_sacprelude_p__partitionIntersectMax__i_X__i, , 3, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_PWLbound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ivmin, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__partitionIntersectMax__i_X__i, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_PWLbound1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ivmin, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__partitionIntersectMin__i_X__i_X, , 3, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_PWLbound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ivmax, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__partitionIntersectMin__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_PWLbound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ivmax, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__partitionIntersectMin__i_X__i, , 3, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_PWLbound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ivmax, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__partitionIntersectMin__i_X__i, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), 
SAC_ND_PARAM_in( (SACl_PWLbound2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ivmax, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__prod__i_X, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_v, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__prod__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_v, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__partitionSlicer__i_X__i_X__i__i_X__i_X, , 9, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_4, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_min, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_max, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_axis, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_lb, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ub, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__partitionSlicer__i_X__i_X__i__i_X__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( 
(SAC_arg_4, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_min, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_max, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_axis, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_lb, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ub, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__gridFiller__i_X__i_X__i_X__i__i_X, , 8, out, int, (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), out, int, (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_lb, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_ub, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_wdth, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_dim, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_maxwidth, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__gridFiller__i_X__i_X__i_X__i__i_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_2, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_out( (SAC_arg_3, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_lb, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_ub, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_wdth, (AKD, (NHD, 
(NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_dim, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_maxwidth, (AKD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___PL_PL__i, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_a, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___PL_PL__i, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_a, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf__MAIN_CLsacprelude_p__zero__f_1, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf__MAIN_CLsacprelude_p__zero__f_1, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1, , 4, out, float, (SAC_arg_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), 
SAC_ND_PARAM_in( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf__MAIN_CLsacprelude_p__zero__f_X, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf__MAIN_CLsacprelude_p__zero__f_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf__MAIN_CLsacprelude_p__zero__f_X_X_X_X_X, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf__MAIN_CLsacprelude_p__zero__f_X_X_X_X_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p___ST__i__i, , 3, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_a, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_b, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p___ST__i__i, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_a, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_b, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); SAC_C_EXTERN /* * ND_FUN_DECL( SACf_sacprelude_p__and__bl__bl, 
, 3, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_a, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_b, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DECL_FUN2( SACf_sacprelude_p__and__bl__bl, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_a, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_b, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)); SAC_C_EXTERN /* * CUDA_GLOBALFUN_DECL( SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X, 14, inout, float, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), -7, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, float, (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, in, float, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, 
(NDI, (OTH, )))))))))), -7, in, int, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 1) */ __global__ void SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X(SAC_CUDA_PARAM_inout( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, 
)))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_CUDA_PARAM_in( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), int), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SIZE((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, 
)))))))))))); ; SAC_C_EXTERN /* * CUDA_GLOBALFUN_DECL( SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, inout, float, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0) */ __global__ void SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f(SAC_CUDA_PARAM_in( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_CUDA_PARAM_inout( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, 
(NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); ; SAC_C_EXTERN /* * CUDA_GLOBALFUN_DECL( SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, inout, float, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, 
(INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0) */ __global__ void SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f(SAC_CUDA_PARAM_in( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_CUDA_PARAM_inout( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), int 
SAC_ND_A_MIRROR_DIM((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)); ; /* * global objects */ /* * ND_OBJDEF_EXTERN( (RandomGen, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_Random__Random, 0) */ SAC_ND_DECL__DATA( (RandomGen, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_Random__Random, extern) SAC_ND_DECL__DESC( (RandomGen, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), extern) SAC_NOTHING() /* * ND_OBJDEF_EXTERN( (SACo_World__TheWorld, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_World__World, 0) */ SAC_ND_DECL__DATA( (SACo_World__TheWorld, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_World__World, extern) SAC_ND_DECL__DESC( (SACo_World__TheWorld, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), extern) SAC_NOTHING() /* * ND_OBJDEF_EXTERN( 
(SACo_C99Benchmarking__TheBenchmarkObject, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_C99Benchmarking__C99Benchmarking, 0) */ SAC_ND_DECL__DATA( (SACo_C99Benchmarking__TheBenchmarkObject, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_C99Benchmarking__C99Benchmarking, extern) SAC_ND_DECL__DESC( (SACo_C99Benchmarking__TheBenchmarkObject, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), extern) SAC_NOTHING() /* * ND_OBJDEF_EXTERN( (SACo_MTClock__TheMTClock, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_MTClock__MTClock, 0) */ SAC_ND_DECL__DATA( (SACo_MTClock__TheMTClock, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_MTClock__MTClock, extern) SAC_ND_DECL__DESC( (SACo_MTClock__TheMTClock, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), extern) SAC_NOTHING() /* * ND_OBJDEF_EXTERN( (SACo_Terminal__TheTerminal, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_Terminal__Terminal, 0) */ SAC_ND_DECL__DATA( (SACo_Terminal__TheTerminal, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_Terminal__Terminal, extern) SAC_ND_DECL__DESC( (SACo_Terminal__TheTerminal, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), extern) SAC_NOTHING() /* * ND_OBJDEF_EXTERN( (SACo_TermFile__stdout, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_TermFile__TermFile, 0) */ SAC_ND_DECL__DATA( (SACo_TermFile__stdout, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_TermFile__TermFile, extern) SAC_ND_DECL__DESC( (SACo_TermFile__stdout, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), extern) SAC_NOTHING() /* * function definitions (FUNDEFS) */ /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN::SACwf__MAIN__main(...) 
[ wrapper ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACwf__MAIN__main, , 1, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN__main, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf__MAIN__main, , 1, out, int, SAC_SET_NT_USG( FAG, (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))) */ SAC_ND_FUNAP2( SACf__MAIN__main, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)) /* * ND_REFRESH__MIRROR( (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), (SACp_cwc_544, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN__main, , 1, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__i_S(...) 
[ wrapper ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__i_S, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__i_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__i_S, , 2, out, int, SAC_SET_NT_USG( FAG, (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__i_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_ND_ARG_in( 
SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))), int)) /* * ND_REFRESH__MIRROR( (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), (SACp_cwc_716, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__i_S, , 2, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), in, int, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__d_S(...) 
[ wrapper ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__d_S, , 2, out, double, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, double, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__d_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), double)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, )))))))))), double, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, )))))))))), double, ) SAC_ND_DECL__DESC( (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__d_S, , 2, out, double, SAC_SET_NT_USG( FAG, (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, ))))))))))), in, double, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__d_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, ))))))))))), 
double), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))), double)) /* * ND_REFRESH__MIRROR( (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), (SACp_cwc_718, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (DOU, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__d_S, , 2, out, double, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, )))))))))), in, double, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (DOU, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__f_S(...) 
[ wrapper ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__f_S, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__f_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { SAC_ND_DECL_CONST__DATA((SACp_pinl_9392__emal_8357__cwc_719__SSA1_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0.0f) SAC_ND_DECL_CONST__DATA((SACp_pinl_9391__emal_8356__cwc_733, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0.0f) /* * ND_DECL( (SACp_emal_8355__cwc_721, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8355__cwc_721, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_8355__cwc_721, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_8354__cwc_735, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), bool, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8354__cwc_735, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), bool, ) SAC_ND_DECL__DESC( (SACp_emal_8354__cwc_735, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, 
(NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))); SAC_INIT_LOCAL_MEM() SAC_ND_PRF_DIM_A__DATA((SACp_emal_8355__cwc_721, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -2) SAC_ND_DEC_RC_FREE((SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, ) SAC_ND_PRF_SxS__DATA((SACp_emal_8354__cwc_735, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), SAC_ND_PRF_EQ, SAC_ND_READ((SACp_emal_8355__cwc_721, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 5) SAC_ND_FREE((SACp_emal_8355__cwc_721, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) if (SAC_ND_GETVAR((SACp_emal_8354__cwc_735, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), SACp_emal_8354__cwc_735)) { SAC_ND_FREE((SACp_emal_8354__cwc_735, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), ) /* * ND_ASSIGN( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_pinl_9391__emal_8356__cwc_733, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 106, "Assignment with incompatible types found!"); SAC_NOOP() SAC_NOOP() SAC_NOOP() SAC_ND_ASSIGN__DATA( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 
(SACp_pinl_9391__emal_8356__cwc_733, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) } else { SAC_ND_FREE((SACp_emal_8354__cwc_735, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), ) /* * ND_ASSIGN( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_pinl_9392__emal_8357__cwc_719__SSA1_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 106, "Assignment with incompatible types found!"); SAC_NOOP() SAC_NOOP() SAC_NOOP() SAC_ND_ASSIGN__DATA( (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_pinl_9392__emal_8357__cwc_719__SSA1_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) } /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_cwc_719, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__f_S, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AUD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__c_S(...) 
[ wrapper ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__c_S, , 2, out, unsigned char, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, unsigned char, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__c_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), unsigned char)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_737, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), unsigned char, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_737, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), unsigned char, ) SAC_ND_DECL__DESC( (SACp_cwc_737, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__c_S, , 2, out, unsigned char, SAC_SET_NT_USG( FAG, (SACp_cwc_737, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))), in, unsigned char, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__c_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_737, (SCL, (NHD, 
(NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))), unsigned char), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))), unsigned char)) /* * ND_REFRESH__MIRROR( (SACp_cwc_737, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), (SACp_cwc_737, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), (SACp_cwc_737, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__c_S, , 2, out, unsigned char, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, )))))))))), in, unsigned char, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UCH, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__bl_S(...) 
[ wrapper ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__bl_S, , 2, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__bl_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), bool)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), bool, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), bool, ) SAC_ND_DECL__DESC( (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__bl_S, , 2, out, bool, SAC_SET_NT_USG( FAG, (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, ))))))))))), in, bool, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__bl_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, ))))))))))), bool), 
SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))), bool)) /* * ND_REFRESH__MIRROR( (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), (SACp_cwc_739, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BOO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__bl_S, , 2, out, bool, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, )))))))))), in, bool, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BOO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__b_S(...) 
[ wrapper ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__b_S, , 2, out, byte, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, byte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__b_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), byte)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, )))))))))), byte, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, )))))))))), byte, ) SAC_ND_DECL__DESC( (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__b_S, , 2, out, byte, SAC_SET_NT_USG( FAG, (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, ))))))))))), in, byte, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__b_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, ))))))))))), byte), 
SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))), byte)) /* * ND_REFRESH__MIRROR( (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), (SACp_cwc_741, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (BYT, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__b_S, , 2, out, byte, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, )))))))))), in, byte, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (BYT, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__s_S(...) 
[ wrapper ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__s_S, , 2, out, short, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, short, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__s_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), short)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, )))))))))), short, 0) */ SAC_ND_DECL__DATA( (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, )))))))))), short, ) SAC_ND_DECL__DESC( (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, )))))))))), ) SAC_NOTHING() /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), -2) */ int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))); int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))); SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_sacprelude_p__zero__s_S, , 2, out, short, SAC_SET_NT_USG( FAG, (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, ))))))))))), in, short, SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))))) */ SAC_ND_FUNAP2( SACf_sacprelude_p__zero__s_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, ))))))))))), short), 
SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))), short)) /* * ND_REFRESH__MIRROR( (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), (SACp_cwc_743, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (SHO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__s_S, , 2, out, short, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, )))))))))), in, short, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (SHO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * Wrapper function: * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__l_S(...) 
[ wrapper ]
 ****************************************************************************/
/*
 * NOTE(review): machine-generated code (sac2c backend output).  The
 * SAC_ND_* / SAC_HM_* / SAC_MT_* identifiers are ICM macros supplied by the
 * SAC runtime headers, and the (name, (SCL|AUD, ...)) tuples are name-tuple
 * encodings of array class / shape / uniqueness properties.  Do not
 * hand-edit tokens; regenerate from the .sac source instead.
 *
 * Wrapper dispatching _MAIN:sacprelude_p::zero for a `long` scalar result:
 * declares a scalar temporary, mirrors size/dimension of the AUD argument
 * from its descriptor, applies SACf_sacprelude_p__zero__l_S, and returns
 * the temporary through the out parameter.
 */
/*
 * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__l_S, ..., out long, in long )
 */
SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__l_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), long))
{
  SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded)
  SAC_MT_DEFINE_ST_SELF()
  {
    /* scalar temporary receiving the concrete function's result */
    SAC_ND_DECL__DATA( (SACp_cwc_745, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LON, )))))))))), long, )
    SAC_ND_DECL__DESC( (SACp_cwc_745, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LON, )))))))))), )
    SAC_NOTHING()
    /* mirror the argument's size/dim out of its runtime descriptor */
    int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))));
    int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))));
    SAC_INIT_LOCAL_MEM()
    /* apply the concrete instance of zero */
    SAC_ND_FUNAP2( SACf_sacprelude_p__zero__l_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_745, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LON, ))))))))))), long), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, ))))))))))), long))
    /* ND_REFRESH__MIRROR( (SACp_cwc_745, ...), 0) expands to a no-op here */
    SAC_NOOP()
    /* hand the temporary back through the out parameter */
    SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LON, )))))))))), (SACp_cwc_745, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LON, )))))))))))
    return;
    SAC_CLEANUP_LOCAL_MEM()
  }
}
/*
 * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__l_S, ... )
 */
SAC_ND_FUN_DEF_END2()

/****************************************************************************
 * Wrapper function:
 * WITH-loop Count: 0
 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__ll_S(...)
[ wrapper ]
 ****************************************************************************/
/*
 * NOTE(review): machine-generated code (sac2c backend output); SAC_ND_* are
 * ICM macros and the nested tuples are name-tuple property encodings.  Do
 * not hand-edit tokens; regenerate from the .sac source instead.
 *
 * Wrapper dispatching _MAIN:sacprelude_p::zero for a `longlong` scalar
 * result: declares a scalar temporary, mirrors size/dimension of the AUD
 * argument from its descriptor, applies SACf_sacprelude_p__zero__ll_S, and
 * returns the temporary through the out parameter.
 */
/*
 * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__ll_S, ..., out longlong, in longlong )
 */
SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__ll_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), longlong))
{
  SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded)
  SAC_MT_DEFINE_ST_SELF()
  {
    /* scalar temporary receiving the concrete function's result */
    SAC_ND_DECL__DATA( (SACp_cwc_747, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LLO, )))))))))), longlong, )
    SAC_ND_DECL__DESC( (SACp_cwc_747, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LLO, )))))))))), )
    SAC_NOTHING()
    /* mirror the argument's size/dim out of its runtime descriptor */
    int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))));
    int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))));
    SAC_INIT_LOCAL_MEM()
    /* apply the concrete instance of zero */
    SAC_ND_FUNAP2( SACf_sacprelude_p__zero__ll_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_747, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LLO, ))))))))))), longlong), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, ))))))))))), longlong))
    /* ND_REFRESH__MIRROR( (SACp_cwc_747, ...), 0) expands to a no-op here */
    SAC_NOOP()
    /* hand the temporary back through the out parameter */
    SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (LLO, )))))))))), (SACp_cwc_747, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (LLO, )))))))))))
    return;
    SAC_CLEANUP_LOCAL_MEM()
  }
}
/*
 * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__ll_S, ... )
 */
SAC_ND_FUN_DEF_END2()

/****************************************************************************
 * Wrapper function:
 * WITH-loop Count: 0
 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__ub_S(...)
[ wrapper ]
 ****************************************************************************/
/*
 * NOTE(review): machine-generated code (sac2c backend output); SAC_ND_* are
 * ICM macros and the nested tuples are name-tuple property encodings.  Do
 * not hand-edit tokens; regenerate from the .sac source instead.
 *
 * Wrapper dispatching _MAIN:sacprelude_p::zero for a `ubyte` scalar result:
 * declares a scalar temporary, mirrors size/dimension of the AUD argument
 * from its descriptor, applies SACf_sacprelude_p__zero__ub_S, and returns
 * the temporary through the out parameter.
 */
/*
 * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__ub_S, ..., out ubyte, in ubyte )
 */
SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__ub_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), ubyte))
{
  SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded)
  SAC_MT_DEFINE_ST_SELF()
  {
    /* scalar temporary receiving the concrete function's result */
    SAC_ND_DECL__DATA( (SACp_cwc_749, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UBY, )))))))))), ubyte, )
    SAC_ND_DECL__DESC( (SACp_cwc_749, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UBY, )))))))))), )
    SAC_NOTHING()
    /* mirror the argument's size/dim out of its runtime descriptor */
    int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))));
    int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))));
    SAC_INIT_LOCAL_MEM()
    /* apply the concrete instance of zero */
    SAC_ND_FUNAP2( SACf_sacprelude_p__zero__ub_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_749, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UBY, ))))))))))), ubyte), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, ))))))))))), ubyte))
    /* ND_REFRESH__MIRROR( (SACp_cwc_749, ...), 0) expands to a no-op here */
    SAC_NOOP()
    /* hand the temporary back through the out parameter */
    SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UBY, )))))))))), (SACp_cwc_749, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UBY, )))))))))))
    return;
    SAC_CLEANUP_LOCAL_MEM()
  }
}
/*
 * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__ub_S, ... )
 */
SAC_ND_FUN_DEF_END2()

/****************************************************************************
 * Wrapper function:
 * WITH-loop Count: 0
 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__us_S(...)
[ wrapper ]
 ****************************************************************************/
/*
 * NOTE(review): machine-generated code (sac2c backend output); SAC_ND_* are
 * ICM macros and the nested tuples are name-tuple property encodings.  Do
 * not hand-edit tokens; regenerate from the .sac source instead.
 *
 * Wrapper dispatching _MAIN:sacprelude_p::zero for a `ushort` scalar
 * result: declares a scalar temporary, mirrors size/dimension of the AUD
 * argument from its descriptor, applies SACf_sacprelude_p__zero__us_S, and
 * returns the temporary through the out parameter.
 */
/*
 * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__us_S, ..., out ushort, in ushort )
 */
SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__us_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), ushort))
{
  SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded)
  SAC_MT_DEFINE_ST_SELF()
  {
    /* scalar temporary receiving the concrete function's result */
    SAC_ND_DECL__DATA( (SACp_cwc_751, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (USH, )))))))))), ushort, )
    SAC_ND_DECL__DESC( (SACp_cwc_751, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (USH, )))))))))), )
    SAC_NOTHING()
    /* mirror the argument's size/dim out of its runtime descriptor */
    int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))));
    int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))));
    SAC_INIT_LOCAL_MEM()
    /* apply the concrete instance of zero */
    SAC_ND_FUNAP2( SACf_sacprelude_p__zero__us_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_751, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (USH, ))))))))))), ushort), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, ))))))))))), ushort))
    /* ND_REFRESH__MIRROR( (SACp_cwc_751, ...), 0) expands to a no-op here */
    SAC_NOOP()
    /* hand the temporary back through the out parameter */
    SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (USH, )))))))))), (SACp_cwc_751, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (USH, )))))))))))
    return;
    SAC_CLEANUP_LOCAL_MEM()
  }
}
/*
 * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__us_S, ... )
 */
SAC_ND_FUN_DEF_END2()

/****************************************************************************
 * Wrapper function:
 * WITH-loop Count: 0
 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__ui_S(...)
[ wrapper ]
 ****************************************************************************/
/*
 * NOTE(review): machine-generated code (sac2c backend output); SAC_ND_* are
 * ICM macros and the nested tuples are name-tuple property encodings.  Do
 * not hand-edit tokens; regenerate from the .sac source instead.
 *
 * Wrapper dispatching _MAIN:sacprelude_p::zero for a `uint` scalar result:
 * declares a scalar temporary, mirrors size/dimension of the AUD argument
 * from its descriptor, applies SACf_sacprelude_p__zero__ui_S, and returns
 * the temporary through the out parameter.
 */
/*
 * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__ui_S, ..., out uint, in uint )
 */
SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__ui_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), uint))
{
  SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded)
  SAC_MT_DEFINE_ST_SELF()
  {
    /* scalar temporary receiving the concrete function's result */
    SAC_ND_DECL__DATA( (SACp_cwc_753, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UIN, )))))))))), uint, )
    SAC_ND_DECL__DESC( (SACp_cwc_753, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UIN, )))))))))), )
    SAC_NOTHING()
    /* mirror the argument's size/dim out of its runtime descriptor */
    int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))));
    int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))));
    SAC_INIT_LOCAL_MEM()
    /* apply the concrete instance of zero */
    SAC_ND_FUNAP2( SACf_sacprelude_p__zero__ui_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_753, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UIN, ))))))))))), uint), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, ))))))))))), uint))
    /* ND_REFRESH__MIRROR( (SACp_cwc_753, ...), 0) expands to a no-op here */
    SAC_NOOP()
    /* hand the temporary back through the out parameter */
    SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (UIN, )))))))))), (SACp_cwc_753, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UIN, )))))))))))
    return;
    SAC_CLEANUP_LOCAL_MEM()
  }
}
/*
 * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__ui_S, ... )
 */
SAC_ND_FUN_DEF_END2()

/****************************************************************************
 * Wrapper function:
 * WITH-loop Count: 0
 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__ul_S(...)
[ wrapper ]
 ****************************************************************************/
/*
 * NOTE(review): machine-generated code (sac2c backend output); SAC_ND_* are
 * ICM macros and the nested tuples are name-tuple property encodings.  Do
 * not hand-edit tokens; regenerate from the .sac source instead.
 *
 * Wrapper dispatching _MAIN:sacprelude_p::zero for a `ulong` scalar result:
 * declares a scalar temporary, mirrors size/dimension of the AUD argument
 * from its descriptor, applies SACf_sacprelude_p__zero__ul_S, and returns
 * the temporary through the out parameter.
 */
/*
 * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__ul_S, ..., out ulong, in ulong )
 */
SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__ul_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), ulong))
{
  SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded)
  SAC_MT_DEFINE_ST_SELF()
  {
    /* scalar temporary receiving the concrete function's result */
    SAC_ND_DECL__DATA( (SACp_cwc_755, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULO, )))))))))), ulong, )
    SAC_ND_DECL__DESC( (SACp_cwc_755, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULO, )))))))))), )
    SAC_NOTHING()
    /* mirror the argument's size/dim out of its runtime descriptor */
    int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))));
    int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))));
    SAC_INIT_LOCAL_MEM()
    /* apply the concrete instance of zero */
    SAC_ND_FUNAP2( SACf_sacprelude_p__zero__ul_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_755, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULO, ))))))))))), ulong), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, ))))))))))), ulong))
    /* ND_REFRESH__MIRROR( (SACp_cwc_755, ...), 0) expands to a no-op here */
    SAC_NOOP()
    /* hand the temporary back through the out parameter */
    SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULO, )))))))))), (SACp_cwc_755, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULO, )))))))))))
    return;
    SAC_CLEANUP_LOCAL_MEM()
  }
}
/*
 * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__ul_S, ... )
 */
SAC_ND_FUN_DEF_END2()

/****************************************************************************
 * Wrapper function:
 * WITH-loop Count: 0
 * _MAIN:sacprelude_p::SACwf__MAIN_CLsacprelude_p__zero__ull_S(...)
[ wrapper ]
 ****************************************************************************/
/*
 * NOTE(review): machine-generated code (sac2c backend output); SAC_ND_* are
 * ICM macros and the nested tuples are name-tuple property encodings.  Do
 * not hand-edit tokens; regenerate from the .sac source instead.
 *
 * Wrapper dispatching _MAIN:sacprelude_p::zero for a `ulonglong` scalar
 * result: declares a scalar temporary, mirrors size/dimension of the AUD
 * argument from its descriptor, applies SACf_sacprelude_p__zero__ull_S, and
 * returns the temporary through the out parameter.
 */
/*
 * ND_FUN_DEF_BEGIN( SACwf__MAIN_CLsacprelude_p__zero__ull_S, ..., out ulonglong, in ulonglong )
 */
SAC_ND_DEF_FUN_BEGIN2( SACwf__MAIN_CLsacprelude_p__zero__ull_S, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong), SAC_ND_PARAM_in( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), ulonglong))
{
  SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded)
  SAC_MT_DEFINE_ST_SELF()
  {
    /* scalar temporary receiving the concrete function's result */
    SAC_ND_DECL__DATA( (SACp_cwc_757, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULL, )))))))))), ulonglong, )
    SAC_ND_DECL__DESC( (SACp_cwc_757, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULL, )))))))))), )
    SAC_NOTHING()
    /* mirror the argument's size/dim out of its runtime descriptor */
    int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))));
    int SAC_ND_A_MIRROR_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))) = SAC_ND_A_DESC_DIM( (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))));
    SAC_INIT_LOCAL_MEM()
    /* apply the concrete instance of zero */
    SAC_ND_FUNAP2( SACf_sacprelude_p__zero__ull_S, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_cwc_757, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULL, ))))))))))), ulonglong), SAC_ND_ARG_in( SAC_SET_NT_USG( FPA, (SACl_A, (AUD, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, ))))))))))), ulonglong))
    /* ND_REFRESH__MIRROR( (SACp_cwc_757, ...), 0) expands to a no-op here */
    SAC_NOOP()
    /* hand the temporary back through the out parameter */
    SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (ULL, )))))))))), (SACp_cwc_757, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (ULL, )))))))))))
    return;
    SAC_CLEANUP_LOCAL_MEM()
  }
}
/*
 * ND_FUN_DEF_END( SACwf__MAIN_CLsacprelude_p__zero__ull_S, ... )
 */
SAC_ND_FUN_DEF_END2()

/****************************************************************************
 * WITH-loop Count: 0
 * _MAIN::SACf__MAIN__main(...)
[ body ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACf__MAIN__main, , 1, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACf__MAIN__main, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { /* * ND_DECL( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), unsigned char, -3) */ SAC_ND_DECL__DATA( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), unsigned char, ) SAC_ND_DECL__DESC( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0); int SAC_ND_A_MIRROR_SIZE( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) = 1; /* * ND_DECL( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, -7) */ SAC_ND_DECL__DATA( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, 
)))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 5; /* * ND_DECL( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, -7) */ SAC_ND_DECL__DATA( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 5; SAC_ND_DECL_CONST__DATA((SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, 
(NON, (NOT, (NDI, (INT, )))))))))), int, 2147483647) /* * ND_DECL( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() SAC_ND_DECL_CONST__DATA((SACp_emal_8395__cnstass_8337_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8394__cnstass_8336_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8393__cnstass_8335_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8392__cnstass_8334_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8391__cnstass_8333_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8390__cnstass_8332_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 7) SAC_ND_DECL_CONST__DATA((SACp_emal_8389__cnstass_8331_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 7) SAC_ND_DECL_CONST__DATA((SACp_emal_8388__cnstass_8330_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 32) SAC_ND_DECL_CONST__DATA((SACp_emal_8387__cnstass_8329_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 32) SAC_ND_DECL_CONST__DATA((SACp_emal_8386__cnstass_8328_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 32) /* * ND_DECL( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, 5, 32, 32, 32, 7, 7) */ SAC_ND_DECL__DATA( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, 
(GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) = 7; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) = 7; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 1605632; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 5; /* * ND_DECL( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 5, 32, 32, 32, 7, 7) */ SAC_ND_DECL__DATA( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, 
)))))))))), 3) = 7; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = 7; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 1605632; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 5; /* * ND_DECL( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() SAC_ND_DECL_CONST__DATA((SACp_emal_8380__cnstass_8327_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8379__cnstass_8326_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8378__cnstass_8325_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8377__cnstass_8324_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8376__cnstass_8323_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_8375__cnstass_8322_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 7) SAC_ND_DECL_CONST__DATA((SACp_emal_8374__cnstass_8321_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 7) SAC_ND_DECL_CONST__DATA((SACp_emal_8373__cnstass_8320_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 32) SAC_ND_DECL_CONST__DATA((SACp_emal_8372__cnstass_8319_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 32) 
SAC_ND_DECL_CONST__DATA((SACp_emal_8371__cnstass_8318_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 32) /* * ND_DECL( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, 5, 32, 32, 32, 7, 7) */ SAC_ND_DECL__DATA( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) = 7; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) = 7; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 1605632; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 5; /* * ND_DECL( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 5, 32, 32, 32, 7, 7) */ SAC_ND_DECL__DATA( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = 32; const 
int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = 32; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = 7; const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = 7; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 1605632; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 5; SAC_ND_DECL_CONST__DATA((SACp_emal_8367__flat_471, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 2) SAC_ND_DECL_CONST__DATA((SACp_emal_8366__flat_469, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 4) /* * ND_DECL( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), unsigned char, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), unsigned char, ) SAC_ND_DECL__DESC( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, 
(NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 1, 1) */ SAC_ND_DECL__DATA( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = 1; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 1; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_8360__ivesli_8021, (SCL, (NHD, 
(NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_8358__pinl_1816__flat_252, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_8358__pinl_1816__flat_252, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_8358__pinl_1816__flat_252, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( 
(SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, -7) */ SAC_ND_DECL__DATA( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 5; /* * ND_DECL( (SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * 
ND_DECL( (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_Interval__Interval, 0) */ SAC_ND_DECL__DATA( (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_Interval__Interval, ) SAC_ND_DECL__DESC( (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_String__string, 0) */ SAC_ND_DECL__DATA( (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), SACt_String__string, ) SAC_ND_DECL__DESC( (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf__MAIN_CL_INIT__init, , 0) */ SAC_ND_FUNAP2( SACf__MAIN_CL_INIT__init, ) /* * ND_FUN_AP( SACrandom, SACp_flat_444, 2, in_nodesc, int, SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in_nodesc, int, SAC_SET_NT_USG( FAG, (SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))) */ SACp_flat_444 = SACrandom( SAC_ND_ARG_in_nodesc( SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, 
(NDI, (INT, ))))))))))), int), SAC_ND_ARG_in_nodesc( SAC_SET_NT_USG( FAG, (SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)); SAC_ND_ALLOC__DESC((SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) /* * ND_REFRESH__MIRROR( (SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_NOOP() SAC_ND_SET__RC((SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) SAC_ND_ALLOC_BEGIN((SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 306, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) SAC_ND_PRF_S__DATA((SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_TOF, SAC_ND_READ((SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_DEC_RC_FREE((SACp_flat_444, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, ) SAC_ND_PRF_SxS__DATA((SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_DIV, SAC_ND_READ((SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0), 2147483648.0f) SAC_CUDA_ALLOC_BEGIN((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 5, 
32, 32, 32, 7, 7) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) == 5), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) == 7), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) == 7), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_CUDA_ALLOC_END((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) SAC_ND_ALLOC_BEGIN((SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 222, "Assignment with incompatible types found!"); SAC_NOOP() 
SAC_ND_ALLOC_END((SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, 
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * CUDA_GRID_BLOCK( 15, SAC_ND_READ( (SACp_emal_8390__cnstass_8332_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8389__cnstass_8331_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8388__cnstass_8330_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8387__cnstass_8329_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( 
(SACp_emal_8386__cnstass_8328_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8395__cnstass_8337_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8394__cnstass_8336_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8393__cnstass_8335_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8392__cnstass_8334_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8391__cnstass_8333_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, 0, 0, 0, 0) */ { dim3 grid((SAC_ND_READ( (SACp_emal_8387__cnstass_8329_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8392__cnstass_8334_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_8386__cnstass_8328_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8391__cnstass_8333_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0))); if (grid.x > 2147483647 || grid.y > 65535 || grid.z > 65535) { SAC_RuntimeError("CUDA XYZ grid dimension exceeds compute compatibilities max value: 2147483647 x 65535 x 65535"); } dim3 block((SAC_ND_READ( (SACp_emal_8390__cnstass_8332_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8395__cnstass_8337_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_8389__cnstass_8331_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8394__cnstass_8336_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_8388__cnstass_8330_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( 
(SACp_emal_8393__cnstass_8335_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0))); if (block.x > 2147483647 || block.y > 65535 || block.z > 65535) { SAC_RuntimeError("CUDA XYZ block dimension exceeds compute compatibilities max value: 2147483647 x 65535 x 65535"); } /* * CUDA_GLOBALFUN_AP( SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), inout, float, 5, SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8390__cnstass_8332_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8389__cnstass_8331_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8388__cnstass_8330_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8387__cnstass_8329_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8386__cnstass_8328_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8395__cnstass_8337_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8394__cnstass_8336_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8393__cnstass_8335_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8392__cnstass_8334_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8391__cnstass_8333_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, )))))))))))) */ SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f<<<grid, block>>>( SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), float), SAC_CUDA_ARG_inout( SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), float), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 0), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 1), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 2), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 3), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 4), SAC_ND_A_MIRROR_SIZE(SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_ND_A_MIRROR_DIM(SAC_SET_NT_USG( FAG, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8390__cnstass_8332_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8389__cnstass_8331_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8388__cnstass_8330_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8387__cnstass_8329_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, 
(NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8386__cnstass_8328_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8395__cnstass_8337_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8394__cnstass_8336_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8393__cnstass_8335_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8392__cnstass_8334_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8391__cnstass_8333_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)); SAC_CUDA_GET_LAST_KERNEL_ERROR(); } /* * ND_REFRESH__MIRROR( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 5) */ SAC_NOOP() SAC_ND_FREE((SACp_emal_8390__cnstass_8332_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8389__cnstass_8331_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8388__cnstass_8330_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8387__cnstass_8329_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8386__cnstass_8328_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8395__cnstass_8337_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8394__cnstass_8336_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8393__cnstass_8335_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) 
SAC_ND_FREE((SACp_emal_8392__cnstass_8334_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8391__cnstass_8333_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_ND_FREE((SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_ALLOC_BEGIN((SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 5, 32, 32, 32, 7, 7) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 5), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, 
)))))))))), 2) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) == 7), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) == 7), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) /* * CUDA_MEM_TRANSFER( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, cudaMemcpyDeviceToHost) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_ND_A_SIZE( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), 222, "cudaMemcpy: Destionation and source arrays should have equal sizes!"); SAC_CUDA_MEM_TRANSFER((SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, cudaMemcpyDeviceToHost) SAC_CUDA_FREE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) /* * ND_FUN_AP( SACrandom, SACp_flat_457, 2, in_nodesc, int, SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in_nodesc, int, SAC_SET_NT_USG( FAG, (SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))) */ SACp_flat_457 = SACrandom( SAC_ND_ARG_in_nodesc( SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), 
int), SAC_ND_ARG_in_nodesc( SAC_SET_NT_USG( FAG, (SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)); SAC_ND_ALLOC__DESC((SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) /* * ND_REFRESH__MIRROR( (SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_NOOP() SAC_ND_SET__RC((SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) SAC_ND_ALLOC_BEGIN((SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 306, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) SAC_ND_PRF_S__DATA((SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_TOF, SAC_ND_READ((SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_DEC_RC_FREE((SACp_flat_457, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, ) SAC_ND_PRF_SxS__DATA((SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_DIV, SAC_ND_READ((SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0), 2147483648.0f) SAC_CUDA_ALLOC_BEGIN((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 5, 32, 32, 32, 7, 7) */ 
SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) == 5), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) == 7), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) == 7), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_CUDA_ALLOC_END((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) SAC_ND_ALLOC_BEGIN((SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1773_iv, (AKS, 
(NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) 
*/ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * CUDA_GRID_BLOCK( 15, SAC_ND_READ( (SACp_emal_8375__cnstass_8322_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8374__cnstass_8321_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8373__cnstass_8320_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8372__cnstass_8319_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8371__cnstass_8318_ub, (SCL, (NHD, (NUQ, (INT, (GLO, 
(NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8380__cnstass_8327_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8379__cnstass_8326_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8378__cnstass_8325_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8377__cnstass_8324_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_8376__cnstass_8323_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, 0, 0, 0, 0) */ { dim3 grid((SAC_ND_READ( (SACp_emal_8372__cnstass_8319_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8377__cnstass_8324_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_8371__cnstass_8318_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8376__cnstass_8323_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0))); if (grid.x > 2147483647 || grid.y > 65535 || grid.z > 65535) { SAC_RuntimeError("CUDA XYZ grid dimension exceeds compute compatibilities max value: 2147483647 x 65535 x 65535"); } dim3 block((SAC_ND_READ( (SACp_emal_8375__cnstass_8322_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8380__cnstass_8327_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_8374__cnstass_8321_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8379__cnstass_8326_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_8373__cnstass_8320_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_8378__cnstass_8325_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, )))))))))), 0))); if (block.x > 2147483647 || block.y > 65535 || block.z > 65535) { SAC_RuntimeError("CUDA XYZ block dimension exceeds compute compatibilities max value: 2147483647 x 65535 x 65535"); } /* * CUDA_GLOBALFUN_AP( SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), inout, float, 5, SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8375__cnstass_8322_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8374__cnstass_8321_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8373__cnstass_8320_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8372__cnstass_8319_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8371__cnstass_8318_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8380__cnstass_8327_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8379__cnstass_8326_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8378__cnstass_8325_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8377__cnstass_8324_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_8376__cnstass_8323_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))) */ 
SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f<<<grid, block>>>( SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), float), SAC_CUDA_ARG_inout( SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), float), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 0), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 1), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 2), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 3), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 4), SAC_ND_A_MIRROR_SIZE(SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_ND_A_MIRROR_DIM(SAC_SET_NT_USG( FAG, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8375__cnstass_8322_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8374__cnstass_8321_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8373__cnstass_8320_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8372__cnstass_8319_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), 
int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8371__cnstass_8318_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8380__cnstass_8327_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8379__cnstass_8326_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8378__cnstass_8325_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8377__cnstass_8324_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8376__cnstass_8323_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)); SAC_CUDA_GET_LAST_KERNEL_ERROR(); } /* * ND_REFRESH__MIRROR( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 5) */ SAC_NOOP() SAC_ND_FREE((SACp_emal_8375__cnstass_8322_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8374__cnstass_8321_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8373__cnstass_8320_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8372__cnstass_8319_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8371__cnstass_8318_ub, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8380__cnstass_8327_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8379__cnstass_8326_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8378__cnstass_8325_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) 
SAC_ND_FREE((SACp_emal_8377__cnstass_8324_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8376__cnstass_8323_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_ND_FREE((SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_ALLOC_BEGIN((SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 5, 32, 32, 32, 7, 7) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 5), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, 
)))))))))), 2) == 32), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) == 7), 222, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) == 7), 222, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) /* * CUDA_MEM_TRANSFER( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, cudaMemcpyDeviceToHost) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_ND_A_SIZE( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), 222, "cudaMemcpy: Destionation and source arrays should have equal sizes!"); SAC_CUDA_MEM_TRANSFER((SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, cudaMemcpyDeviceToHost) SAC_CUDA_FREE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) SAC_ND_ALLOC_BEGIN((SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 1, 1, unsigned char) /* * ND_SET__SHAPE_arr( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) == 1), 1, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( 
(SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0) == 5), 1, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 1, 1, unsigned char) SAC_ND_CREATE__STRING__DATA((SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), "work") /* * ND_ASSIGN( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), -3, (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 1, ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) == 1), 51, "Assignment with incompatible types found!"); SAC_ND_A_DESC( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) = SAC_ND_A_DESC( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0); SAC_ND_A_MIRROR_SIZE( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) = SAC_ND_A_SIZE( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), 0); SAC_ND_A_DESC_SIZE( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))) = SAC_ND_A_SIZE( (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (UCH, ))))))))))); SAC_ND_ASSIGN__DATA( (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), (SACp_emal_8365__flat_463, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, )))))))))), ) /* * ND_FUN_AP( to_string, , 3, out, SACt_String__string, SAC_SET_NT_USG( FAG, (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), in, unsigned char, SAC_SET_NT_USG( FAG, (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))), in_nodesc, int, SAC_SET_NT_USG( FAG, (SACp_emal_8366__flat_469, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))) */ SAC_ND_FUNAP2( to_string, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), SACt_String__string), SAC_ND_ARG_in( SAC_SET_NT_USG( FAG, (SACp_tcp_9406__emal_8365__flat_463, (AKD, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (UCH, ))))))))))), unsigned char), SAC_ND_ARG_in_nodesc( SAC_SET_NT_USG( FAG, (SACp_emal_8366__flat_469, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)) /* * ND_REFRESH__MIRROR( (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) */ SAC_NOOP() SAC_ND_DEC_RC_FREE((SACp_emal_8366__flat_469, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, ) /* * ND_FUN_AP( SACf_C99Benchmarking__getInterval__SACt_String__string__i__i, , 4, out, SACt_Interval__Interval, SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), in, SACt_String__string, SAC_SET_NT_USG( FAG, (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), in, int, SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, SAC_SET_NT_USG( FAG, (SACp_emal_8367__flat_471, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))))) */ SAC_ND_FUNAP2( SACf_C99Benchmarking__getInterval__SACt_String__string__i__i, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), SACt_Interval__Interval), SAC_ND_ARG_in( SAC_SET_NT_USG( FAG, (SACp_flat_462, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), SACt_String__string), SAC_ND_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_ND_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8367__flat_471, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)) /* * ND_REFRESH__MIRROR( (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) */ SAC_NOOP() /* * ND_FUN_AP( SACf_C99Benchmarking__start__SACt_C99Benchmarking__Interval, , 1, inout, SACt_Interval__Interval, SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))) */ SAC_ND_FUNAP2( SACf_C99Benchmarking__start__SACt_C99Benchmarking__Interval, SAC_ND_ARG_inout( SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), SACt_Interval__Interval)) /* * ND_FUN_AP( SACrandom, SACp_flat_475, 2, in_nodesc, int, SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in_nodesc, int, SAC_SET_NT_USG( FAG, (SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))) */ SACp_flat_475 = SACrandom( SAC_ND_ARG_in_nodesc( SAC_SET_NT_USG( FAG, (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_ND_ARG_in_nodesc( SAC_SET_NT_USG( FAG, (SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int)); SAC_ND_ALLOC__DESC((SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 0) /* * ND_REFRESH__MIRROR( (SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_NOOP() SAC_ND_SET__RC((SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) SAC_ND_DEC_RC_FREE((SACp_emal_8398__flat_448, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, ) SAC_ND_ALLOC_BEGIN((SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 306, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) SAC_ND_PRF_S__DATA((SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_TOF, SAC_ND_READ((SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_DEC_RC_FREE((SACp_flat_475, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, ) SAC_ND_PRF_SxS__DATA((SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_DIV, SAC_ND_READ((SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0), 2147483648.0f) SAC_ND_ALLOC_BEGIN((SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 1, float) /* * ND_SET__SHAPE_arr( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 1) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 1), 54, "Assignment with 
incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) == 1), 54, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 1, float) /* * ND_CREATE__ARRAY__DATA( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 1, (SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) */ SAC_ND_WRITE_COPY( (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, SAC_ND_READ( (SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_8364__pinl_1801__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) /* * ND_ASSIGN( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 5, ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 5), 54, "Assignment with incompatible types found!"); SAC_ND_A_DESC( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, 
(NDI, (FLO, )))))))))), 1); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_ND_A_MIRROR_SIZE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_SIZE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_SHAPE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_SHAPE( 
(SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_ND_A_DESC_SIZE( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_SIZE( (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ND_ASSIGN__DATA( (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_8368_W1, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) /* * ND_ASSIGN( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 5, ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 5), 54, "Assignment with incompatible types found!"); SAC_ND_A_DESC( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, 
(NON, (NOT, (NDI, (FLO, )))))))))), 3); SAC_ND_A_MIRROR_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_ND_A_MIRROR_SIZE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_SIZE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); SAC_ND_A_DESC_SHAPE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_SHAPE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_ND_A_DESC_SIZE( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_SIZE( (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ND_ASSIGN__DATA( (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_8383_I, (AKS, (NHD, (NUQ, (FLO, 
(GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) /* * ND_FUN_AP( SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1, , 4, out, float, SAC_SET_NT_USG( FAG, (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), in, float, SAC_SET_NT_USG( FAG, (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), in, float, SAC_SET_NT_USG( FAG, (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), in, float, SAC_SET_NT_USG( FAG, (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))))) */ SAC_ND_FUNAP2( SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1, SAC_ND_ARG_out( SAC_SET_NT_USG( FAG, (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), float), SAC_ND_ARG_in( SAC_SET_NT_USG( FAG, (SACp_tcp_9404__emal_8383_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), float), SAC_ND_ARG_in( SAC_SET_NT_USG( FAG, (SACp_tcp_9405__emal_8368_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), float), SAC_ND_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_8362__flat_472, (AKS, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), float)) /* * ND_REFRESH__MIRROR( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7) */ SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, 
(NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); SAC_ND_A_MIRROR_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_ND_A_MIRROR_SIZE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ND_ALLOC_BEGIN((SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 54, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 54, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_PRF_SHAPE_A__DATA( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7) */ SAC_TR_PRF_PRINT( ("ND_PRF_SHAPE_A__DATA( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7)")) SAC_ND_WRITE_COPY( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_A_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0), ); SAC_ND_WRITE_COPY( 
(SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, SAC_ND_A_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1), ); SAC_ND_WRITE_COPY( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2, SAC_ND_A_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2), ); SAC_ND_WRITE_COPY( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, SAC_ND_A_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3), ); SAC_ND_WRITE_COPY( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4, SAC_ND_A_SHAPE( (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4), ); /* * ND_FUN_AP( SACf_C99Benchmarking__end__SACt_C99Benchmarking__Interval, , 1, inout, SACt_Interval__Interval, SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))) */ SAC_ND_FUNAP2( SACf_C99Benchmarking__end__SACt_C99Benchmarking__Interval, SAC_ND_ARG_inout( SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), SACt_Interval__Interval)) /* * ND_FUN_AP( SACf_C99Benchmarking__printResult__SACt_C99Benchmarking__Interval, , 1, inout, SACt_Interval__Interval, SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))) */ SAC_ND_FUNAP2( SACf_C99Benchmarking__printResult__SACt_C99Benchmarking__Interval, SAC_ND_ARG_inout( SAC_SET_NT_USG( FAG, (SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), SACt_Interval__Interval)) SAC_ND_DEC_RC_FREE((SACl_interval, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, ) /* * ND_IDXS2OFFSET_id( (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, 
(SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_DEC_RC_FREE((SACp_emal_8399__isaa_2620__rso_494_TheWorld, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), 1, ) SAC_ND_FREE((SACp_emal_8361__isaa_2693_O, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACl_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_ND_FREE((SACp_emal_8360__ivesli_8021, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_PRF_S__DATA((SACp_emal_8358__pinl_1816__flat_252, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_TOI, SAC_ND_READ((SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0)) SAC_ND_FREE((SACp_emal_8359__pinl_1813__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 
/* NOTE(review): sac2c-generated C (SAC name-tuple / ICM macros). Do not hand-edit;
 * regenerate from the .sac source instead. This span holds the tail of
 * SACf__MAIN__main (its ND_FUN_RET epilogue) followed by the complete module
 * initializer SACf__MAIN_CL_INIT__init, which takes no arguments and invokes the
 * CL_INIT constructors of the global objects World::TheWorld,
 * C99Benchmarking::TheBenchmarkObject, MTClock::TheMTClock,
 * Terminal::TheTerminal, and TermFile::stdout, in that order (order as emitted
 * in the generated ND_FUN_AP tracing comments). Semantics of the SAC_* macros
 * are defined by the SAC runtime headers — presumably refcount/descriptor
 * bookkeeping; confirm against the sac2c runtime if behavior matters here. */
(SACp_emal_8358__pinl_1816__flat_252, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), (SACp_emal_8358__pinl_1816__flat_252, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACf__MAIN__main, , 1, out, int, (SAC_arg_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * WITH-loop Count: 0 * _MAIN:_INIT::SACf__MAIN_CL_INIT__init(...) [ body ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACf__MAIN_CL_INIT__init, , 0) */ SAC_ND_DEF_FUN_BEGIN2( SACf__MAIN_CL_INIT__init, void, void) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { SAC_INIT_LOCAL_MEM() /* * ND_FUN_AP( SACf_World_CL_INIT__init_TheWorld__SACt_World__World, , 1, inout, SACt_World__World, SAC_SET_NT_USG( TFA, (SACo_World__TheWorld, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, )))))))))) */ SAC_ND_FUNAP2( SACf_World_CL_INIT__init_TheWorld__SACt_World__World, SAC_ND_ARG_inout( SAC_SET_NT_USG( TFA, (SACo_World__TheWorld, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, ))))))))), SACt_World__World)) /* * ND_FUN_AP( SACf_C99Benchmarking_CL_INIT__init_TheBenchmarkObject__SACt_C99Benchmarking__C99Benchmarking, , 1, inout, SACt_C99Benchmarking__C99Benchmarking, SAC_SET_NT_USG( TFA, (SACo_C99Benchmarking__TheBenchmarkObject, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, )))))))))) */ SAC_ND_FUNAP2( SACf_C99Benchmarking_CL_INIT__init_TheBenchmarkObject__SACt_C99Benchmarking__C99Benchmarking, SAC_ND_ARG_inout( SAC_SET_NT_USG( TFA, (SACo_C99Benchmarking__TheBenchmarkObject, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, ))))))))), SACt_C99Benchmarking__C99Benchmarking)) /* * ND_FUN_AP( SACf_MTClock_CL_INIT__init_TheMTClock__SACt_MTClock__MTClock, , 
/* NOTE(review): continuation — remaining CL_INIT calls and the function epilogue. */
1, inout, SACt_MTClock__MTClock, SAC_SET_NT_USG( TFA, (SACo_MTClock__TheMTClock, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, )))))))))) */ SAC_ND_FUNAP2( SACf_MTClock_CL_INIT__init_TheMTClock__SACt_MTClock__MTClock, SAC_ND_ARG_inout( SAC_SET_NT_USG( TFA, (SACo_MTClock__TheMTClock, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, ))))))))), SACt_MTClock__MTClock)) /* * ND_FUN_AP( SACf_Terminal_CL_INIT__init_TheTerminal__SACt_Terminal__Terminal, , 1, inout, SACt_Terminal__Terminal, SAC_SET_NT_USG( TFA, (SACo_Terminal__TheTerminal, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, )))))))))) */ SAC_ND_FUNAP2( SACf_Terminal_CL_INIT__init_TheTerminal__SACt_Terminal__Terminal, SAC_ND_ARG_inout( SAC_SET_NT_USG( TFA, (SACo_Terminal__TheTerminal, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, ))))))))), SACt_Terminal__Terminal)) /* * ND_FUN_AP( SACf_TermFile_CL_INIT__init_stdout__SACt_TermFile__TermFile, , 1, inout, SACt_TermFile__TermFile, SAC_SET_NT_USG( TFA, (SACo_TermFile__stdout, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, )))))))))) */ SAC_ND_FUNAP2( SACf_TermFile_CL_INIT__init_stdout__SACt_TermFile__TermFile, SAC_ND_ARG_inout( SAC_SET_NT_USG( TFA, (SACo_TermFile__stdout, (SCL, (HID, (NUQ, (INT, (GLO, (NON, (NOT, ))))))))), SACt_TermFile__TermFile)) /* * ND_FUN_RET( , 0) */ return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACf__MAIN_CL_INIT__init, , 0) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACf__MAIN_CLsacprelude_p__zero__f_1(...) 
/* NOTE(review): sac2c-generated C; do not hand-edit. This span holds the complete
 * prelude function SACf__MAIN_CLsacprelude_p__zero__f_1 (sacprelude::zero for a
 * rank-1 float argument): it declares the constant 0.0f, sets up the mirror
 * shape/size/dim (all 1) for its AKS argument A, releases A via
 * SAC_ND_DEC_RC_FREE, and returns the constant through SAC_ND_RET_out. The rest
 * of the span is the generated header of
 * SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1 (4 args: out float, in I,
 * W1, B), whose body continues beyond this excerpt. */
[ body ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACf__MAIN_CLsacprelude_p__zero__f_1, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACf__MAIN_CLsacprelude_p__zero__f_1, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { SAC_ND_DECL_CONST__DATA((SACp_emal_9281__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0.0f) /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, 1) */ const int SAC_ND_A_MIRROR_SHAPE( (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0) = 1; const int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = 1; const int SAC_ND_A_MIRROR_DIM( (SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = 1; SAC_INIT_LOCAL_MEM() SAC_ND_DEC_RC_FREE((SACl_A, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, ) /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9281__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9281__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACf__MAIN_CLsacprelude_p__zero__f_1, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKS, (NHD, (NUQ, 
/* NOTE(review): continuation — end of zero__f_1, then the gconv function header (body continues past this excerpt). */
(FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * WITH-loop Count: 0 * _MAIN::SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1(...) [ body ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1, , 4, out, float, (SAC_arg_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1, void, SAC_ND_PARAM_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { SAC_ND_DECL_CONST__DATA((SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1) /* * ND_DECL( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( 
(SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; SAC_ND_DECL_CONST__DATA((SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 2) SAC_ND_DECL_CONST__DATA((SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 3) SAC_ND_DECL_CONST__DATA((SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 4) /* * ND_DECL( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9348__pinl_2037__flat_68, 
(SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) 
SAC_NOTHING() /* * ND_DECL( (SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, 
(INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9334__pinl_2271_z, 
(SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9324__wlbsc_1562_sc_e, 
(SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) 
SAC_NOTHING() /* * ND_DECL( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() SAC_ND_DECL_CONST__DATA((SACp_emal_9315__flat_67, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0.0f) /* * ND_DECL( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( 
(SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, -5) */ SAC_ND_DECL__DATA( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 3; /* * ND_DECL( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, -7) */ SAC_ND_DECL__DATA( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9296_O, 
(AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 5; /* * ND_DECL( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, -7) */ SAC_ND_DECL__DATA( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( 
(SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 5; /* * ND_DECL( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 1; SAC_ND_DECL_CONST__DATA((SACp_emal_9292__cnstass_8342_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_9291__cnstass_8341_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_9290__cnstass_8340_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) SAC_ND_DECL_CONST__DATA((SACp_emal_9289__cnstass_8339_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) 
SAC_ND_DECL_CONST__DATA((SACp_emal_9288__cnstass_8338_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) /* * ND_DECL( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, -7) */ SAC_ND_DECL__DATA( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = 5; /* * ND_DECL( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, -7) */ SAC_ND_DECL__DATA( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, 
(NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = 5; /* * ND_DECL( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( 
(SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 3) */ SAC_ND_DECL__DATA( (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 3; const int SAC_ND_A_MIRROR_SIZE( (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 3; const int SAC_ND_A_MIRROR_DIM( (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ 
SAC_ND_DECL__DATA( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 3) */ SAC_ND_DECL__DATA( (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 3; const int SAC_ND_A_MIRROR_SIZE( (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 3; const int SAC_ND_A_MIRROR_DIM( (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( 
(SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ SAC_ND_DECL__DATA( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const 
int SAC_ND_A_MIRROR_DIM( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; /* * ND_DECL__MIRROR_PARAM( (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, 1) */ const int SAC_ND_A_MIRROR_SHAPE( (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0) = 1; const int SAC_ND_A_MIRROR_SIZE( (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = 1; const int SAC_ND_A_MIRROR_DIM( (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = 1; /* * ND_DECL__MIRROR_PARAM( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7) */ int SAC_ND_A_MIRROR_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, 
(NOT, (NDI, (FLO, ))))))))))) = 5; /* * ND_DECL__MIRROR_PARAM( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7) */ int SAC_ND_A_MIRROR_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_ND_A_MIRROR_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = 5; SAC_INIT_LOCAL_MEM() SAC_ND_ALLOC_BEGIN((SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 19, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( 
(SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 19, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_PRF_SHAPE_A__DATA( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7) */ SAC_TR_PRF_PRINT( ("ND_PRF_SHAPE_A__DATA( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7)")) SAC_ND_WRITE_COPY( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_A_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0), ); SAC_ND_WRITE_COPY( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, SAC_ND_A_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1), ); SAC_ND_WRITE_COPY( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2, SAC_ND_A_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2), ); SAC_ND_WRITE_COPY( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, SAC_ND_A_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3), ); SAC_ND_WRITE_COPY( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4, SAC_ND_A_SHAPE( (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4), ); SAC_ND_ALLOC_BEGIN((SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9354__isaa_4937_I, 
(AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 19, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 19, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_PRF_SHAPE_A__DATA( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7) */ SAC_TR_PRF_PRINT( ("ND_PRF_SHAPE_A__DATA( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7)")) SAC_ND_WRITE_COPY( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0), ); SAC_ND_WRITE_COPY( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1), ); SAC_ND_WRITE_COPY( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2, SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2), ); SAC_ND_WRITE_COPY( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3), ); SAC_ND_WRITE_COPY( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 4, SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4), ); /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 
(SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 149, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 149, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 149, 
"1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 149, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_PRF_SxS__DATA((SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_SUB, SAC_ND_READ((SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 
SAC_ND_READ((SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 149, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_PRF_SxS__DATA((SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_SUB, SAC_ND_READ((SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_PRF_SxS__DATA((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_MAX, 0, SAC_ND_READ((SACp_emal_9347__pinl_2046__flat_302, 
(SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_ALLOC_BEGIN((SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 197, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 197, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) SAC_ND_COPY__DATA((SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, 
(NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 197, "2nd argument of _idx_modarray_AxSxS_ is not a scalar!"); { int SAC_idx; SAC_idx = SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_ND_WRITE_COPY( (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_idx, SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) , ) } SAC_ND_PRF_SxS__DATA((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_MAX, 0, SAC_ND_READ((SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_ALLOC_BEGIN((SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 197, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 197, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) SAC_ND_COPY__DATA((SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * 
ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 197, "2nd argument of _idx_modarray_AxSxS_ is not a scalar!"); { int SAC_idx; SAC_idx = SAC_ND_READ( (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_ND_WRITE_COPY( (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_idx, SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) , ) } SAC_ND_PRF_SxS__DATA((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_MAX, 0, SAC_ND_READ((SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_ALLOC_BEGIN((SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, 
(AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 197, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 197, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) SAC_ND_COPY__DATA((SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 
197, "2nd argument of _idx_modarray_AxSxS_ is not a scalar!"); { int SAC_idx; SAC_idx = SAC_ND_READ( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_ND_WRITE_COPY( (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_idx, SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) , ) } SAC_ND_PRF_SxS__DATA((SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_MAX, 0, SAC_ND_READ((SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_ALLOC_BEGIN((SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 197, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 197, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) SAC_ND_COPY__DATA((SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 
(SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_MODARRAY_AxSxS__DATA( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 197, "2nd argument of _idx_modarray_AxSxS_ is not a scalar!"); { int SAC_idx; SAC_idx = SAC_ND_READ( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_ND_WRITE_COPY( (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_idx, SAC_ND_READ( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) , ) } SAC_ND_PRF_SxS__DATA((SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_MAX, 0, SAC_ND_READ((SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, 
(NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9330__emec_8350__wlpg_1224_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9333__emec_8351__wlpg_1219_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 
0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, 
(NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9336__emec_8352__wlpg_1214_nmax, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, 
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9356__isaa_4930_B, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) 
SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9351__pinl_2001__eat_517__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * 
ND_PRF_IDX_SEL__DATA( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9339__emec_8353__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9350__pinl_2001__eat_517__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_ALLOC_BEGIN((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 26, "Assignment with incompatible types found!"); SAC_NOOP() 
SAC_ND_ALLOC_END((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 26, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ( (SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9352__pinl_2001__eat_517__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_PRF_SxS__DATA((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_MIN, SAC_ND_READ((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_ALLOC_BEGIN((SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) /* * ND_COPY__SHAPE( 
(SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 5), 25, "Assignment with incompatible types found!"); { int SAC_size = 1; SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_SHAPE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4); SAC_ND_A_DESC_SIZE( 
(SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_MIRROR_SIZE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_size * SAC_ND_A_SIZE( (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_MIRROR_SIZE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) >= 0), 25, "Array with size <0 found!"); } SAC_ND_ALLOC_END((SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) SAC_ND_ALLOC_BEGIN((SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_o, (SCL, (NHD, 
(NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 25, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_PF_BEGIN_WITH(modarray) /* * WL_SCHEDULE__BEGIN( 5) */ { int SAC_WL_MT_SCHEDULE_START( 0); int SAC_WL_MT_SCHEDULE_STOP( 0); int SAC_WL_MT_SCHEDULE_START( 1); int SAC_WL_MT_SCHEDULE_STOP( 1); int SAC_WL_MT_SCHEDULE_START( 2); int SAC_WL_MT_SCHEDULE_STOP( 2); int SAC_WL_MT_SCHEDULE_START( 3); int SAC_WL_MT_SCHEDULE_STOP( 3); int SAC_WL_MT_SCHEDULE_START( 4); int SAC_WL_MT_SCHEDULE_STOP( 4); /* * 
WL_DECLARE_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5) */ int SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); int SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); /* * WL_DEFINE_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5) */ { int SAC_i; SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = 1 * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = 1 * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, 
(AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = 1 * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = 1 * SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = 1; } /* * MT_SCHEDULER_BEGIN( 0, 5, 0, SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, 0, 0, SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_WL_MT_SCHEDULE_START( 0) = 0; SAC_WL_MT_SCHEDULE_STOP( 0) = SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 1) = SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_STOP( 1) = SAC_ND_READ( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 2) = 0; SAC_WL_MT_SCHEDULE_STOP( 2) = SAC_ND_READ( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 0); SAC_WL_MT_SCHEDULE_START( 3) = 0; SAC_WL_MT_SCHEDULE_STOP( 3) = SAC_ND_READ( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 4) = 0; SAC_WL_MT_SCHEDULE_STOP( 4) = SAC_ND_READ( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * WL_INIT_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_WL_MT_SCHEDULE_START( 0) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) + SAC_WL_MT_SCHEDULE_START( 1) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) + SAC_WL_MT_SCHEDULE_START( 2) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) + SAC_WL_MT_SCHEDULE_START( 3) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) + SAC_WL_MT_SCHEDULE_START( 4) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(1, 
(SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) /* * WL_SET_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, 5, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_WL_MT_SCHEDULE_START( 4) ) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_ND_ALLOC_BEGIN((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, 
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 69, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * WL_ASSIGN( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, 
)))))))))), 0, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == (SAC_ND_A_DIM( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) - SAC_ND_A_SIZE( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))))), 26, "WL expression with illegal dimension found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4)), 26, "WL expression with illegal size found!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_WL_INC_OFFSET((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) SAC_WL_GRID_UNROLL_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 
SAC_ND_READ((SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 
(SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) /* * MT_SCHEDULER_END( 0, 5, 0, SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, 0, 0, SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ /* * MT_SCHEDULER_BEGIN( 0, 5, 0, 0, SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, 0, SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_WL_MT_SCHEDULE_START( 0) = 0; SAC_WL_MT_SCHEDULE_STOP( 0) = SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 1) = 0; SAC_WL_MT_SCHEDULE_STOP( 1) = SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 2) = SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_STOP( 2) = SAC_ND_READ( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 3) = 0; SAC_WL_MT_SCHEDULE_STOP( 3) = SAC_ND_READ( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 4) = 0; SAC_WL_MT_SCHEDULE_STOP( 4) = SAC_ND_READ( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * WL_INIT_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_WL_MT_SCHEDULE_START( 0) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) + SAC_WL_MT_SCHEDULE_START( 1) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) + SAC_WL_MT_SCHEDULE_START( 2) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) + SAC_WL_MT_SCHEDULE_START( 3) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) + SAC_WL_MT_SCHEDULE_START( 4) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 
SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) /* * WL_SET_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 3, 5, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_WL_MT_SCHEDULE_START( 4) ) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, 
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_ND_ALLOC_BEGIN((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, 
(NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 69, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 
SAC_ND_READ( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * WL_ASSIGN( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == (SAC_ND_A_DIM( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) - SAC_ND_A_SIZE( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))))), 26, "WL expression with illegal dimension found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4)), 26, "WL expression with illegal size found!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_WL_INC_OFFSET((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) SAC_WL_GRID_UNROLL_END(4, (SACp_flat_19, 
(AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) /* * MT_SCHEDULER_END( 0, 5, 0, 0, SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, 0, SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ /* * MT_SCHEDULER_BEGIN( 0, 5, 0, 0, 0, SAC_ND_READ( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ 
SAC_WL_MT_SCHEDULE_START( 0) = 0; SAC_WL_MT_SCHEDULE_STOP( 0) = SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 1) = 0; SAC_WL_MT_SCHEDULE_STOP( 1) = SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 2) = 0; SAC_WL_MT_SCHEDULE_STOP( 2) = SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 3) = SAC_ND_READ( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_STOP( 3) = SAC_ND_READ( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 4) = 0; SAC_WL_MT_SCHEDULE_STOP( 4) = SAC_ND_READ( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * WL_INIT_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_WL_MT_SCHEDULE_START( 0) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) + SAC_WL_MT_SCHEDULE_START( 1) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) + SAC_WL_MT_SCHEDULE_START( 2) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) + SAC_WL_MT_SCHEDULE_START( 3) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) + SAC_WL_MT_SCHEDULE_START( 4) * 
SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 
0), 1) SAC_WL_GRID_UNROLL_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) /* * WL_SET_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, 5, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_WL_MT_SCHEDULE_START( 4) ) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_ND_ALLOC_BEGIN((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, 
(NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, ))))))))))) == 0), 69, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * WL_ASSIGN( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == (SAC_ND_A_DIM( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) - SAC_ND_A_SIZE( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))))), 26, "WL expression with illegal dimension found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4)), 26, "WL expression with illegal size found!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, 
(GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_WL_INC_OFFSET((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) SAC_WL_GRID_UNROLL_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) /* * MT_SCHEDULER_END( 0, 5, 0, 0, 0, SAC_ND_READ( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ /* * MT_SCHEDULER_BEGIN( 0, 5, 0, 0, 0, 0, SAC_ND_READ( (SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, 
(NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_WL_MT_SCHEDULE_START( 0) = 0; SAC_WL_MT_SCHEDULE_STOP( 0) = SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 1) = 0; SAC_WL_MT_SCHEDULE_STOP( 1) = SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 2) = 0; SAC_WL_MT_SCHEDULE_STOP( 2) = SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 3) = 0; SAC_WL_MT_SCHEDULE_STOP( 3) = SAC_ND_READ( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 4) = SAC_ND_READ( (SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_STOP( 4) = SAC_ND_READ( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * WL_INIT_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_WL_MT_SCHEDULE_START( 0) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) + SAC_WL_MT_SCHEDULE_START( 1) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) + SAC_WL_MT_SCHEDULE_START( 2) * SAC_WL_SHAPE_FACTOR( 
(SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) + SAC_WL_MT_SCHEDULE_START( 3) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) + SAC_WL_MT_SCHEDULE_START( 4) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, 
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) /* * WL_SET_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, 5, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_WL_MT_SCHEDULE_START( 4) ) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, 
(AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_ND_ALLOC_BEGIN((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( 
(SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 69, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, 
(AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 69, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9313__ivesli_8135, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * WL_ASSIGN( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == (SAC_ND_A_DIM( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) - SAC_ND_A_SIZE( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))))), 26, "WL expression with illegal dimension found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4)), 26, "WL expression with illegal size found!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( 
(SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_WL_INC_OFFSET((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9312__pinl_2290__flat_19, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) SAC_WL_GRID_UNROLL_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_READ((SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 
SAC_ND_READ((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) /* * MT_SCHEDULER_END( 0, 5, 0, 0, 0, 0, SAC_ND_READ( (SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ /* * MT_SCHEDULER_BEGIN( 0, 5, 0, 0, 0, 0, 0, SAC_ND_READ( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( 
(SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_WL_MT_SCHEDULE_START( 0) = 0; SAC_WL_MT_SCHEDULE_STOP( 0) = SAC_ND_READ( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 1) = 0; SAC_WL_MT_SCHEDULE_STOP( 1) = SAC_ND_READ( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 2) = 0; SAC_WL_MT_SCHEDULE_STOP( 2) = SAC_ND_READ( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 3) = 0; SAC_WL_MT_SCHEDULE_STOP( 3) = SAC_ND_READ( (SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 4) = 0; SAC_WL_MT_SCHEDULE_STOP( 4) = SAC_ND_READ( (SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * WL_INIT_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_WL_MT_SCHEDULE_START( 0) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) + 
SAC_WL_MT_SCHEDULE_START( 1) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) + SAC_WL_MT_SCHEDULE_START( 2) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) + SAC_WL_MT_SCHEDULE_START( 3) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) + SAC_WL_MT_SCHEDULE_START( 4) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) /* * WL_SET_OFFSET( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, 5, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * ( SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ 
SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) )+ SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_WL_MT_SCHEDULE_START( 4) ) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_WL_STRIDE_LOOP0_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_ND_INC_RC((SACp_emal_9315__flat_67, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) SAC_ND_ALLOC_BEGIN((SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 
(SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, 
(SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 3, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3, SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 3), 26, "Assignment with incompatible types found!"); { int SAC_size = 1; SAC_size *= SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_size *= SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_size *= SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_ND_A_DESC_SIZE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_MIRROR_SIZE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_size; SAC_ASSURE_TYPE_LINE ((SAC_ND_A_MIRROR_SIZE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) >= 0), 26, "Array with size <0 found!"); } SAC_ND_ALLOC_END((SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 3, 
float) SAC_ND_ALLOC_BEGIN((SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 26, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 26, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 26, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 26, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, 
(INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_PF_BEGIN_WITH(genarray) /* * WL_SCHEDULE__BEGIN( 3) */ { int SAC_WL_MT_SCHEDULE_START( 0); int SAC_WL_MT_SCHEDULE_STOP( 0); int SAC_WL_MT_SCHEDULE_START( 1); int SAC_WL_MT_SCHEDULE_STOP( 1); int SAC_WL_MT_SCHEDULE_START( 2); int SAC_WL_MT_SCHEDULE_STOP( 2); /* * WL_DECLARE_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -5, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) */ int SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); /* * WL_DEFINE_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -5, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) */ { int SAC_i; SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = 1 * SAC_ND_A_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_A_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = 1 * SAC_ND_A_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = 1; } /* * MT_SCHEDULER_BEGIN( 0, 3, 0, 0, 0, SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( 
(SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_WL_MT_SCHEDULE_START( 0) = 0; SAC_WL_MT_SCHEDULE_STOP( 0) = SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 1) = 0; SAC_WL_MT_SCHEDULE_STOP( 1) = SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 2) = 0; SAC_WL_MT_SCHEDULE_STOP( 2) = SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * WL_INIT_OFFSET( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -5, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) */ SAC_ND_WRITE( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_WL_MT_SCHEDULE_START( 0) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) + SAC_WL_MT_SCHEDULE_START( 1) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) + SAC_WL_MT_SCHEDULE_START( 2) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_WL_STRIDE_LOOP0_BEGIN(0, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(0, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, 
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(1, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(1, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) /* * WL_SET_OFFSET( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 3, (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -5, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_A_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) * ( SAC_ND_A_SHAPE( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) * SAC_ND_READ( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_WL_MT_SCHEDULE_START( 2) ) * SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_WL_STRIDE_LOOP0_BEGIN(2, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(2, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_ND_ALLOC_BEGIN((SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, 
(NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_PRF_SxS__DATA((SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_ADD, SAC_ND_READ((SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_ALLOC_BEGIN((SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 152, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( 
(SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9307__ivesli_8133, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_ALLOC_BEGIN((SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_id( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, 
(NDI, (INT, )))))))))), 5, (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) * SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_PRF_SxS__DATA((SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), SAC_ND_PRF_ADD, SAC_ND_READ((SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ((SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) SAC_ND_ALLOC_BEGIN((SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 152, 
"Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9304__ivesli_8128, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_PRF_SxS__DATA((SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_MUL, SAC_ND_READ((SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0), SAC_ND_READ((SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0)) SAC_ND_FREE((SACp_emal_9302__pinl_2076__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) /* * WL_ASSIGN( 
(SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -5, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == (SAC_ND_A_DIM( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) - SAC_ND_A_SIZE( (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))))), 26, "WL expression with illegal dimension found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_WL_SHAPE_FACTOR( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2)), 26, "WL expression with illegal size found!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_WL_INC_OFFSET((SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_emal_9305__pinl_2061__flat_140, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) SAC_WL_GRID_UNROLL_END(2, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(2, 
(SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(1, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(1, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(0, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(0, (SACp_flat_119, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) /* * MT_SCHEDULER_END( 0, 3, 0, 0, 0, SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ /* * WL_SCHEDULE__END( 3) */ } SAC_PF_END_WITH(genarray) SAC_ND_LABEL(_comp_9407_SAC_label) SAC_ND_FREE((SACl_i, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_kh, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_kw, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, 
(NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_wlidx_8018__flat_21, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9309__ivesli_8126, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9310__ivesli_8131, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_ALLOC_BEGIN((SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 629, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 629, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 629, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_ASSIGN( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9315__flat_67, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 629, "Assignment with incompatible types found!"); SAC_NOOP() SAC_NOOP() SAC_NOOP() SAC_ND_ASSIGN__DATA( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9315__flat_67, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_PF_BEGIN_WITH(fold) /* * WL_SCHEDULE__BEGIN( 3) */ { int SAC_WL_MT_SCHEDULE_START( 0); int SAC_WL_MT_SCHEDULE_STOP( 0); int SAC_WL_MT_SCHEDULE_START( 1); int SAC_WL_MT_SCHEDULE_STOP( 1); int SAC_WL_MT_SCHEDULE_START( 2); int SAC_WL_MT_SCHEDULE_STOP( 2); /* * MT_SCHEDULER_BEGIN( 0, 3, 0, 0, 0, SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_WL_MT_SCHEDULE_START( 0) = 0; SAC_WL_MT_SCHEDULE_STOP( 0) = SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 1) = 0; SAC_WL_MT_SCHEDULE_STOP( 1) = SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_MT_SCHEDULE_START( 2) = 0; SAC_WL_MT_SCHEDULE_STOP( 2) = SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_WL_STRIDE_LOOP0_BEGIN(0, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2090__eat_515, 
(SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(0, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(1, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(1, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP0_BEGIN(2, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_BEGIN(2, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_NOOP() SAC_ND_ALLOC_BEGIN((SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 629, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9299__ivesli_8124, (SCL, 
(NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_IDXS2OFFSET_arr( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_ND_WRITE( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) * ( SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) * SAC_ND_READ( (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 629, "Assignment with incompatible types found!"); 
SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -5, (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -5, (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 629, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9299__ivesli_8124, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_PRF_SxS__DATA((SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_ADD, SAC_ND_READ((SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0), SAC_ND_READ((SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0)) SAC_ND_DEC_RC_FREE((SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, ) /* * ND_ASSIGN( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, 
(FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 117, "Assignment with incompatible types found!"); SAC_NOOP() SAC_NOOP() SAC_NOOP() SAC_ND_ASSIGN__DATA( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9298__pinl_2086__flat_3951, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) /* * WL_FOLD( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3, (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_TR_WL_PRINT( ("index vector [%d, %d, %d] -- fold", SAC_ND_READ( (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0))); /* fold operation */ SAC_WL_GRID_UNROLL_END(2, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(2, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(1, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(1, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(0, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(0, (SACp_pinl_2085_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) /* * MT_SCHEDULER_END( 0, 3, 0, 0, 0, SAC_ND_READ( (SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ /* * WL_SCHEDULE__END( 3) */ } SAC_PF_END_WITH(fold) SAC_ND_LABEL(_comp_9408_SAC_label) SAC_ND_FREE((SACp_emal_9300__flat_21, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_ND_FREE((SACp_pinl_2088__eat_513, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_2089__eat_514, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_2090__eat_515, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) /* * 
WL_ASSIGN( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == (SAC_ND_A_DIM( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) - SAC_ND_A_SIZE( (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))))), 1, "WL expression with illegal dimension found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_WL_SHAPE_FACTOR( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4)), 1, "WL expression with illegal size found!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_DEC_RC_FREE((SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, ) SAC_WL_INC_OFFSET((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_2087_res, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) SAC_WL_GRID_UNROLL_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(4, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), 0, SAC_ND_READ((SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(3, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(2, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(1, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) SAC_WL_GRID_UNROLL_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 1) SAC_WL_STRIDE_LOOP_END(0, (SACp_flat_19, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), 0, SAC_ND_READ((SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 1) /* * MT_SCHEDULER_END( 0, 5, 0, 0, 0, 0, 0, SAC_ND_READ( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ /* * WL_SCHEDULE__END( 5) */ } SAC_PF_END_WITH(modarray) SAC_ND_LABEL(_comp_9409_SAC_label) SAC_ND_FREE((SACp_emal_9341__pinl_2047__flat_301__SSA4_8, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9343__pinl_2047__flat_301__SSA4_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9327__wlbsc_1646_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9328__pinl_2279_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9325__wlbsc_1602_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9326__wlbsc_1604_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9331__pinl_2275_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9322__wlbsc_1558_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9323__wlbsc_1560_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) 
SAC_ND_FREE((SACp_emal_9324__wlbsc_1562_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9334__pinl_2271_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9318__wlbsc_1514_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9319__wlbsc_1516_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9320__wlbsc_1518_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9321__wlbsc_1520_sc_e, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9337__pinl_2267_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9340__pinl_2263_z, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_n, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_g, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_o, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_h, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_w, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9317__flat_103, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9315__flat_67, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_ND_FREE((SACp_emal_9348__pinl_2037__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9349__pinl_2025__flat_68, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9355__isaa_4934_W1, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_wlidx_8017_O, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, 
)))))))))), ) SAC_ND_DEC_RC_FREE((SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, ) SAC_ND_DEC_RC_FREE((SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, ) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), SAC_ND_READ( (SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_DEC_RC_FREE((SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, ) SAC_ND_FREE((SACp_emal_9353__flat_6, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_CUDA_ALLOC_BEGIN((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) /* * ND_COPY__SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), -7, (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9294__iwlmem_8316_dev, 
(AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) == 5), 152, "Assignment with incompatible types found!"); { int SAC_size = 1; SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) = SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) = SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) = SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) = SAC_ND_A_SHAPE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4); SAC_ND_A_DESC_SIZE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, 
(NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = SAC_ND_A_MIRROR_SIZE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = SAC_size * SAC_ND_A_SIZE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_MIRROR_SIZE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) >= 0), 152, "Array with size <0 found!"); } SAC_CUDA_ALLOC_END((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) /* * CUDA_MEM_TRANSFER( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, cudaMemcpyHostToDevice) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) == SAC_ND_A_SIZE( (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))))), 152, "cudaMemcpy: Destionation and source arrays should have equal sizes!"); SAC_CUDA_MEM_TRANSFER((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), (SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, cudaMemcpyHostToDevice) SAC_ND_FREE((SACp_emal_9296_O, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_CUDA_ALLOC_BEGIN((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) == 1), 19, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE 
((SAC_ND_A_SHAPE( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) == 5), 19, "Assignment with incompatible types found!"); SAC_NOOP() SAC_CUDA_ALLOC_END((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 1, int) /* * CUDA_MEM_TRANSFER( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, cudaMemcpyHostToDevice) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) == SAC_ND_A_SIZE( (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))), 19, "cudaMemcpy: Destionation and source arrays should have equal sizes!"); SAC_CUDA_MEM_TRANSFER((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), (SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, cudaMemcpyHostToDevice) SAC_ND_FREE((SACp_emal_9354__isaa_4937_I, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_CUDA_ALLOC_BEGIN((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 5, SAC_ND_READ( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, 
(NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) == 5), 28, "Assignment with incompatible types found!"); { int SAC_size = 1; SAC_size *= SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = SAC_ND_READ( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_size *= SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) = SAC_ND_READ( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_size *= SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) = SAC_ND_READ( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_size *= SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) = SAC_ND_READ( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_size *= SAC_ND_A_MIRROR_SHAPE( 
(SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) = SAC_ND_READ( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); SAC_ND_A_DESC_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = SAC_ND_A_MIRROR_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = SAC_size; SAC_ASSURE_TYPE_LINE ((SAC_ND_A_MIRROR_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) >= 0), 28, "Array with size <0 found!"); } SAC_CUDA_ALLOC_END((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1, 5, float) SAC_ND_ALLOC_BEGIN((SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) /* * ND_SET__SHAPE_arr( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 5) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 1), 28, "Assignment with incompatible types found!"); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SHAPE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) == 5), 28, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 1, int) SAC_ND_ALLOC_BEGIN((SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 28, 
"Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 28, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 28, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 28, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE 
((SAC_ND_A_DIM( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 28, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) SAC_ND_ALLOC_BEGIN((SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * ND_SET__SHAPE_arr( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 28, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1, 0, int) /* * CUDA_GRID_BLOCK( 15, SAC_ND_READ( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9292__cnstass_8342_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9291__cnstass_8341_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9290__cnstass_8340_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( (SACp_emal_9289__cnstass_8339_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), SAC_ND_READ( 
(SACp_emal_9288__cnstass_8338_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), 0, 0, 0, 0, 0) */ { dim3 grid((SAC_ND_READ( (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_9289__cnstass_8339_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_9288__cnstass_8338_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0))); if (grid.x > 2147483647 || grid.y > 65535 || grid.z > 65535) { SAC_RuntimeError("CUDA XYZ grid dimension exceeds compute compatibilities max value: 2147483647 x 65535 x 65535"); } dim3 block((SAC_ND_READ( (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_9292__cnstass_8342_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_9291__cnstass_8341_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)), (SAC_ND_READ( (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)-SAC_ND_READ( (SACp_emal_9290__cnstass_8340_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0))); if (block.x > 2147483647 || block.y > 65535 || block.z > 65535) { SAC_RuntimeError("CUDA XYZ block dimension exceeds compute compatibilities max value: 2147483647 x 65535 x 65535"); } /* * CUDA_GLOBALFUN_AP( SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X, 14, inout, float, -7, SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, 
))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9292__cnstass_8342_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9291__cnstass_8341_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9290__cnstass_8340_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9289__cnstass_8339_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, int, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9288__cnstass_8338_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), in, float, 0, SAC_SET_NT_USG( FAG, (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), in, float, -7, SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), in, int, 1, SAC_SET_NT_USG( FAG, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))) */ SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X<<<grid, block>>>( SAC_CUDA_ARG_inout( SAC_SET_NT_USG( FAG, 
(SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), float), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 0), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 1), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 2), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 3), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 4), SAC_ND_A_MIRROR_SIZE(SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_ND_A_MIRROR_DIM(SAC_SET_NT_USG( FAG, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9292__cnstass_8342_lb, (SCL, (NHD, 
(NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9291__cnstass_8341_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9290__cnstass_8340_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9289__cnstass_8339_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9288__cnstass_8338_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))), int), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))), float), SAC_CUDA_ARG_in( SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), float), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 0), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 1), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 2), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 3), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 4), SAC_ND_A_MIRROR_SIZE(SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_ND_A_MIRROR_DIM(SAC_SET_NT_USG( FAG, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_CUDA_ARG_in( 
SAC_SET_NT_USG( FAG, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), int), SAC_ND_A_MIRROR_SHAPE(SAC_SET_NT_USG( FAG, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))), 0), SAC_ND_A_MIRROR_SIZE(SAC_SET_NT_USG( FAG, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))))), SAC_ND_A_MIRROR_DIM(SAC_SET_NT_USG( FAG, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))))); SAC_CUDA_GET_LAST_KERNEL_ERROR(); } /* * ND_REFRESH__MIRROR( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), -7) */ SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4); 
SAC_ND_A_MIRROR_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))); SAC_ND_FREE((SACp_emal_9342__pinl_2046__flat_302__SSA4_4, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9344__pinl_2046__flat_302__SSA4_3, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9345__pinl_2046__flat_302__SSA4_2, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9346__pinl_2046__flat_302__SSA4_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9347__pinl_2046__flat_302, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9292__cnstass_8342_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9291__cnstass_8341_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9290__cnstass_8340_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9289__cnstass_8339_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9288__cnstass_8338_lb, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, 
(INT, )))))))))), ) SAC_ND_FREE((SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_ND_FREE((SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_CUDA_FREE((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) SAC_CUDA_FREE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) SAC_ND_ALLOC_BEGIN((SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) /* * ND_COPY__SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), -7, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), -7) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 5), 28, "Assignment with incompatible types found!"); { int SAC_size = 1; SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 0); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 1); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 
2) = SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 2); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 3); SAC_ND_A_MIRROR_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), 4); SAC_ND_A_DESC_SIZE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_MIRROR_SIZE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) = SAC_size * SAC_ND_A_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, ))))))))))); SAC_ASSURE_TYPE_LINE ((SAC_ND_A_MIRROR_SIZE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) >= 0), 28, "Array with size <0 found!"); } SAC_ND_ALLOC_END((SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 5, float) /* * CUDA_MEM_TRANSFER( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, cudaMemcpyDeviceToHost) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_SIZE( (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == SAC_ND_A_SIZE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, 
)))))))))))), 28, "cudaMemcpy: Destionation and source arrays should have equal sizes!"); SAC_CUDA_MEM_TRANSFER((SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), float, cudaMemcpyDeviceToHost) SAC_CUDA_FREE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (OTH, )))))))))), ) /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9282_O__SSA0_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACf__MAIN__gconv__f_X_X_X_X_X__f_X_X_X_X_X__f_1, , 4, out, float, (SAC_arg_1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_I, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_W1, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_B, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACf__MAIN_CLsacprelude_p__zero__f_X(...) 
 * NOTE(review): generated code (SAC_ND_* ICM macros — presumably sac2c output; edit the generator's source, not this file). The function below is the rank-1 float zero(): it mirrors A's descriptor (dim fixed at 1), releases A via SAC_ND_DEC_RC_FREE, and returns the scalar float constant 0.0f.
[ body ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACf__MAIN_CLsacprelude_p__zero__f_X, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACf__MAIN_CLsacprelude_p__zero__f_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { SAC_ND_DECL_CONST__DATA((SACp_emal_9357__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0.0f) /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -3) */ int SAC_ND_A_MIRROR_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = 1; SAC_INIT_LOCAL_MEM() SAC_ND_DEC_RC_FREE((SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, ) /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9357__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9357__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* *
 * End of the rank-1 zero() variant; the 5-D (rank-5) variant, SACf__MAIN_CLsacprelude_p__zero__f_X_X_X_X_X, follows. It has the same contract (consume A, return 0.0f) but mirrors five shape components.
ND_FUN_DEF_END( SACf__MAIN_CLsacprelude_p__zero__f_X, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * WITH-loop Count: 0 * _MAIN:sacprelude_p::SACf__MAIN_CLsacprelude_p__zero__f_X_X_X_X_X(...) [ body ] ****************************************************************************/ /* * ND_FUN_DEF_BEGIN( SACf__MAIN_CLsacprelude_p__zero__f_X_X_X_X_X, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_DEF_FUN_BEGIN2( SACf__MAIN_CLsacprelude_p__zero__f_X_X_X_X_X, void, SAC_ND_PARAM_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_ND_PARAM_in( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float)) { SAC_HM_DEFINE_THREAD_STATUS( SAC_HM_single_threaded) SAC_MT_DEFINE_ST_SELF() { SAC_ND_DECL_CONST__DATA((SACp_emal_9358__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0.0f) /* * ND_DECL__MIRROR_PARAM( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), -7) */ int SAC_ND_A_MIRROR_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0) = SAC_ND_A_DESC_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0); int SAC_ND_A_MIRROR_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1) = SAC_ND_A_DESC_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1); int SAC_ND_A_MIRROR_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2) = SAC_ND_A_DESC_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 2); int
/* NOTE(review): continuation of the 5-D zero() variant — mirrors the remaining shape components (dims 3 and 4), the total size, and the fixed dim (5) from A's descriptor, then releases A and returns the 0.0f scalar. */
SAC_ND_A_MIRROR_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3) = SAC_ND_A_DESC_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 3); int SAC_ND_A_MIRROR_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4) = SAC_ND_A_DESC_SHAPE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 4); int SAC_ND_A_MIRROR_SIZE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = SAC_ND_A_DESC_SIZE( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))); const int SAC_ND_A_MIRROR_DIM( (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) = 5; SAC_INIT_LOCAL_MEM() SAC_ND_DEC_RC_FREE((SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 1, ) /* * ND_FUN_RET( , 1, out, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9358__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) */ SAC_ND_RET_out( (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), (SACp_emal_9358__flat_284, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) return; SAC_CLEANUP_LOCAL_MEM() } /* * ND_FUN_DEF_END( SACf__MAIN_CLsacprelude_p__zero__f_X_X_X_X_X, , 2, out, float, (SAC_arg_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), in, float, (SACl_A, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, ))))))))))) */ } SAC_ND_FUN_DEF_END2() /**************************************************************************** * WITH-loop Count: 0 * _MAIN::SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X(...)
 * --- reviewer note (generated code; do not hand-edit) ---
 * The __global__ kernel below (cuknl_9403) realizes a rank-5 with-loop:
 *   - SAC_CUDA_WLIDS_HD maps THREADIDX_X/Y/Z and BLOCKIDX_X/Y onto the
 *     five loop indices (w, h, o, g, n), bounded by the lb/ub parameters;
 *   - a flat row-major offset into the result array
 *     (SACp_emal_9283__iwlmem_8317_dev) is computed from the result's
 *     shape descriptor, and a second offset into the input array from the
 *     5-element shape vector SACp_emal_9293__iwlmem_8315_dev;
 *   - the selected input element is multiplied (SAC_ND_PRF_MUL) by the
 *     scalar SACp_emal_9295__pinl_2123__flat_140__SSA3_1 and the product
 *     is stored at the computed result offset (SAC_ND_WRITE_READ_COPY).
 * NOTE(review): exact grid/block layout semantics come from the
 * SAC_CUDA_WLIDS_HD macro expansion, which is not visible in this file.
[ body ] ****************************************************************************/ /* * CUDA_GLOBALFUN_DEF_BEGIN( SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X, 14, inout, float, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), -7, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, float, (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, in, float, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), -7, in, int, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 1) */ __global__ void SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X(SAC_CUDA_PARAM_inout( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 0), int
SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_CUDA_PARAM_in( (SACp_emal_9294__iwlmem_8316_dev, (AKD,
(NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), int), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SIZE((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))))){ { /* * ND_DECL( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), float, ) SAC_ND_DECL__DESC( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI,
(INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* *
ND_DECL( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * CUDA_DECL_KERNEL_ARRAY( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ int SAC_ND_A_FIELD( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))[5]; SAC_ND_DECL__DESC( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; SAC_INIT_LOCAL_MEM() /* * CUDA_WLIDS( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 4, (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_X, SACp_step_4, SACp_width_4, SACp_lb_4, SACp_ub_4) SAC_ND_WRITE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) = SAC_ND_READ( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 3, (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_Y, SACp_step_3, SACp_width_3, SACp_lb_3, SACp_ub_3) SAC_ND_WRITE(
(SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) = SAC_ND_READ( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 2, (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_Z, SACp_step_2, SACp_width_2, SACp_lb_2, SACp_ub_2) SAC_ND_WRITE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) = SAC_ND_READ( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 1, (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, BLOCKIDX_X, SACp_step_1, SACp_width_1, SACp_lb_1, SACp_ub_1) SAC_ND_WRITE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) = SAC_ND_READ( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 0, (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, BLOCKIDX_Y, SACp_step_0, SACp_width_0, SACp_lb_0, SACp_ub_0) SAC_ND_WRITE( (SACp_flat_369, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_ND_READ( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * ND_ARRAY_IDXS2OFFSET_id( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI,
(INT, )))))))))), 5, (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_WRITE( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 4) * ( SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 3) * ( SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 2) * ( SAC_ND_A_SHAPE( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 1) * SAC_ND_READ( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); /* * ND_IDXS2OFFSET_id( (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO,
(NON, (NOT, (NDI, (INT, )))))))))), (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, ))))))))))) */ SAC_ND_WRITE( (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( SAC_ND_READ( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 4) * ( SAC_ND_READ( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 3) * ( SAC_ND_READ( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 2) * ( SAC_ND_READ( (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 1) * SAC_ND_READ( (SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); SAC_ND_ALLOC_BEGIN((SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_SET__SHAPE_arr( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0) */ SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, ))))))))))) == 0), 152, "Assignment with incompatible types found!"); SAC_NOOP() SAC_ND_ALLOC_END((SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ,
(FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 1, 0, float) /* * ND_PRF_IDX_SEL__DATA( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), -7, (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) */ SAC_TR_PRF_PRINT( ("ND_PRF_IDX_SEL__DATA( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), -7, (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))")) SAC_ASSURE_TYPE_LINE ((SAC_ND_A_DIM( (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) == 0), 152, "1st argument of _idx_sel_ is not a scalar!"); SAC_ND_WRITE_READ_COPY( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), SAC_ND_READ( (SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), ) SAC_ND_FREE((SACp_emal_9286__ivesli_8123, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_PRF_SxS__DATA((SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), SAC_ND_PRF_MUL, SAC_ND_READ((SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0), SAC_ND_READ((SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0)) /* * CUDA_WL_ASSIGN( (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_9283__iwlmem_8317_dev,
(AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), -7, (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE_READ_COPY( (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SAC_ND_READ( (SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_emal_9285__pinl_2134__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (NON, (NOT, (NDI, (FLO, )))))))))), ) SAC_ND_FREE((SACp_wlidx_8019_O__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_w__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_h__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_o__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_g__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACl_n__SSA0_1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOOP() SAC_CLEANUP_LOCAL_MEM() } /* * CUDA_GLOBALFUN_DEF_END( SACf__MAIN___cuknl_9403_CUDA__i__i__i__i__i__i__i__i__i__i__f__fd_X_X_X_X_X__id_5__fd_X_X_X_X_X, 14, inout, float, (SACp_emal_9283__iwlmem_8317_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), -7, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4,
(SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, float, (SACp_emal_9295__pinl_2123__flat_140__SSA3_1, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, in, float, (SACp_emal_9294__iwlmem_8316_dev, (AKD, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), -7, in, int, (SACp_emal_9293__iwlmem_8315_dev, (AKS, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (OTH, )))))))))), 1) */ } /**************************************************************************** * WITH-loop Count: 0 * _MAIN::SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f(...) [ body ] ****************************************************************************/ /* * CUDA_GLOBALFUN_DEF_BEGIN( SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, inout, float, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ,
 * --- reviewer note (generated code; do not hand-edit) ---
 * Kernel cuknl_9402 below fills a rank-5 result array with a single
 * scalar: each thread maps its THREADIDX/BLOCKIDX coordinates to five
 * loop indices via SAC_CUDA_WLIDS_HD, computes a flat offset using the
 * hard-coded multipliers 32, 32, 7, 7 (the result's fixed shape suffix,
 * per the _fd_32_32_32_7_7_ name mangling), and copies the scalar
 * parameter SACp_emal_8382__pinl_1769__flat_349 into that position
 * (SAC_ND_WRITE_READ_COPY).
 * The trailing `__global__ void` at the very end of this section opens
 * kernel cuknl_9401, whose signature and body continue past this chunk.
(INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0) */ __global__ void SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f(SAC_CUDA_PARAM_in( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_CUDA_PARAM_inout( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int),
SAC_CUDA_PARAM_in( (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)){ { /* * ND_DECL( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT,
(GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * CUDA_DECL_KERNEL_ARRAY( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ int SAC_ND_A_FIELD( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))[5]; SAC_ND_DECL__DESC( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; SAC_INIT_LOCAL_MEM() /* * CUDA_WLIDS( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 4, (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_X, SACp_step_4, SACp_width_4, SACp_lb_4, SACp_ub_4) SAC_ND_WRITE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT,
)))))))))), 4) = SAC_ND_READ( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 3, (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_Y, SACp_step_3, SACp_width_3, SACp_lb_3, SACp_ub_3) SAC_ND_WRITE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) = SAC_ND_READ( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 2, (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_Z, SACp_step_2, SACp_width_2, SACp_lb_2, SACp_ub_2) SAC_ND_WRITE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) = SAC_ND_READ( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 1, (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, BLOCKIDX_X, SACp_step_1, SACp_width_1, SACp_lb_1, SACp_ub_1) SAC_ND_WRITE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) = SAC_ND_READ( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ,
(INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 0, (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, BLOCKIDX_Y, SACp_step_0, SACp_width_0, SACp_lb_0, SACp_ub_0) SAC_ND_WRITE( (SACp_pinl_1773_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_ND_READ( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * ND_IDXS2OFFSET_arr( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, 32, 32, 32, 7, 7) */ SAC_ND_WRITE( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( 7 * ( 7 * ( 32 * ( 32 * SAC_ND_READ( (SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); /* * CUDA_WL_ASSIGN( (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_8369__iwlmem_8314_dev, (AKS,
(NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE_READ_COPY( (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SAC_ND_READ( (SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_wlidx_7960_W1, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1774__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1775__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1776__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1777__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1778__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOOP() SAC_CLEANUP_LOCAL_MEM() } /* * CUDA_GLOBALFUN_DEF_END( SACf__MAIN___cuknl_9402_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, (SACp_emal_8382__pinl_1769__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, inout, float, (SACp_emal_8369__iwlmem_8314_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ,
(INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0) */ } /**************************************************************************** * WITH-loop Count: 0 * _MAIN::SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f(...) [ body ] ****************************************************************************/ /* * CUDA_GLOBALFUN_DEF_BEGIN( SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, inout, float, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0) */ __global__ void
SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f(SAC_CUDA_PARAM_in( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), float), SAC_CUDA_PARAM_inout( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), float), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 0), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 1), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 2), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 3), int SAC_ND_A_MIRROR_SHAPE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 4), int SAC_ND_A_MIRROR_SIZE((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), int SAC_ND_A_MIRROR_DIM((SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, ))))))))))), SAC_CUDA_PARAM_in( (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_2, (SCL, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int), SAC_CUDA_PARAM_in( (SACp_lb_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), int)){ { /* * ND_DECL( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1745__eat_510, (SCL, (NHD, 
(NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * ND_DECL( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 0) */ SAC_ND_DECL__DATA( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() /* * CUDA_DECL_KERNEL_ARRAY( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, 1, 5) */ int SAC_ND_A_FIELD( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))))[5]; SAC_ND_DECL__DESC( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) const int SAC_ND_A_MIRROR_SHAPE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = 5; const int SAC_ND_A_MIRROR_SIZE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 5; const int SAC_ND_A_MIRROR_DIM( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) = 1; SAC_INIT_LOCAL_MEM() /* * CUDA_WLIDS( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 4, (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_X, SACp_step_4, SACp_width_4, SACp_lb_4, SACp_ub_4) SAC_ND_WRITE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 4) = SAC_ND_READ( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 3, (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ 
SAC_CUDA_WLIDS_HD( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_Y, SACp_step_3, SACp_width_3, SACp_lb_3, SACp_ub_3) SAC_ND_WRITE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 3) = SAC_ND_READ( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 2, (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, THREADIDX_Z, SACp_step_2, SACp_width_2, SACp_lb_2, SACp_ub_2) SAC_ND_WRITE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 2) = SAC_ND_READ( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 1, (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, BLOCKIDX_X, SACp_step_1, SACp_width_1, SACp_lb_1, SACp_ub_1) SAC_ND_WRITE( (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 1) = SAC_ND_READ( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * CUDA_WLIDS( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, 5, 0, (SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), false) */ SAC_CUDA_WLIDS_HD( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0, BLOCKIDX_Y, SACp_step_0, SACp_width_0, SACp_lb_0, SACp_ub_0) SAC_ND_WRITE( 
(SACp_pinl_1741_iv, (AKS, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = SAC_ND_READ( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0); /* * ND_IDXS2OFFSET_arr( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 5, 32, 32, 32, 7, 7) */ SAC_ND_WRITE( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) = ( 7 * ( 7 * ( 32 * ( 32 * SAC_ND_READ( (SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) + SAC_ND_READ( (SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ) + SAC_ND_READ( (SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0) ); /* * CUDA_WL_ASSIGN( (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, ))))))))))) */ SAC_ND_WRITE_READ_COPY( (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), SAC_ND_READ( (SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, 
(NOT, (NDI, (INT, )))))))))), 0), (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, ); SAC_ND_FREE((SACp_wlidx_7959_I, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1742__eat_507, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1743__eat_508, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1744__eat_509, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1745__eat_510, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_ND_FREE((SACp_pinl_1746__eat_511, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOOP() SAC_CLEANUP_LOCAL_MEM() } /* * CUDA_GLOBALFUN_DEF_END( SACf__MAIN___cuknl_9401_CUDA__i__i__i__i__i__i__i__i__i__i__fd_32_32_32_7_7__f, 12, in, float, (SACp_emal_8397__pinl_1737__flat_349, (SCL, (NHD, (NUQ, (FLO, (GLO, (FPM, (NOT, (NDI, (FLO, )))))))))), 0, inout, float, (SACp_emal_8384__iwlmem_8313_dev, (AKS, (NHD, (NUQ, (FLO, (GLO, (FPO, (NOT, (NDI, (OTH, )))))))))), 5, in, int, (SACp_ub_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_ub_0, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_4, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_3, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_2, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_1, (SCL, (NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0, in, int, (SACp_lb_0, (SCL, 
(NHD, (NUQ, (INT, (GLO, (FPM, (NOT, (NDI, (INT, )))))))))), 0) */ } /* * stubs for SACARGfreeDataUdt and SACARGcopyDataUdt */ extern "C" void SACARGfreeDataUdt( int, void *); extern "C" void *SACARGcopyDataUdt( int, int, void *); void SACARGfreeDataUdt( int size, void *data) {} void *SACARGcopyDataUdt( int type, int size, void *data) { return ((void *) 0x0); } int main( int __argc, char *__argv[]) { SAC_ND_DECL__DATA( (SAC_res, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int, ) SAC_ND_DECL__DESC( (SAC_res, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), ) SAC_NOTHING() SAC_MT_SETUP_INITIAL(); SAC_PF_SETUP(); SAC_HM_SETUP(); SAC_MT_SETUP(); SAC_CS_SETUP(); SAC_COMMANDLINE_SET( __argc, __argv); SAC_INVOKE_MAIN_FUN( SACf__MAIN__main, SAC_ND_ARG_out( (SAC_res, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), int)); SAC_DISTMEM_BARRIER(); SAC_PF_PRINT(); SAC_CS_FINALIZE(); SAC_MT_FINALIZE(); SAC_HM_PRINT(); return( SAC_ND_READ( (SAC_res, (SCL, (NHD, (NUQ, (INT, (GLO, (NON, (NOT, (NDI, (INT, )))))))))), 0)); }
5424544501a8073d800c3696cad52e20b45800b5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common/book.h" #include "common/cpu_anim.h" constexpr size_t DIM = 2048; constexpr float PI = 3.1415926535897932f; constexpr float MAX_TEMP = 1.0f; constexpr float MIN_TEMP = 0.0001f; constexpr float SPEED = 0.25f; texture<float, 2> texConstSrc; texture<float, 2> texIn; texture<float, 2> texOut; // , __global__ void copy_const_kernel(float *iPtr, const float *cPtr) {// thredIdx/blockIdx int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float c = tex2D<float>(texConstSrc, x, y); if (c != 0) iPtr[offset] = c; } __global__ void blend_kernel(float *dst, bool dstOut) {// threadIdx/blockIdx int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float t, l, c, r, b; if (dstOut) { t = tex2D<float>(texIn, x, y - 1); l = tex2D<float>(texIn, x - 1, y); c = tex2D<float>(texIn, x, y); r = tex2D<float>(texIn, x + 1, y); b = tex2D<float>(texIn, x, y + 1); } else { t = tex2D<float>(texOut, x, y - 1); l = tex2D<float>(texOut, x - 1, y); c = tex2D<float>(texOut, x, y); r = tex2D<float>(texOut, x + 1, y); b = tex2D<float>(texOut, x, y + 1); } dst[offset] = c + SPEED*( t + b + r + l - 4*c); } struct DataBlock { unsigned char *output_bitmap; float *dev_inSrc; float *dev_outSrc; float *dev_constSrc; CPUAnimBitmap *bitmap; hipEvent_t start, stop; float totalTime; float frames; }; void anim_gpu (DataBlock *d, int ticks) { HANDLE_ERROR (hipEventRecord (d->start, 0)); dim3 blocks(DIM/16, DIM/16); dim3 threads(16,16); CPUAnimBitmap * bitmap = d-> bitmap; for (int i=0; i<100000; i++) {hipLaunchKernelGGL(( copy_const_kernel), dim3(blocks), dim3(threads), 0, 0, d->dev_inSrc, d-> dev_constSrc ); hipLaunchKernelGGL(( blend_kernel), dim3(blocks), dim3(threads), 0, 0, d-> dev_outSrc, d-> dev_inSrc ); swap ( d-> dev_inSrc, d-> 
dev_outSrc ); } hipLaunchKernelGGL(( float_to_color), dim3(blocks), dim3(threads), 0, 0, d->output_bitmap, d->dev_inSrc ); HANDLE_ERROR (hipMemcpy (bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(), hipMemcpyDeviceToHost)); HANDLE_ERROR (hipEventRecord (d->stop,0)); HANDLE_ERROR (hipEventSynchronize(d->stop)); float elapsedTime; HANDLE_ERROR (hipEventElapsedTime (&elapsedTime, d->start, d->stop)); d->totalTime += elapsedTime; ++d->frames; printf(" : %3.1f ms\n", d->totalTime / d->frames); } void anim_exit(DataBlock *d) { hipFree (d->dev_inSrc); hipFree (d->dev_outSrc); hipFree (d->dev_constSrc); HANDLE_ERROR (hipEventDestroy (d->start)); HANDLE_ERROR (hipEventDestroy (d->stop)); } int main(void) { DataBlock data; CPUAnimBitmap bitmap(DIM, DIM, &data); data.bitmap = & bitmap; data.totalTime = 0; data.frames = 0; HANDLE_ERROR (hipEventCreate (&data.start)); HANDLE_ERROR (hipEventCreate (&data.stop)); HANDLE_ERROR (hipMalloc ((void**)&data.output_bitmap, bitmap.image_size())); // , float 4 (.. 
rgba) HANDLE_ERROR (hipMalloc ((void**)&data.dev_inSrc, bitmap.image_size())); HANDLE_ERROR (hipMalloc ((void**)&data.dev_outSrc, bitmap.image_size())); HANDLE_ERROR (hipMalloc ((void**)&data.dev_constSrc, bitmap.image_size())); hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); HANDLE_ERROR (hipBindTexture2D (nullptr, texConstSrc, data.dev_constSrc, desc, DIM, DIM, sizeof(float)*DIM)); HANDLE_ERROR (hipBindTexture2D (nullptr, texIn, data.dev_inSrc, desc, DIM, DIM, sizeof(float)*DIM)); HANDLE_ERROR (hipBindTexture2D (nullptr, texOut, data.dev_outSrc, desc, DIM, DIM, sizeof(float)*DIM)); auto *temp = (float*)malloc(bitmap.image_size()); for (int i = 0; i<DIM*DIM; i++) { temp[i] = 0; int x = i % DIM; int y = i / DIM; if ((x>300) && (x<600) && (y>310) && (y<601)) temp[i] = MAX_TEMP; } temp[DIM*100+100] = (MAX_TEMP + MIN_TEMP) / 2; temp[DIM*700+100] = MIN_TEMP; temp[DIM*300+300] = MIN_TEMP; temp[DIM*200+700] = MIN_TEMP; for (int y = 800; y < 900; y++) { for (int x = 400; x < 500; x++) temp[x+y*DIM] = MIN_TEMP; } HANDLE_ERROR (hipMemcpy (data.dev_constSrc, temp, bitmap.image_size(), hipMemcpyHostToDevice)); for (int y = 800; y < DIM; y++) { for (int x = 0; x < 200; x++) temp[x+y*DIM] = MAX_TEMP; } HANDLE_ERROR (hipMemcpy (data.dev_inSrc, temp, bitmap.image_size(), hipMemcpyHostToDevice)); free(temp); bitmap.anim_and_exit ( (void (*)(void*, int)) anim_gpu, (void (*)(void*)) anim_exit); }
5424544501a8073d800c3696cad52e20b45800b5.cu
#include "cuda.h" #include "common/book.h" #include "common/cpu_anim.h" constexpr size_t DIM = 2048; constexpr float PI = 3.1415926535897932f; constexpr float MAX_TEMP = 1.0f; constexpr float MIN_TEMP = 0.0001f; constexpr float SPEED = 0.25f; texture<float, 2> texConstSrc; texture<float, 2> texIn; texture<float, 2> texOut; // глобальные данные, необходимые функции обновления __global__ void copy_const_kernel(float *iPtr, const float *cPtr) {// отобразить пару thredIdx/blockIdx на позицию пикселя int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float c = tex2D<float>(texConstSrc, x, y); if (c != 0) iPtr[offset] = c; } __global__ void blend_kernel(float *dst, bool dstOut) {// отобразить пару threadIdx/blockIdx на позицию пикселя int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float t, l, c, r, b; if (dstOut) { t = tex2D<float>(texIn, x, y - 1); l = tex2D<float>(texIn, x - 1, y); c = tex2D<float>(texIn, x, y); r = tex2D<float>(texIn, x + 1, y); b = tex2D<float>(texIn, x, y + 1); } else { t = tex2D<float>(texOut, x, y - 1); l = tex2D<float>(texOut, x - 1, y); c = tex2D<float>(texOut, x, y); r = tex2D<float>(texOut, x + 1, y); b = tex2D<float>(texOut, x, y + 1); } dst[offset] = c + SPEED*( t + b + r + l - 4*c); } struct DataBlock { unsigned char *output_bitmap; float *dev_inSrc; float *dev_outSrc; float *dev_constSrc; CPUAnimBitmap *bitmap; cudaEvent_t start, stop; float totalTime; float frames; }; void anim_gpu (DataBlock *d, int ticks) { HANDLE_ERROR (cudaEventRecord (d->start, 0)); dim3 blocks(DIM/16, DIM/16); dim3 threads(16,16); CPUAnimBitmap * bitmap = d-> bitmap; for (int i=0; i<100000; i++) { copy_const_kernel<<<blocks, threads>>>( d->dev_inSrc, d-> dev_constSrc ); blend_kernel<<<blocks, threads>>>( d-> dev_outSrc, d-> dev_inSrc ); swap ( d-> dev_inSrc, d-> dev_outSrc ); } 
float_to_color<<<blocks, threads>>>( d->output_bitmap, d->dev_inSrc ); HANDLE_ERROR (cudaMemcpy (bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(), cudaMemcpyDeviceToHost)); HANDLE_ERROR (cudaEventRecord (d->stop,0)); HANDLE_ERROR (cudaEventSynchronize(d->stop)); float elapsedTime; HANDLE_ERROR (cudaEventElapsedTime (&elapsedTime, d->start, d->stop)); d->totalTime += elapsedTime; ++d->frames; printf("Среднее время на один кадр: %3.1f ms\n", d->totalTime / d->frames); } void anim_exit(DataBlock *d) { cudaFree (d->dev_inSrc); cudaFree (d->dev_outSrc); cudaFree (d->dev_constSrc); HANDLE_ERROR (cudaEventDestroy (d->start)); HANDLE_ERROR (cudaEventDestroy (d->stop)); } int main(void) { DataBlock data; CPUAnimBitmap bitmap(DIM, DIM, &data); data.bitmap = & bitmap; data.totalTime = 0; data.frames = 0; HANDLE_ERROR (cudaEventCreate (&data.start)); HANDLE_ERROR (cudaEventCreate (&data.stop)); HANDLE_ERROR (cudaMalloc ((void**)&data.output_bitmap, bitmap.image_size())); // предполагаем, что размер float равен 4 байтам (т.е. 
rgba) HANDLE_ERROR (cudaMalloc ((void**)&data.dev_inSrc, bitmap.image_size())); HANDLE_ERROR (cudaMalloc ((void**)&data.dev_outSrc, bitmap.image_size())); HANDLE_ERROR (cudaMalloc ((void**)&data.dev_constSrc, bitmap.image_size())); cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); HANDLE_ERROR (cudaBindTexture2D (nullptr, texConstSrc, data.dev_constSrc, desc, DIM, DIM, sizeof(float)*DIM)); HANDLE_ERROR (cudaBindTexture2D (nullptr, texIn, data.dev_inSrc, desc, DIM, DIM, sizeof(float)*DIM)); HANDLE_ERROR (cudaBindTexture2D (nullptr, texOut, data.dev_outSrc, desc, DIM, DIM, sizeof(float)*DIM)); auto *temp = (float*)malloc(bitmap.image_size()); for (int i = 0; i<DIM*DIM; i++) { temp[i] = 0; int x = i % DIM; int y = i / DIM; if ((x>300) && (x<600) && (y>310) && (y<601)) temp[i] = MAX_TEMP; } temp[DIM*100+100] = (MAX_TEMP + MIN_TEMP) / 2; temp[DIM*700+100] = MIN_TEMP; temp[DIM*300+300] = MIN_TEMP; temp[DIM*200+700] = MIN_TEMP; for (int y = 800; y < 900; y++) { for (int x = 400; x < 500; x++) temp[x+y*DIM] = MIN_TEMP; } HANDLE_ERROR (cudaMemcpy (data.dev_constSrc, temp, bitmap.image_size(), cudaMemcpyHostToDevice)); for (int y = 800; y < DIM; y++) { for (int x = 0; x < 200; x++) temp[x+y*DIM] = MAX_TEMP; } HANDLE_ERROR (cudaMemcpy (data.dev_inSrc, temp, bitmap.image_size(), cudaMemcpyHostToDevice)); free(temp); bitmap.anim_and_exit ( (void (*)(void*, int)) anim_gpu, (void (*)(void*)) anim_exit); }
049ccf5a3c4cf4dc23d82d61ac73edcc2fe7402c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
// HIP variant of the OPS "advec_mom_kernel_z1" parallel loop (kernel
// index 122). The xdim*/ydim* __constant__ symbols hold each dat's x/y
// extents on the device; the matching *_h globals cache the values last
// uploaded so the host stub can skip redundant hipMemcpyToSymbol calls.
__constant__ int xdim0_advec_mom_kernel_z1;
int xdim0_advec_mom_kernel_z1_h = -1;
__constant__ int ydim0_advec_mom_kernel_z1;
int ydim0_advec_mom_kernel_z1_h = -1;
__constant__ int xdim1_advec_mom_kernel_z1;
int xdim1_advec_mom_kernel_z1_h = -1;
__constant__ int ydim1_advec_mom_kernel_z1;
int ydim1_advec_mom_kernel_z1_h = -1;
__constant__ int xdim2_advec_mom_kernel_z1;
int xdim2_advec_mom_kernel_z1_h = -1;
__constant__ int ydim2_advec_mom_kernel_z1;
int ydim2_advec_mom_kernel_z1_h = -1;
__constant__ int xdim3_advec_mom_kernel_z1;
int xdim3_advec_mom_kernel_z1_h = -1;
__constant__ int ydim3_advec_mom_kernel_z1;
int ydim3_advec_mom_kernel_z1_h = -1;
__constant__ int xdim4_advec_mom_kernel_z1;
int xdim4_advec_mom_kernel_z1_h = -1;
__constant__ int ydim4_advec_mom_kernel_z1;
int ydim4_advec_mom_kernel_z1_h = -1;
__constant__ int xdim5_advec_mom_kernel_z1;
int xdim5_advec_mom_kernel_z1_h = -1;
__constant__ int ydim5_advec_mom_kernel_z1;
int ydim5_advec_mom_kernel_z1_h = -1;

#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5

// 3D (x, y, z) stencil offset -> linear index, one macro per argument,
// each using that argument's own x/y extents.
#define OPS_ACC0(x, y, z)                                                      \
  (x + xdim0_advec_mom_kernel_z1 * (y) +                                       \
   xdim0_advec_mom_kernel_z1 * ydim0_advec_mom_kernel_z1 * (z))
#define OPS_ACC1(x, y, z)                                                      \
  (x + xdim1_advec_mom_kernel_z1 * (y) +                                       \
   xdim1_advec_mom_kernel_z1 * ydim1_advec_mom_kernel_z1 * (z))
#define OPS_ACC2(x, y, z)                                                      \
  (x + xdim2_advec_mom_kernel_z1 * (y) +                                       \
   xdim2_advec_mom_kernel_z1 * ydim2_advec_mom_kernel_z1 * (z))
#define OPS_ACC3(x, y, z)                                                      \
  (x + xdim3_advec_mom_kernel_z1 * (y) +                                       \
   xdim3_advec_mom_kernel_z1 * ydim3_advec_mom_kernel_z1 * (z))
#define OPS_ACC4(x, y, z)                                                      \
  (x + xdim4_advec_mom_kernel_z1 * (y) +                                       \
   xdim4_advec_mom_kernel_z1 * ydim4_advec_mom_kernel_z1 * (z))
#define OPS_ACC5(x, y, z)                                                      \
  (x + xdim5_advec_mom_kernel_z1 * (y) +                                       \
   xdim5_advec_mom_kernel_z1 * ydim5_advec_mom_kernel_z1 * (z))

// user function
__device__ inline void advec_mom_kernel_z1_gpu(double *pre_vol,
                                               double *post_vol,
                                               const double *volume,
                                               const double *vol_flux_x,
                                               const double *vol_flux_y,
                                               const double *vol_flux_z) {
  // post_vol = cell volume plus the net x and y volume fluxes.
  post_vol[OPS_ACC1(0, 0, 0)] =
      volume[OPS_ACC2(0, 0, 0)] + vol_flux_x[OPS_ACC3(1, 0, 0)] -
      vol_flux_x[OPS_ACC3(0, 0, 0)] + vol_flux_y[OPS_ACC4(0, 1, 0)] -
      vol_flux_y[OPS_ACC4(0, 0, 0)];
  // pre_vol additionally includes the net z volume flux.
  pre_vol[OPS_ACC0(0, 0, 0)] = post_vol[OPS_ACC1(0, 0, 0)] +
                               vol_flux_z[OPS_ACC5(0, 0, 1)] -
                               vol_flux_z[OPS_ACC5(0, 0, 0)];
}

#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5

// One thread per grid point. Each argument pointer is advanced to this
// thread's element before the bounds-guarded call into the user function.
__global__ void ops_advec_mom_kernel_z1(
    double *__restrict arg0, double *__restrict arg1,
    const double *__restrict arg2, const double *__restrict arg3,
    const double *__restrict arg4, const double *__restrict arg5, int size0,
    int size1, int size2) {

  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;

  arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_z1 +
          idx_z * 1 * 1 * xdim0_advec_mom_kernel_z1 *
              ydim0_advec_mom_kernel_z1;
  arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_z1 +
          idx_z * 1 * 1 * xdim1_advec_mom_kernel_z1 *
              ydim1_advec_mom_kernel_z1;
  arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel_z1 +
          idx_z * 1 * 1 * xdim2_advec_mom_kernel_z1 *
              ydim2_advec_mom_kernel_z1;
  arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel_z1 +
          idx_z * 1 * 1 * xdim3_advec_mom_kernel_z1 *
              ydim3_advec_mom_kernel_z1;
  arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_mom_kernel_z1 +
          idx_z * 1 * 1 * xdim4_advec_mom_kernel_z1 *
              ydim4_advec_mom_kernel_z1;
  arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_mom_kernel_z1 +
          idx_z * 1 * 1 * xdim5_advec_mom_kernel_z1 *
              ydim5_advec_mom_kernel_z1;

  // Guard: the launch grid is rounded up, so trailing threads must exit.
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    advec_mom_kernel_z1_gpu(arg0, arg1, arg2, arg3, arg4, arg5);
  }
}

// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel_z1(char const *name, ops_block block,
                                      int dim, int *range, ops_arg arg0,
                                      ops_arg arg1, ops_arg arg2, ops_arg arg3,
                                      ops_arg arg4, ops_arg arg5) {
#else
// In lazy mode the stub body is executed later from a queued descriptor.
void ops_par_loop_advec_mom_kernel_z1_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
#endif

  // Timing
  double t1, t2, c1, c2;

  ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};

#if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args, 6, range, 122))
    return;
#endif

  if (OPS_diags > 1) {
    ops_timing_realloc(122, "advec_mom_kernel_z1");
    OPS_kernels[122].count++;
    ops_timers_core(&c1, &t1);
  }

  // compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#if OPS_MPI && !OPS_LAZY
  // Clamp the requested global range to the portion owned by this rank.
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 3; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif

  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);
  int z_size = MAX(0, end[2] - start[2]);

  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  int xdim2 = args[2].dat->size[0];
  int ydim2 = args[2].dat->size[1];
  int xdim3 = args[3].dat->size[0];
  int ydim3 = args[3].dat->size[1];
  int xdim4 = args[4].dat->size[0];
  int ydim4 = args[4].dat->size[1];
  int xdim5 = args[5].dat->size[0];
  int ydim5 = args[5].dat->size[1];

  // Upload the per-dat extents to __constant__ memory only when they
  // changed since the last launch (tracked via the *_h host mirrors).
  if (xdim0 != xdim0_advec_mom_kernel_z1_h ||
      ydim0 != ydim0_advec_mom_kernel_z1_h ||
      xdim1 != xdim1_advec_mom_kernel_z1_h ||
      ydim1 != ydim1_advec_mom_kernel_z1_h ||
      xdim2 != xdim2_advec_mom_kernel_z1_h ||
      ydim2 != ydim2_advec_mom_kernel_z1_h ||
      xdim3 != xdim3_advec_mom_kernel_z1_h ||
      ydim3 != ydim3_advec_mom_kernel_z1_h ||
      xdim4 != xdim4_advec_mom_kernel_z1_h ||
      ydim4 != ydim4_advec_mom_kernel_z1_h ||
      xdim5 != xdim5_advec_mom_kernel_z1_h ||
      ydim5 != ydim5_advec_mom_kernel_z1_h) {
    hipMemcpyToSymbol(xdim0_advec_mom_kernel_z1, &xdim0, sizeof(int));
    xdim0_advec_mom_kernel_z1_h = xdim0;
    hipMemcpyToSymbol(ydim0_advec_mom_kernel_z1, &ydim0, sizeof(int));
    ydim0_advec_mom_kernel_z1_h = ydim0;
    hipMemcpyToSymbol(xdim1_advec_mom_kernel_z1, &xdim1, sizeof(int));
    xdim1_advec_mom_kernel_z1_h = xdim1;
    hipMemcpyToSymbol(ydim1_advec_mom_kernel_z1, &ydim1, sizeof(int));
    ydim1_advec_mom_kernel_z1_h = ydim1;
    hipMemcpyToSymbol(xdim2_advec_mom_kernel_z1, &xdim2, sizeof(int));
    xdim2_advec_mom_kernel_z1_h = xdim2;
    hipMemcpyToSymbol(ydim2_advec_mom_kernel_z1, &ydim2, sizeof(int));
    ydim2_advec_mom_kernel_z1_h = ydim2;
    hipMemcpyToSymbol(xdim3_advec_mom_kernel_z1, &xdim3, sizeof(int));
    xdim3_advec_mom_kernel_z1_h = xdim3;
    hipMemcpyToSymbol(ydim3_advec_mom_kernel_z1, &ydim3, sizeof(int));
    ydim3_advec_mom_kernel_z1_h = ydim3;
    hipMemcpyToSymbol(xdim4_advec_mom_kernel_z1, &xdim4, sizeof(int));
    xdim4_advec_mom_kernel_z1_h = xdim4;
    hipMemcpyToSymbol(ydim4_advec_mom_kernel_z1, &ydim4, sizeof(int));
    ydim4_advec_mom_kernel_z1_h = ydim4;
    hipMemcpyToSymbol(xdim5_advec_mom_kernel_z1, &xdim5, sizeof(int));
    xdim5_advec_mom_kernel_z1_h = xdim5;
    hipMemcpyToSymbol(ydim5_advec_mom_kernel_z1, &ydim5, sizeof(int));
    ydim5_advec_mom_kernel_z1_h = ydim5;
  }

  // 2D thread blocks; the grid is rounded up in x/y and spans z directly.
  dim3 grid((x_size - 1) / OPS_block_size_x + 1,
            (y_size - 1) / OPS_block_size_y + 1, z_size);
  dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);

  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);

  char *p_a[6];

  // set up initial pointers: byte offset of (start[0], start[1], start[2])
  // within each dat, honouring the stencil strides.
  int base0 = args[0].dat->base_offset +
              dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
  base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
                      (start[2] * args[0].stencil->stride[2]);
  p_a[0] = (char *)args[0].data_d + base0;

  int base1 = args[1].dat->base_offset +
              dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
  base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
                      (start[2] * args[1].stencil->stride[2]);
  p_a[1] = (char *)args[1].data_d + base1;

  int base2 = args[2].dat->base_offset +
              dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2 +
          dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
  base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
                      (start[2] * args[2].stencil->stride[2]);
  p_a[2] = (char *)args[2].data_d + base2;

  int base3 = args[3].dat->base_offset +
              dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3 +
          dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
  base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
                      (start[2] * args[3].stencil->stride[2]);
  p_a[3] = (char *)args[3].data_d + base3;

  int base4 = args[4].dat->base_offset +
              dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  base4 = base4 +
          dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
  base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
                      (start[2] * args[4].stencil->stride[2]);
  p_a[4] = (char *)args[4].data_d + base4;

  int base5 = args[5].dat->base_offset +
              dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
  base5 = base5 +
          dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
  base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
                      (start[2] * args[5].stencil->stride[2]);
  p_a[5] = (char *)args[5].data_d + base5;

#ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 6);
  ops_halo_exchanges(args, 6, range);
#endif

  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[122].mpi_time += t2 - t1;
  }

  // call kernel wrapper function, passing in pointers to data
  hipLaunchKernelGGL((ops_advec_mom_kernel_z1), dim3(grid), dim3(tblock), 0, 0,
                     (double *)p_a[0], (double *)p_a[1], (double *)p_a[2],
                     (double *)p_a[3], (double *)p_a[4], (double *)p_a[5],
                     x_size, y_size, z_size);

  cutilSafeCall(hipGetLastError());

  if (OPS_diags > 1) {
    // Synchronize only when diagnostics are on, so timing is accurate.
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[122].time += t1 - t2;
  }

#ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 6);
  // Only arg0 and arg1 are written by the kernel, so only their halos dirty.
  ops_set_halo_dirtybit3(&args[0], range);
  ops_set_halo_dirtybit3(&args[1], range);
#endif

  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[122].mpi_time += t2 - t1;
    OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg4);
    OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg5);
  }
}

#ifdef OPS_LAZY
// Lazy-execution entry point: builds a kernel descriptor (with a hash of
// the range and argument indices) and enqueues it for deferred execution.
void ops_par_loop_advec_mom_kernel_z1(char const *name, ops_block block,
                                      int dim, int *range, ops_arg arg0,
                                      ops_arg arg1, ops_arg arg2, ops_arg arg3,
                                      ops_arg arg4, ops_arg arg5) {
  ops_kernel_descriptor *desc =
      (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 122;
  // djb2-style hash seeded with the kernel index.
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 122;
  for (int i = 0; i < 6; i++) {
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 6;
  desc->args = (ops_arg *)malloc(6 * sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
  desc->function = ops_par_loop_advec_mom_kernel_z1_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(122, "advec_mom_kernel_z1");
  }
  ops_enqueue_kernel(desc);
}
#endif
049ccf5a3c4cf4dc23d82d61ac73edcc2fe7402c.cu
//
// auto-generated by ops.py
//
// CUDA implementation of the OPS "advec_mom_kernel_z1" parallel loop
// (kernel index 122). The xdim*/ydim* __constant__ symbols hold each
// dat's x/y extents on the device; the matching *_h globals cache the
// values last uploaded so the host stub can skip redundant
// cudaMemcpyToSymbol calls.
__constant__ int xdim0_advec_mom_kernel_z1;
int xdim0_advec_mom_kernel_z1_h = -1;
__constant__ int ydim0_advec_mom_kernel_z1;
int ydim0_advec_mom_kernel_z1_h = -1;
__constant__ int xdim1_advec_mom_kernel_z1;
int xdim1_advec_mom_kernel_z1_h = -1;
__constant__ int ydim1_advec_mom_kernel_z1;
int ydim1_advec_mom_kernel_z1_h = -1;
__constant__ int xdim2_advec_mom_kernel_z1;
int xdim2_advec_mom_kernel_z1_h = -1;
__constant__ int ydim2_advec_mom_kernel_z1;
int ydim2_advec_mom_kernel_z1_h = -1;
__constant__ int xdim3_advec_mom_kernel_z1;
int xdim3_advec_mom_kernel_z1_h = -1;
__constant__ int ydim3_advec_mom_kernel_z1;
int ydim3_advec_mom_kernel_z1_h = -1;
__constant__ int xdim4_advec_mom_kernel_z1;
int xdim4_advec_mom_kernel_z1_h = -1;
__constant__ int ydim4_advec_mom_kernel_z1;
int ydim4_advec_mom_kernel_z1_h = -1;
__constant__ int xdim5_advec_mom_kernel_z1;
int xdim5_advec_mom_kernel_z1_h = -1;
__constant__ int ydim5_advec_mom_kernel_z1;
int ydim5_advec_mom_kernel_z1_h = -1;

#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5

// 3D (x, y, z) stencil offset -> linear index, one macro per argument,
// each using that argument's own x/y extents.
#define OPS_ACC0(x, y, z)                                                      \
  (x + xdim0_advec_mom_kernel_z1 * (y) +                                       \
   xdim0_advec_mom_kernel_z1 * ydim0_advec_mom_kernel_z1 * (z))
#define OPS_ACC1(x, y, z)                                                      \
  (x + xdim1_advec_mom_kernel_z1 * (y) +                                       \
   xdim1_advec_mom_kernel_z1 * ydim1_advec_mom_kernel_z1 * (z))
#define OPS_ACC2(x, y, z)                                                      \
  (x + xdim2_advec_mom_kernel_z1 * (y) +                                       \
   xdim2_advec_mom_kernel_z1 * ydim2_advec_mom_kernel_z1 * (z))
#define OPS_ACC3(x, y, z)                                                      \
  (x + xdim3_advec_mom_kernel_z1 * (y) +                                       \
   xdim3_advec_mom_kernel_z1 * ydim3_advec_mom_kernel_z1 * (z))
#define OPS_ACC4(x, y, z)                                                      \
  (x + xdim4_advec_mom_kernel_z1 * (y) +                                       \
   xdim4_advec_mom_kernel_z1 * ydim4_advec_mom_kernel_z1 * (z))
#define OPS_ACC5(x, y, z)                                                      \
  (x + xdim5_advec_mom_kernel_z1 * (y) +                                       \
   xdim5_advec_mom_kernel_z1 * ydim5_advec_mom_kernel_z1 * (z))

// user function
__device__ inline void advec_mom_kernel_z1_gpu(double *pre_vol,
                                               double *post_vol,
                                               const double *volume,
                                               const double *vol_flux_x,
                                               const double *vol_flux_y,
                                               const double *vol_flux_z) {
  // post_vol = cell volume plus the net x and y volume fluxes.
  post_vol[OPS_ACC1(0, 0, 0)] =
      volume[OPS_ACC2(0, 0, 0)] + vol_flux_x[OPS_ACC3(1, 0, 0)] -
      vol_flux_x[OPS_ACC3(0, 0, 0)] + vol_flux_y[OPS_ACC4(0, 1, 0)] -
      vol_flux_y[OPS_ACC4(0, 0, 0)];
  // pre_vol additionally includes the net z volume flux.
  pre_vol[OPS_ACC0(0, 0, 0)] = post_vol[OPS_ACC1(0, 0, 0)] +
                               vol_flux_z[OPS_ACC5(0, 0, 1)] -
                               vol_flux_z[OPS_ACC5(0, 0, 0)];
}

#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5

// One thread per grid point. Each argument pointer is advanced to this
// thread's element before the bounds-guarded call into the user function.
__global__ void ops_advec_mom_kernel_z1(
    double *__restrict arg0, double *__restrict arg1,
    const double *__restrict arg2, const double *__restrict arg3,
    const double *__restrict arg4, const double *__restrict arg5, int size0,
    int size1, int size2) {

  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;

  arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_z1 +
          idx_z * 1 * 1 * xdim0_advec_mom_kernel_z1 *
              ydim0_advec_mom_kernel_z1;
  arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_z1 +
          idx_z * 1 * 1 * xdim1_advec_mom_kernel_z1 *
              ydim1_advec_mom_kernel_z1;
  arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel_z1 +
          idx_z * 1 * 1 * xdim2_advec_mom_kernel_z1 *
              ydim2_advec_mom_kernel_z1;
  arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel_z1 +
          idx_z * 1 * 1 * xdim3_advec_mom_kernel_z1 *
              ydim3_advec_mom_kernel_z1;
  arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_mom_kernel_z1 +
          idx_z * 1 * 1 * xdim4_advec_mom_kernel_z1 *
              ydim4_advec_mom_kernel_z1;
  arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_mom_kernel_z1 +
          idx_z * 1 * 1 * xdim5_advec_mom_kernel_z1 *
              ydim5_advec_mom_kernel_z1;

  // Guard: the launch grid is rounded up, so trailing threads must exit.
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    advec_mom_kernel_z1_gpu(arg0, arg1, arg2, arg3, arg4, arg5);
  }
}

// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel_z1(char const *name, ops_block block,
                                      int dim, int *range, ops_arg arg0,
                                      ops_arg arg1, ops_arg arg2, ops_arg arg3,
                                      ops_arg arg4, ops_arg arg5) {
#else
// In lazy mode the stub body is executed later from a queued descriptor.
void ops_par_loop_advec_mom_kernel_z1_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
#endif

  // Timing
  double t1, t2, c1, c2;

  ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};

#if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args, 6, range, 122))
    return;
#endif

  if (OPS_diags > 1) {
    ops_timing_realloc(122, "advec_mom_kernel_z1");
    OPS_kernels[122].count++;
    ops_timers_core(&c1, &t1);
  }

  // compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#if OPS_MPI && !OPS_LAZY
  // Clamp the requested global range to the portion owned by this rank.
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 3; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif

  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);
  int z_size = MAX(0, end[2] - start[2]);

  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  int xdim2 = args[2].dat->size[0];
  int ydim2 = args[2].dat->size[1];
  int xdim3 = args[3].dat->size[0];
  int ydim3 = args[3].dat->size[1];
  int xdim4 = args[4].dat->size[0];
  int ydim4 = args[4].dat->size[1];
  int xdim5 = args[5].dat->size[0];
  int ydim5 = args[5].dat->size[1];

  // Upload the per-dat extents to __constant__ memory only when they
  // changed since the last launch (tracked via the *_h host mirrors).
  if (xdim0 != xdim0_advec_mom_kernel_z1_h ||
      ydim0 != ydim0_advec_mom_kernel_z1_h ||
      xdim1 != xdim1_advec_mom_kernel_z1_h ||
      ydim1 != ydim1_advec_mom_kernel_z1_h ||
      xdim2 != xdim2_advec_mom_kernel_z1_h ||
      ydim2 != ydim2_advec_mom_kernel_z1_h ||
      xdim3 != xdim3_advec_mom_kernel_z1_h ||
      ydim3 != ydim3_advec_mom_kernel_z1_h ||
      xdim4 != xdim4_advec_mom_kernel_z1_h ||
      ydim4 != ydim4_advec_mom_kernel_z1_h ||
      xdim5 != xdim5_advec_mom_kernel_z1_h ||
      ydim5 != ydim5_advec_mom_kernel_z1_h) {
    cudaMemcpyToSymbol(xdim0_advec_mom_kernel_z1, &xdim0, sizeof(int));
    xdim0_advec_mom_kernel_z1_h = xdim0;
    cudaMemcpyToSymbol(ydim0_advec_mom_kernel_z1, &ydim0, sizeof(int));
    ydim0_advec_mom_kernel_z1_h = ydim0;
    cudaMemcpyToSymbol(xdim1_advec_mom_kernel_z1, &xdim1, sizeof(int));
    xdim1_advec_mom_kernel_z1_h = xdim1;
    cudaMemcpyToSymbol(ydim1_advec_mom_kernel_z1, &ydim1, sizeof(int));
    ydim1_advec_mom_kernel_z1_h = ydim1;
    cudaMemcpyToSymbol(xdim2_advec_mom_kernel_z1, &xdim2, sizeof(int));
    xdim2_advec_mom_kernel_z1_h = xdim2;
    cudaMemcpyToSymbol(ydim2_advec_mom_kernel_z1, &ydim2, sizeof(int));
    ydim2_advec_mom_kernel_z1_h = ydim2;
    cudaMemcpyToSymbol(xdim3_advec_mom_kernel_z1, &xdim3, sizeof(int));
    xdim3_advec_mom_kernel_z1_h = xdim3;
    cudaMemcpyToSymbol(ydim3_advec_mom_kernel_z1, &ydim3, sizeof(int));
    ydim3_advec_mom_kernel_z1_h = ydim3;
    cudaMemcpyToSymbol(xdim4_advec_mom_kernel_z1, &xdim4, sizeof(int));
    xdim4_advec_mom_kernel_z1_h = xdim4;
    cudaMemcpyToSymbol(ydim4_advec_mom_kernel_z1, &ydim4, sizeof(int));
    ydim4_advec_mom_kernel_z1_h = ydim4;
    cudaMemcpyToSymbol(xdim5_advec_mom_kernel_z1, &xdim5, sizeof(int));
    xdim5_advec_mom_kernel_z1_h = xdim5;
    cudaMemcpyToSymbol(ydim5_advec_mom_kernel_z1, &ydim5, sizeof(int));
    ydim5_advec_mom_kernel_z1_h = ydim5;
  }

  // 2D thread blocks; the grid is rounded up in x/y and spans z directly.
  dim3 grid((x_size - 1) / OPS_block_size_x + 1,
            (y_size - 1) / OPS_block_size_y + 1, z_size);
  dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);

  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);

  char *p_a[6];

  // set up initial pointers: byte offset of (start[0], start[1], start[2])
  // within each dat, honouring the stencil strides.
  int base0 = args[0].dat->base_offset +
              dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
  base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
                      (start[2] * args[0].stencil->stride[2]);
  p_a[0] = (char *)args[0].data_d + base0;

  int base1 = args[1].dat->base_offset +
              dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
  base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
                      (start[2] * args[1].stencil->stride[2]);
  p_a[1] = (char *)args[1].data_d + base1;

  int base2 = args[2].dat->base_offset +
              dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2 +
          dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
  base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
                      (start[2] * args[2].stencil->stride[2]);
  p_a[2] = (char *)args[2].data_d + base2;

  int base3 = args[3].dat->base_offset +
              dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3 +
          dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
  base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
                      (start[2] * args[3].stencil->stride[2]);
  p_a[3] = (char *)args[3].data_d + base3;

  int base4 = args[4].dat->base_offset +
              dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  base4 = base4 +
          dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
  base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
                      (start[2] * args[4].stencil->stride[2]);
  p_a[4] = (char *)args[4].data_d + base4;

  int base5 = args[5].dat->base_offset +
              dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
  base5 = base5 +
          dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
  base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
                      (start[2] * args[5].stencil->stride[2]);
  p_a[5] = (char *)args[5].data_d + base5;

#ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 6);
  ops_halo_exchanges(args, 6, range);
#endif

  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[122].mpi_time += t2 - t1;
  }

  // call kernel wrapper function, passing in pointers to data
  ops_advec_mom_kernel_z1<<<grid, tblock>>>(
      (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
      (double *)p_a[4], (double *)p_a[5], x_size, y_size, z_size);

  cutilSafeCall(cudaGetLastError());

  if (OPS_diags > 1) {
    // Synchronize only when diagnostics are on, so timing is accurate.
    cutilSafeCall(cudaDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[122].time += t1 - t2;
  }

#ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 6);
  // Only arg0 and arg1 are written by the kernel, so only their halos dirty.
  ops_set_halo_dirtybit3(&args[0], range);
  ops_set_halo_dirtybit3(&args[1], range);
#endif

  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[122].mpi_time += t2 - t1;
    OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg4);
    OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg5);
  }
}

#ifdef OPS_LAZY
// Lazy-execution entry point: builds a kernel descriptor (with a hash of
// the range and argument indices) and enqueues it for deferred execution.
void ops_par_loop_advec_mom_kernel_z1(char const *name, ops_block block,
                                      int dim, int *range, ops_arg arg0,
                                      ops_arg arg1, ops_arg arg2, ops_arg arg3,
                                      ops_arg arg4, ops_arg arg5) {
  ops_kernel_descriptor *desc =
      (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 122;
  // djb2-style hash seeded with the kernel index.
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 122;
  for (int i = 0; i < 6; i++) {
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 6;
  desc->args = (ops_arg *)malloc(6 * sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
  desc->function = ops_par_loop_advec_mom_kernel_z1_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(122, "advec_mom_kernel_z1");
  }
  ops_enqueue_kernel(desc);
}
#endif
6292ac4c0f31f236bc20283d3f05c906a05a6155.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "XSbench_header.cuh" //////////////////////////////////////////////////////////////////////////////////// // BASELINE FUNCTIONS //////////////////////////////////////////////////////////////////////////////////// // All "baseline" code is at the top of this file. The baseline code is a simple // port of the original CPU OpenMP code to CUDA with few significant changes or // optimizations made. Following these functions are a number of optimized variants, // which each deploy a different combination of optimizations strategies. By // default, XSBench will only run the baseline implementation. Optimized variants // must be specifically selected using the "-k <optimized variant ID>" command // line argument. //////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_baseline(Inputs in, SimulationData GSD, int mype) { //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Running baseline event-based simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); hipLaunchKernelGGL(( xs_lookup_kernel_baseline), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); #ifdef PRINT #ifdef FORWARD_PASS double here = 1.23456; gpuErrchk( hipMemcpy(&here, GSD.dout, sizeof(double), hipMemcpyDeviceToHost) ); printf("fwdhere=%f\n", here); printf("der=%f\n", here); #else //size_t num = (GSD.length_nuclide_grid < 
10 ) ? GSD.length_nuclide_grid : 10 ; size_t num = 1; double here[num]; gpuErrchk( hipMemcpy(&here[0], &GSD.d_nuclide_grid[0].energy, num * sizeof(double), hipMemcpyDeviceToHost) ); for (int i=0; i<num; i++) printf("bwdhere=%f\n", here[i]); printf("der=%f\n", here[0]); #endif #endif unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return verification_scalar; } template<typename... Args> __device__ void __enzyme_autodiff(void*, Args...); __device__ int enzyme_dup, enzyme_const, enzyme_active; // In this kernel, we perform a single lookup with each thread. Threads within a warp // do not really have any relation to each other, and divergence due to high nuclide count fuel // material lookups are costly. This kernel constitutes baseline performance. __global__ void xs_lookup_kernel_baseline(Inputs in, SimulationData GSD ) { // The lookup ID. Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); double macro_xs_vector[5] = {0}; #ifdef PRINT double d_macro_xs_vector[5] = {0.0}; d_macro_xs_vector[0] = 1.0; #else double d_macro_xs_vector[5] = {1.0}; #endif //if (i == 0) // printf("Running correct sim\n"); // Perform macroscopic Cross Section Lookup #ifdef FORWARD_PASS calculate_macro_xs( p_energy, // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per 
material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); #ifdef PRINT double macro_xs_vector2[5] = {0}; calculate_macro_xs( p_energy, // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.d_nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector2, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); //if ((macro_xs_vector2[0] - macro_xs_vector[0]) / DELTA) // printf("i=%d dd=%f, out2=%f out1=%f | in2=%f 
in1=%f\n", i, (macro_xs_vector2[0] - macro_xs_vector[0]) / DELTA, macro_xs_vector2[0], macro_xs_vector[0], GSD.d_nuclide_grid[0].energy, GSD.nuclide_grid[0].energy); atomicAdd(GSD.dout, (macro_xs_vector2[0] - macro_xs_vector[0]) / DELTA ); #endif #else __enzyme_autodiff((void*)calculate_macro_xs, enzyme_const, p_energy, // Sampled neutron energy (in lethargy) enzyme_const, mat, // Sampled material type index neutron is in enzyme_const, in.n_isotopes, // Total number of isotopes in simulation enzyme_const, in.n_gridpoints, // Number of gridpoints per isotope in simulation enzyme_const, GSD.num_nucs, // 1-D array with number of nuclides per material enzyme_const, GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material enzyme_const, GSD.unionized_energy_array, // 1-D Unionized energy array enzyme_const, GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level //enzyme_const, GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation enzyme_dup, GSD.nuclide_grid, GSD.d_nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation enzyme_const, GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material //enzyme_const, macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) enzyme_dup, macro_xs_vector, d_macro_xs_vector,// 1-D array with result of the macroscopic cross section (5 different reaction channels) enzyme_const, in.grid_type, // Lookup type (nuclide, hash, or unionized) enzyme_const, in.hash_bins, // Number of hash bins used (if using hash lookup type) enzyme_const, GSD.max_num_nucs // Maximum number of nuclides present in any material ); #endif // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, 
then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } // Calculates the microscopic cross section for a given nuclide & energy //__attribute__((noinline)) #ifdef ALWAYS_INLINE __attribute__((always_inline)) #else __attribute__((noinline)) #endif __device__ void calculate_micro_xs( double p_energy, int nuc, long n_isotopes, long n_gridpoints, double * __restrict__ egrid, int * __restrict__ index_data, NuclideGridPoint * __restrict__ nuclide_grids, long idx, double * __restrict__ xs_vector, int grid_type, int hash_bins ){ // Variables double f; NuclideGridPoint * low, * high; #ifdef TEMPLATIZE grid_type = UNIONIZED; #endif // If using only the nuclide grid, we must perform a binary search // to find the energy location in this particular nuclide's grid. if( grid_type == NUCLIDE ) { // Perform binary search on the Nuclide Grid to find the index idx = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], 0, n_gridpoints-1); // pull ptr from nuclide grid and check to ensure that // we're not reading off the end of the nuclide's grid if( idx == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + 1]; else low = &nuclide_grids[nuc*n_gridpoints + idx]; } else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed. 
{ // pull ptr from energy grid and check to ensure that // we're not reading off the end of the nuclide's grid if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1]; else low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc]]; } else // Hash grid { // load lower bounding index int u_low = index_data[idx * n_isotopes + nuc]; // Determine higher bounding index int u_high; if( idx == hash_bins - 1 ) u_high = n_gridpoints - 1; else u_high = index_data[(idx+1)*n_isotopes + nuc] + 1; // Check edge cases to make sure energy is actually between these // Then, if things look good, search for gridpoint in the nuclide grid // within the lower and higher limits we've calculated. double e_low = nuclide_grids[nuc*n_gridpoints + u_low].energy; double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy; int lower; if( p_energy <= e_low ) lower = 0; else if( p_energy >= e_high ) lower = n_gridpoints - 1; else lower = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], u_low, u_high); if( lower == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + lower - 1]; else low = &nuclide_grids[nuc*n_gridpoints + lower]; } high = low + 1; // calculate the re-useable interpolation factor f = (high->energy - p_energy) / (high->energy - low->energy); // Total XS xs_vector[0] = high->total_xs - f * (high->total_xs - low->total_xs); // Elastic XS xs_vector[1] = high->elastic_xs - f * (high->elastic_xs - low->elastic_xs); // Absorbtion XS xs_vector[2] = high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs); // Fission XS xs_vector[3] = high->fission_xs - f * (high->fission_xs - low->fission_xs); // Nu Fission XS xs_vector[4] = high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs); } // Calculates macroscopic cross section based on a given material & energy __device__ void calculate_macro_xs( double p_energy, int mat, long 
n_isotopes, long n_gridpoints, int * __restrict__ num_nucs, double * __restrict__ concs, double * __restrict__ egrid, int * __restrict__ index_data, NuclideGridPoint * __restrict__ nuclide_grids, int * __restrict__ mats, double * __restrict__ macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){ int p_nuc; // the nuclide we are looking up long idx = -1; double conc; // the concentration of the nuclide in the material // cleans out macro_xs_vector for( int k = 0; k < 5; k++ ) macro_xs_vector[k] = 0; // If we are using the unionized energy grid (UEG), we only // need to perform 1 binary search per macroscopic lookup. // If we are using the nuclide grid search, it will have to be // done inside of the "calculate_micro_xs" function for each different // nuclide in the material. if( grid_type == UNIONIZED ) idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid); else if( grid_type == HASH ) { double du = 1.0 / hash_bins; idx = p_energy / du; } // Once we find the pointer array on the UEG, we can pull the data // from the respective nuclide grids, as well as the nuclide // concentration data for the material // Each nuclide from the material needs to have its micro-XS array // looked up & interpolatied (via calculate_micro_xs). Then, the // micro XS is multiplied by the concentration of that nuclide // in the material, and added to the total macro XS array. 
// (Independent -- though if parallelizing, must use atomic operations // or otherwise control access to the xs_vector and macro_xs_vector to // avoid simulataneous writing to the same data structure) for( int j = 0; j < num_nucs[mat]; j++ ) { double xs_vector[5]; p_nuc = mats[mat*max_num_nucs + j]; conc = concs[mat*max_num_nucs + j]; calculate_micro_xs( p_energy, p_nuc, n_isotopes, n_gridpoints, egrid, index_data, nuclide_grids, idx, xs_vector, grid_type, hash_bins ); for( int k = 0; k < 5; k++ ) macro_xs_vector[k] += xs_vector[k] * conc; } } // binary search for energy on unionized energy grid // returns lower index __host__ __device__ long grid_search( long n, double quarry, double * __restrict__ A) { long lowerLimit = 0; long upperLimit = n-1; long examinationPoint; long length = upperLimit - lowerLimit; while( length > 1 ) { examinationPoint = lowerLimit + ( length / 2 ); if( A[examinationPoint] > quarry ) upperLimit = examinationPoint; else lowerLimit = examinationPoint; length = upperLimit - lowerLimit; } return lowerLimit; } // binary search for energy on nuclide energy grid // __attribute__((noinline)) __host__ __device__ long grid_search_nuclide( long n, double quarry, NuclideGridPoint * A, long low, long high) { long lowerLimit = low; long upperLimit = high; long examinationPoint; long length = upperLimit - lowerLimit; while( length > 1 ) { examinationPoint = lowerLimit + ( length / 2 ); if( A[examinationPoint].energy > quarry ) upperLimit = examinationPoint; else lowerLimit = examinationPoint; length = upperLimit - lowerLimit; } return lowerLimit; } // picks a material based on a probabilistic distribution __device__ int pick_mat( uint64_t * seed ) { // I have a nice spreadsheet supporting these numbers. They are // the fractions (by volume) of material in the core. Not a // *perfect* approximation of where XS lookups are going to occur, // but this will do a good job of biasing the system nonetheless. 
// Also could be argued that doing fractions by weight would be // a better approximation, but volume does a good enough job for now. double dist[12]; dist[0] = 0.140; // fuel dist[1] = 0.052; // cladding dist[2] = 0.275; // cold, borated water dist[3] = 0.134; // hot, borated water dist[4] = 0.154; // RPV dist[5] = 0.064; // Lower, radial reflector dist[6] = 0.066; // Upper reflector / top plate dist[7] = 0.055; // bottom plate dist[8] = 0.008; // bottom nozzle dist[9] = 0.015; // top nozzle dist[10] = 0.025; // top of fuel assemblies dist[11] = 0.013; // bottom of fuel assemblies double roll = LCG_random_double(seed); // makes a pick based on the distro for( int i = 0; i < 12; i++ ) { double running = 0; for( int j = i; j > 0; j-- ) running += dist[j]; if( roll < running ) return i; } return 0; } __host__ __device__ double LCG_random_double(uint64_t * seed) { // LCG parameters const uint64_t m = 9223372036854775808ULL; // 2^63 const uint64_t a = 2806196910506780709ULL; const uint64_t c = 1ULL; *seed = (a * (*seed) + c) % m; return (double) (*seed) / (double) m; } __device__ uint64_t fast_forward_LCG(uint64_t seed, uint64_t n) { // LCG parameters const uint64_t m = 9223372036854775808ULL; // 2^63 uint64_t a = 2806196910506780709ULL; uint64_t c = 1ULL; n = n % m; uint64_t a_new = 1; uint64_t c_new = 0; while(n > 0) { if(n & 1) { a_new *= a; c_new = c_new * a + c; } c *= (a + 1); a *= a; n >>= 1; } return (a_new * seed + c_new) % m; } //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// // OPTIMIZED VARIANT FUNCTIONS //////////////////////////////////////////////////////////////////////////////////// // This section contains a number of optimized variants of some of the above // 
functions, which each deploy a different combination of optimizations strategies // specific to GPU. By default, XSBench will not run any of these variants. They // must be specifically selected using the "-k <optimized variant ID>" command // line argument. //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// // Optimization 1 -- Basic kernel splitting of sampling & lookup routines //////////////////////////////////////////////////////////////////////////////////// // This optimization requires a little extra data to store all material IDs and // energies for the sampled particles between kernel calls. By itself, this // optimization is likely actually a bit of a slowdown compared to the baseline // kernel. However, it will be used by better optimization kernels down the line. 
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_1(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 1 - basic sample/lookup kernel splitting"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); hipLaunchKernelGGL(( xs_lookup_kernel_optimization_1), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification 
results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return verification_scalar; } __global__ void sampling_kernel(Inputs in, SimulationData GSD ) { // The lookup ID. const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); // Store sample data in state array GSD.p_energy_samples[i] = p_energy; GSD.mat_samples[i] = mat; } __global__ void xs_lookup_kernel_optimization_1(Inputs in, SimulationData GSD ) { // The lookup ID. Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) GSD.mat_samples[i], // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction 
channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } //////////////////////////////////////////////////////////////////////////////////// // Optimization 2 -- Kernel Splitting + Material-Specific Lookup Kernels //////////////////////////////////////////////////////////////////////////////////// // This one builds on the first optimization. It uses multiple kernels, one // for each material type, to better balance the workload across threads within // a warp. This works because each material will have a different number of // isotopes, with some having a ton, meaning that SIMD efficiency can be rather // low by default. Better efficiency may be gained in further optimizations by // sorting the lookups first. 
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_2(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 2 - Material Lookup Kernels"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); // Launch all material kernels individually for( int m = 0; m < 12; m++ ) hipLaunchKernelGGL(( xs_lookup_kernel_optimization_2), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, m ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results 
//////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_2(Inputs in, SimulationData GSD, int m ) { // The lookup ID. Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; // Check that our material type matches the kernel material int mat = GSD.mat_samples[i]; if( mat != m ) return; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned 
macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } //////////////////////////////////////////////////////////////////////////////////// // Optimization 3 -- Kernel Splitting + Fuel or Not-Fuel Lookups //////////////////////////////////////////////////////////////////////////////////// // This optimization alters Optimization 2. Instead of executing a kernel call for // ALL different material types, only two different calls are made. One for fuel, // and one for all the other materials. As the fuel material has by far the most // isotopes, it takes much longer than the rest. //////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_3(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 3 - Fuel or Other Lookup Kernels"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) 
printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); // Launch all material kernels individually hipLaunchKernelGGL(( xs_lookup_kernel_optimization_3), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, 0 ); hipLaunchKernelGGL(( xs_lookup_kernel_optimization_3), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, 1 ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_3(Inputs in, SimulationData GSD, int is_fuel ) { // The lookup ID. 
Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; int mat = GSD.mat_samples[i]; // If this is the fuel kernel, AND this is a fuel lookup, then perform a lookup // OR if this is not the fuel kernel, AND this is not a fuel lookup, then perform the lookup if( ((is_fuel == 1) && (mat == 0)) || ((is_fuel == 0) && (mat != 0 ) )) { double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. 
double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } } //////////////////////////////////////////////////////////////////////////////////// // Optimization 4 -- Kernel Splitting + All Material Lookups + Full Sort //////////////////////////////////////////////////////////////////////////////////// // This optimization builds on optimization 2, adding in a full sort before // hand so that the warps should be densely packed together. This should maximize // SIMD efficiency of the kernel, but may incur an added cost for the sort. //////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_4(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 4 - All Material Lookup Kernels + Material Sort"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized 
simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); // Count the number of fuel material lookups that need to be performed (fuel id = 0) int n_lookups_per_material[12]; for( int m = 0; m < 12; m++ ) n_lookups_per_material[m] = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, m); // Sort materials thrust::sort_by_key(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples); // Launch all material kernels individually int offset = 0; for( int m = 0; m < 12; m++ ) { nthreads = 32; nblocks = ceil((double) n_lookups_per_material[m] / (double) nthreads); hipLaunchKernelGGL(( xs_lookup_kernel_optimization_4), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, m, n_lookups_per_material[m], offset ); offset += n_lookups_per_material[m]; } gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_4(Inputs in, SimulationData GSD, int m, int n_lookups, int offset ) { // The lookup ID. 
Used to set the seed, and to store the verification value int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= n_lookups ) return; i += offset; // Check that our material type matches the kernel material int mat = GSD.mat_samples[i]; if( mat != m ) return; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. 
double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } //////////////////////////////////////////////////////////////////////////////////// // Optimization 5 -- Kernel Splitting + Fuel/Other Lookups + Fuel/Other Partition //////////////////////////////////////////////////////////////////////////////////// // This optimization is similar to optimization 4, but instead of sorting // fully by material, we just sort by fuel or not fuel. Similarly, instead of // launching kernels for all materials, similar to optimization 3 we only launch // kernels for the fuel and other mateirals. //////////////////////////////////////////////////////////////////////////////////// // Comparator for partitioning stage struct is_mat_fuel{ __host__ __device__ bool operator()(const int & a) { return a == 0; } }; unsigned long long run_event_based_simulation_optimization_5(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 5 - Fuel/No Fuel Lookup Kernels + Fuel/No Fuel Sort"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); 
//////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); // Count the number of fuel material lookups that need to be performed (fuel id = 0) int n_fuel_lookups = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, 0); // Partition fuel into the first part of the array thrust::partition(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples, is_mat_fuel()); // Launch all material kernels individually (asynchronous is allowed) nblocks = ceil( (double) n_fuel_lookups / (double) nthreads); hipLaunchKernelGGL(( xs_lookup_kernel_optimization_5), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, n_fuel_lookups, 0 ); nblocks = ceil( (double) (in.lookups - n_fuel_lookups) / (double) nthreads); hipLaunchKernelGGL(( xs_lookup_kernel_optimization_5), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, in.lookups-n_fuel_lookups, n_fuel_lookups ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_5(Inputs in, SimulationData GSD, int n_lookups, int offset ) { // The lookup ID. 
Used to set the seed, and to store the verification value int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= n_lookups ) return; i += offset; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) GSD.mat_samples[i], // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. 
double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } //////////////////////////////////////////////////////////////////////////////////// // Optimization 6 -- Kernel Splitting + All Material Lookups + Full Sort // + Energy Sort //////////////////////////////////////////////////////////////////////////////////// // This optimization builds on optimization 4, adding in a second sort by energy. // It is extremely fast, as now most of the threads within a warp will be hitting // the same indices in the lookup grids. This greatly reduces thread divergence and // greatly improves cache efficiency and re-use. // // However, it is unlikely that this exact optimization would be possible in a real // application like OpenMC. One major difference is that particle objects are quite // large, often having 50+ variable fields, such that sorting them in memory becomes // rather expensive. Instead, the best possible option would probably be to create // intermediate indexing (per Hamilton et. al 2019), and run the kernels indirectly. 
////////////////////////////////////////////////////////////////////////////////////

// Optimization 6 driver. Samples an energy + material for every lookup, sorts
// the lookups by material and then by energy within each material's segment,
// and finally launches one material-specific lookup kernel per material so
// warps are densely packed with similar work.
//
// Parameters:
//   in   - simulation inputs (lookup count, grid type, etc.)
//   GSD  - device-resident simulation data; p_energy_samples / mat_samples
//          buffers are allocated here
//   mype - MPI-style rank; rank 0 prints progress messages
//
// Returns the reduced verification scalar (sum of per-lookup verification
// values computed on the device).
unsigned long long run_event_based_simulation_optimization_6(Inputs in, SimulationData GSD, int mype)
{
	const char * optimization_name = "Optimization 6 - Material & Energy Sorts + Material-specific Kernels";

	if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name);

	////////////////////////////////////////////////////////////////////////////////
	// Allocate Additional Data Structures Needed by Optimized Kernel
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0) printf("Allocating additional device data required by kernel...\n");
	size_t sz;
	size_t total_sz = 0;

	// One sampled energy (double) per lookup
	sz = in.lookups * sizeof(double);
	gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) );
	total_sz += sz;
	GSD.length_p_energy_samples = in.lookups;

	// One sampled material index (int) per lookup
	sz = in.lookups * sizeof(int);
	gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) );
	total_sz += sz;
	GSD.length_mat_samples = in.lookups;

	if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);

	////////////////////////////////////////////////////////////////////////////////
	// Configure & Launch Simulation Kernel
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0) printf("Beginning optimized simulation...\n");

	int nthreads = 32;
	int nblocks = ceil( (double) in.lookups / 32.0); // ceil-div covers the ragged tail

	hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD );
	gpuErrchk( hipPeekAtLastError() );
	gpuErrchk( hipDeviceSynchronize() );

	// Count the number of lookups that need to be performed for each of the
	// 12 materials (fuel id = 0)
	int n_lookups_per_material[12];
	for( int m = 0; m < 12; m++ )
		n_lookups_per_material[m] = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, m);

	// Sort by material first
	thrust::sort_by_key(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples);

	// Now, sort each material's contiguous segment by energy
	int offset = 0;
	for( int m = 0; m < 12; m++ )
	{
		thrust::sort_by_key(thrust::device, GSD.p_energy_samples + offset, GSD.p_energy_samples + offset + n_lookups_per_material[m], GSD.mat_samples + offset);
		offset += n_lookups_per_material[m];
	}

	// Launch all material kernels individually, skipping any material that
	// received no lookups: launching a grid with zero blocks is an invalid
	// launch configuration and would poison subsequent error checks.
	offset = 0;
	for( int m = 0; m < 12; m++ )
	{
		if( n_lookups_per_material[m] > 0 )
		{
			nthreads = 32;
			nblocks = ceil((double) n_lookups_per_material[m] / (double) nthreads);
			hipLaunchKernelGGL(( xs_lookup_kernel_optimization_4), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, m, n_lookups_per_material[m], offset );
		}
		offset += n_lookups_per_material[m];
	}
	gpuErrchk( hipPeekAtLastError() );
	gpuErrchk( hipDeviceSynchronize() );

	////////////////////////////////////////////////////////////////////////////////
	// Reduce Verification Results
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0) printf("Reducing verification results...\n");

	// Use a 0ULL init value so thrust::reduce accumulates in unsigned long long
	// (thrust deduces the accumulator type from the init argument; a plain 0
	// would sum in.lookups values into an int and can overflow for large runs).
	// This also matches the function's return type.
	unsigned long long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0ULL);
	gpuErrchk( hipPeekAtLastError() );
	gpuErrchk( hipDeviceSynchronize() );

	return verification_scalar;
}
6292ac4c0f31f236bc20283d3f05c906a05a6155.cu
#include "XSbench_header.cuh"

////////////////////////////////////////////////////////////////////////////////////
// BASELINE FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// All "baseline" code is at the top of this file. The baseline code is a simple
// port of the original CPU OpenMP code to CUDA with few significant changes or
// optimizations made. Following these functions are a number of optimized variants,
// which each deploy a different combination of optimizations strategies. By
// default, XSBench will only run the baseline implementation. Optimized variants
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
////////////////////////////////////////////////////////////////////////////////////

// Host driver for the baseline simulation: launches one lookup per GPU thread,
// then reduces the per-lookup verification values on-device with thrust and
// returns the resulting verification scalar. Rank 0 (mype == 0) prints progress.
unsigned long long run_event_based_simulation_baseline(Inputs in, SimulationData GSD, int mype)
{
	////////////////////////////////////////////////////////////////////////////////
	// Configure & Launch Simulation Kernel
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0) printf("Running baseline event-based simulation...\n");

	int nthreads = 32;
	// ceil-div so the final partial block still covers the last lookups;
	// the kernel bounds-checks i >= in.lookups
	int nblocks = ceil( (double) in.lookups / 32.0);

	xs_lookup_kernel_baseline<<<nblocks, nthreads>>>( in, GSD );
	gpuErrchk( cudaPeekAtLastError() );
	gpuErrchk( cudaDeviceSynchronize() );

	////////////////////////////////////////////////////////////////////////////////
	// Reduce Verification Results
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0) printf("Reducing verification results...\n");

#ifdef PRINT
#ifdef FORWARD_PASS
	// Debug path: read back the finite-difference derivative the kernel
	// accumulated into GSD.dout and print it.
	double here = 1.23456;
	gpuErrchk( cudaMemcpy(&here, GSD.dout, sizeof(double), cudaMemcpyDeviceToHost) );
	printf("fwdhere=%f\n", here);
	printf("der=%f\n", here);
#else
	// Debug path: read back the first entry of the shadow (derivative)
	// nuclide grid populated by the Enzyme reverse pass and print it.
	//size_t num = (GSD.length_nuclide_grid < 10 ) ? GSD.length_nuclide_grid : 10 ;
	size_t num = 1;
	// NOTE(review): variable-length array — non-standard C++, though num is
	// effectively the constant 1 here.
	double here[num];
	gpuErrchk( cudaMemcpy(&here[0], &GSD.d_nuclide_grid[0].energy, num * sizeof(double), cudaMemcpyDeviceToHost) );
	for (int i=0; i<num; i++)
		printf("bwdhere=%f\n", here[i]);
	printf("der=%f\n", here[0]);
#endif
#endif

	unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0);
	gpuErrchk( cudaPeekAtLastError() );
	gpuErrchk( cudaDeviceSynchronize() );

	return verification_scalar;
}

// Enzyme automatic-differentiation entry point and activity annotations.
// __enzyme_autodiff is recognized and lowered by the Enzyme compiler plugin;
// enzyme_dup / enzyme_const / enzyme_active tag each following argument's
// activity in the autodiff call below.
template<typename... Args>
__device__ void __enzyme_autodiff(void*, Args...);
__device__ int enzyme_dup, enzyme_const, enzyme_active;

// In this kernel, we perform a single lookup with each thread. Threads within a warp
// do not really have any relation to each other, and divergence due to high nuclide count fuel
// material lookups are costly. This kernel constitutes baseline performance.
__global__ void xs_lookup_kernel_baseline(Inputs in, SimulationData GSD )
{
	// The lookup ID. Used to set the seed, and to store the verification value
	const int i = blockIdx.x *blockDim.x + threadIdx.x;

	if( i >= in.lookups )
		return;

	// Set the initial seed value
	uint64_t seed = STARTING_SEED;

	// Forward seed to lookup index (we need 2 samples per lookup)
	seed = fast_forward_LCG(seed, 2*i);

	// Randomly pick an energy and material for the particle
	double p_energy = LCG_random_double(&seed);
	int mat = pick_mat(&seed);

	double macro_xs_vector[5] = {0};

#ifdef PRINT
	// Seed the output gradient with d(out[0]) = 1 so the reverse pass yields
	// derivatives of the total-XS channel only.
	double d_macro_xs_vector[5] = {0.0};
	d_macro_xs_vector[0] = 1.0;
#else
	// NOTE(review): {1.0} initializes only element 0 to 1.0; the remaining
	// four elements zero-initialize — same effective seed as the branch above.
	double d_macro_xs_vector[5] = {1.0};
#endif
	//if (i == 0)
	//	printf("Running correct sim\n");

	// Perform macroscopic Cross Section Lookup
#ifdef FORWARD_PASS
	calculate_macro_xs(
			p_energy,        // Sampled neutron energy (in lethargy)
			mat,             // Sampled material type index neutron is in
			in.n_isotopes,   // Total number of isotopes in simulation
			in.n_gridpoints, // Number of gridpoints per isotope in simulation
			GSD.num_nucs,    // 1-D array with number of nuclides per material
			GSD.concs,       // Flattened 2-D array with concentration of each nuclide in each material
			GSD.unionized_energy_array, // 1-D Unionized energy array
			GSD.index_grid,  // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
			GSD.nuclide_grid,// Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
			GSD.mats,        // Flattened 2-D array with nuclide indices defining composition of each type of material
			macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
			in.grid_type,    // Lookup type (nuclide, hash, or unionized)
			in.hash_bins,    // Number of hash bins used (if using hash lookup type)
			GSD.max_num_nucs // Maximum number of nuclides present in any material
			);
#ifdef PRINT
	// Finite-difference check: repeat the lookup against the perturbed grid
	// (GSD.d_nuclide_grid) and accumulate (f(x+dx) - f(x)) / DELTA into GSD.dout.
	double macro_xs_vector2[5] = {0};
	calculate_macro_xs(
			p_energy,          // Sampled neutron energy (in lethargy)
			mat,               // Sampled material type index neutron is in
			in.n_isotopes,     // Total number of isotopes in simulation
			in.n_gridpoints,   // Number of gridpoints per isotope in simulation
			GSD.num_nucs,      // 1-D array with number of nuclides per material
			GSD.concs,         // Flattened 2-D array with concentration of each nuclide in each material
			GSD.unionized_energy_array, // 1-D Unionized energy array
			GSD.index_grid,    // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
			GSD.d_nuclide_grid,// Perturbed copy of the nuclide grid (finite-difference input)
			GSD.mats,          // Flattened 2-D array with nuclide indices defining composition of each type of material
			macro_xs_vector2,  // Result of the lookup against the perturbed grid
			in.grid_type,      // Lookup type (nuclide, hash, or unionized)
			in.hash_bins,      // Number of hash bins used (if using hash lookup type)
			GSD.max_num_nucs   // Maximum number of nuclides present in any material
			);
	//if ((macro_xs_vector2[0] - macro_xs_vector[0]) / DELTA)
	//	printf("i=%d dd=%f, out2=%f out1=%f | in2=%f in1=%f\n", i, (macro_xs_vector2[0] - macro_xs_vector[0]) / DELTA, macro_xs_vector2[0], macro_xs_vector[0], GSD.d_nuclide_grid[0].energy, GSD.nuclide_grid[0].energy);
	atomicAdd(GSD.dout, (macro_xs_vector2[0] - macro_xs_vector[0]) / DELTA );
#endif
#else
	// Reverse-mode AD of calculate_macro_xs via Enzyme. enzyme_dup pairs each
	// active pointer with its shadow (derivative) buffer; enzyme_const marks
	// arguments that are inactive for differentiation.
	__enzyme_autodiff((void*)calculate_macro_xs,
			enzyme_const, p_energy,        // Sampled neutron energy (in lethargy)
			enzyme_const, mat,             // Sampled material type index neutron is in
			enzyme_const, in.n_isotopes,   // Total number of isotopes in simulation
			enzyme_const, in.n_gridpoints, // Number of gridpoints per isotope in simulation
			enzyme_const, GSD.num_nucs,    // 1-D array with number of nuclides per material
			enzyme_const, GSD.concs,       // Flattened 2-D array with concentration of each nuclide in each material
			enzyme_const, GSD.unionized_energy_array, // 1-D Unionized energy array
			enzyme_const, GSD.index_grid,  // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
			//enzyme_const, GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
			enzyme_dup, GSD.nuclide_grid, GSD.d_nuclide_grid, // Active: nuclide grid + its shadow (receives d(out)/d(grid))
			enzyme_const, GSD.mats,        // Flattened 2-D array with nuclide indices defining composition of each type of material
			//enzyme_const, macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
			enzyme_dup, macro_xs_vector, d_macro_xs_vector, // Active: output vector + output-gradient seed
			enzyme_const, in.grid_type,    // Lookup type (nuclide, hash, or unionized)
			enzyme_const, in.hash_bins,    // Number of hash bins used (if using hash lookup type)
			enzyme_const, GSD.max_num_nucs // Maximum number of nuclides present in any material
			);
#endif

	// For verification, and to prevent the compiler from optimizing
	// all work out, we interrogate the returned macro_xs_vector array
	// to find its maximum value index, then increment the verification
	// value by that index. In this implementation, we have each thread
	// write to its thread_id index in an array, which we will reduce
	// with a thrust reduction kernel after the main simulation kernel.
	double max = -1.0;
	int max_idx = 0;
	for(int j = 0; j < 5; j++ )
	{
		if( macro_xs_vector[j] > max )
		{
			max = macro_xs_vector[j];
			max_idx = j;
		}
	}
	GSD.verification[i] = max_idx+1;
}

// Calculates the microscopic cross section for a given nuclide & energy
//__attribute__((noinline))
#ifdef ALWAYS_INLINE
__attribute__((always_inline))
#else
__attribute__((noinline))
#endif
__device__ void calculate_micro_xs( double p_energy, int nuc, long n_isotopes,
                           long n_gridpoints,
                           double * __restrict__ egrid, int * __restrict__ index_data,
                           NuclideGridPoint * __restrict__ nuclide_grids,
                           long idx, double * __restrict__ xs_vector, int grid_type, int hash_bins ){
	// Variables
	double f;
	NuclideGridPoint * low, * high;

#ifdef TEMPLATIZE
	// Compile-time specialization: pin the grid type so the branches below fold away
	grid_type = UNIONIZED;
#endif

	// If using only the nuclide grid, we must perform a binary search
	// to find the energy location in this particular nuclide's grid.
	if( grid_type == NUCLIDE )
	{
		// Perform binary search on the Nuclide Grid to find the index
		idx = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], 0, n_gridpoints-1);

		// pull ptr from nuclide grid and check to ensure that
		// we're not reading off the end of the nuclide's grid
		if( idx == n_gridpoints - 1 )
			// NOTE(review): upstream XSBench uses "idx - 1" in this clamp so that
			// low stays one below the last gridpoint; "+ 1" here looks like a
			// transcription slip — confirm against the reference source.
			low = &nuclide_grids[nuc*n_gridpoints + 1];
		else
			low = &nuclide_grids[nuc*n_gridpoints + idx];
	}
	else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed.
	{
		// pull ptr from energy grid and check to ensure that
		// we're not reading off the end of the nuclide's grid
		if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 )
			low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1];
		else
			low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc]];
	}
	else // Hash grid
	{
		// load lower bounding index
		int u_low = index_data[idx * n_isotopes + nuc];

		// Determine higher bounding index
		int u_high;
		if( idx == hash_bins - 1 )
			u_high = n_gridpoints - 1;
		else
			u_high = index_data[(idx+1)*n_isotopes + nuc] + 1;

		// Check edge cases to make sure energy is actually between these
		// Then, if things look good, search for gridpoint in the nuclide grid
		// within the lower and higher limits we've calculated.
		double e_low = nuclide_grids[nuc*n_gridpoints + u_low].energy;
		double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy;
		int lower;
		if( p_energy <= e_low )
			lower = 0;
		else if( p_energy >= e_high )
			lower = n_gridpoints - 1;
		else
			lower = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], u_low, u_high);

		// Clamp so that low+1 (below) never reads past the end of the grid
		if( lower == n_gridpoints - 1 )
			low = &nuclide_grids[nuc*n_gridpoints + lower - 1];
		else
			low = &nuclide_grids[nuc*n_gridpoints + lower];
	}

	high = low + 1;

	// calculate the re-useable interpolation factor
	f = (high->energy - p_energy) / (high->energy - low->energy);

	// Total XS
	xs_vector[0] = high->total_xs - f * (high->total_xs - low->total_xs);

	// Elastic XS
	xs_vector[1] = high->elastic_xs - f * (high->elastic_xs - low->elastic_xs);

	// Absorbtion XS
	xs_vector[2] = high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs);

	// Fission XS
	xs_vector[3] = high->fission_xs - f * (high->fission_xs - low->fission_xs);

	// Nu Fission XS
	xs_vector[4] = high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs);
}

// Calculates macroscopic cross section based on a given material & energy:
// sums concentration-weighted microscopic XS over every nuclide in the material,
// writing the 5 reaction-channel totals into macro_xs_vector.
__device__ void calculate_macro_xs( double p_energy, int mat, long n_isotopes,
                           long n_gridpoints, int * __restrict__ num_nucs,
                           double * __restrict__ concs,
                           double * __restrict__ egrid, int * __restrict__ index_data,
                           NuclideGridPoint * __restrict__ nuclide_grids,
                           int * __restrict__ mats,
                           double * __restrict__ macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){
	int p_nuc; // the nuclide we are looking up
	long idx = -1;
	double conc; // the concentration of the nuclide in the material

	// cleans out macro_xs_vector
	for( int k = 0; k < 5; k++ )
		macro_xs_vector[k] = 0;

	// If we are using the unionized energy grid (UEG), we only
	// need to perform 1 binary search per macroscopic lookup.
	// If we are using the nuclide grid search, it will have to be
	// done inside of the "calculate_micro_xs" function for each different
	// nuclide in the material.
	if( grid_type == UNIONIZED )
		idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid);
	else if( grid_type == HASH )
	{
		// Hash lookup: bin index is just the energy scaled into hash_bins buckets
		double du = 1.0 / hash_bins;
		idx = p_energy / du;
	}

	// Once we find the pointer array on the UEG, we can pull the data
	// from the respective nuclide grids, as well as the nuclide
	// concentration data for the material
	// Each nuclide from the material needs to have its micro-XS array
	// looked up & interpolatied (via calculate_micro_xs). Then, the
	// micro XS is multiplied by the concentration of that nuclide
	// in the material, and added to the total macro XS array.
	// (Independent -- though if parallelizing, must use atomic operations
	// or otherwise control access to the xs_vector and macro_xs_vector to
	// avoid simulataneous writing to the same data structure)
	for( int j = 0; j < num_nucs[mat]; j++ )
	{
		double xs_vector[5];
		p_nuc = mats[mat*max_num_nucs + j];
		conc = concs[mat*max_num_nucs + j];
		calculate_micro_xs( p_energy, p_nuc, n_isotopes,
				n_gridpoints, egrid, index_data,
				nuclide_grids, idx, xs_vector, grid_type, hash_bins );
		for( int k = 0; k < 5; k++ )
			macro_xs_vector[k] += xs_vector[k] * conc;
	}
}

// binary search for energy on unionized energy grid
// returns lower index
__host__ __device__ long grid_search( long n, double quarry, double * __restrict__ A)
{
	long lowerLimit = 0;
	long upperLimit = n-1;
	long examinationPoint;
	long length = upperLimit - lowerLimit;

	while( length > 1 )
	{
		examinationPoint = lowerLimit + ( length / 2 );

		if( A[examinationPoint] > quarry )
			upperLimit = examinationPoint;
		else
			lowerLimit = examinationPoint;

		length = upperLimit - lowerLimit;
	}

	return lowerLimit;
}

// binary search for energy on nuclide energy grid
// (same invariant as grid_search, but over NuclideGridPoint.energy and
// restricted to the [low, high] index window)
// __attribute__((noinline))
__host__ __device__ long grid_search_nuclide( long n, double quarry, NuclideGridPoint * A, long low, long high)
{
	long lowerLimit = low;
	long upperLimit = high;
	long examinationPoint;
	long length = upperLimit - lowerLimit;

	while( length > 1 )
	{
		examinationPoint = lowerLimit + ( length / 2 );

		if( A[examinationPoint].energy > quarry )
			upperLimit = examinationPoint;
		else
			lowerLimit = examinationPoint;

		length = upperLimit - lowerLimit;
	}

	return lowerLimit;
}

// picks a material based on a probabilistic distribution
__device__ int pick_mat( uint64_t * seed )
{
	// I have a nice spreadsheet supporting these numbers. They are
	// the fractions (by volume) of material in the core. Not a
	// *perfect* approximation of where XS lookups are going to occur,
	// but this will do a good job of biasing the system nonetheless.

	// Also could be argued that doing fractions by weight would be
	// a better approximation, but volume does a good enough job for now.

	double dist[12];
	dist[0]  = 0.140;	// fuel
	dist[1]  = 0.052;	// cladding
	dist[2]  = 0.275;	// cold, borated water
	dist[3]  = 0.134;	// hot, borated water
	dist[4]  = 0.154;	// RPV
	dist[5]  = 0.064;	// Lower, radial reflector
	dist[6]  = 0.066;	// Upper reflector / top plate
	dist[7]  = 0.055;	// bottom plate
	dist[8]  = 0.008;	// bottom nozzle
	dist[9]  = 0.015;	// top nozzle
	dist[10] = 0.025;	// top of fuel assemblies
	dist[11] = 0.013;	// bottom of fuel assemblies

	double roll = LCG_random_double(seed);

	// makes a pick based on the distro
	// (running sums dist[i..1]; note index 0's weight is implicit as the fallthrough)
	for( int i = 0; i < 12; i++ )
	{
		double running = 0;
		for( int j = i; j > 0; j-- )
			running += dist[j];
		if( roll < running )
			return i;
	}

	return 0;
}

// Advances the LCG state one step and maps it to a double in [0, 1).
__host__ __device__ double LCG_random_double(uint64_t * seed)
{
	// LCG parameters
	const uint64_t m = 9223372036854775808ULL; // 2^63
	const uint64_t a = 2806196910506780709ULL;
	const uint64_t c = 1ULL;
	*seed = (a * (*seed) + c) % m;
	return (double) (*seed) / (double) m;
}

// Skips the LCG ahead n steps in O(log n) by repeatedly squaring the
// affine map (a, c); gives each lookup an independent seed stream.
__device__ uint64_t fast_forward_LCG(uint64_t seed, uint64_t n)
{
	// LCG parameters
	const uint64_t m = 9223372036854775808ULL; // 2^63
	uint64_t a = 2806196910506780709ULL;
	uint64_t c = 1ULL;

	n = n % m;

	uint64_t a_new = 1;
	uint64_t c_new = 0;

	// Binary decomposition of n: fold in the current (a, c) whenever the bit
	// is set, then square the map for the next bit.
	while(n > 0)
	{
		if(n & 1)
		{
			a_new *= a;
			c_new = c_new * a + c;
		}
		c *= (a + 1);
		a *= a;

		n >>= 1;
	}

	return (a_new * seed + c_new) % m;
}

////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// OPTIMIZED VARIANT FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// This section contains a number of optimized variants of some of the above
functions, which each deploy a different combination of optimizations strategies // specific to GPU. By default, XSBench will not run any of these variants. They // must be specifically selected using the "-k <optimized variant ID>" command // line argument. //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// // Optimization 1 -- Basic kernel splitting of sampling & lookup routines //////////////////////////////////////////////////////////////////////////////////// // This optimization requires a little extra data to store all material IDs and // energies for the sampled particles between kernel calls. By itself, this // optimization is likely actually a bit of a slowdown compared to the baseline // kernel. However, it will be used by better optimization kernels down the line. 
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_1(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 1 - basic sample/lookup kernel splitting"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); sampling_kernel<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); xs_lookup_kernel_optimization_1<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = 
thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return verification_scalar; } __global__ void sampling_kernel(Inputs in, SimulationData GSD ) { // The lookup ID. const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); // Store sample data in state array GSD.p_energy_samples[i] = p_energy; GSD.mat_samples[i] = mat; } __global__ void xs_lookup_kernel_optimization_1(Inputs in, SimulationData GSD ) { // The lookup ID. Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) GSD.mat_samples[i], // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, 
hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } //////////////////////////////////////////////////////////////////////////////////// // Optimization 2 -- Kernel Splitting + Material-Specific Lookup Kernels //////////////////////////////////////////////////////////////////////////////////// // This one builds on the first optimization. It uses multiple kernels, one // for each material type, to better balance the workload across threads within // a warp. This works because each material will have a different number of // isotopes, with some having a ton, meaning that SIMD efficiency can be rather // low by default. Better efficiency may be gained in further optimizations by // sorting the lookups first. 
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_2(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 2 - Material Lookup Kernels"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); sampling_kernel<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); // Launch all material kernels individually for( int m = 0; m < 12; m++ ) xs_lookup_kernel_optimization_2<<<nblocks, nthreads>>>( in, GSD, m ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification 
results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_2(Inputs in, SimulationData GSD, int m ) { // The lookup ID. Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; // Check that our material type matches the kernel material int mat = GSD.mat_samples[i]; if( mat != m ) return; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. 
In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } //////////////////////////////////////////////////////////////////////////////////// // Optimization 3 -- Kernel Splitting + Fuel or Not-Fuel Lookups //////////////////////////////////////////////////////////////////////////////////// // This optimization alters Optimization 2. Instead of executing a kernel call for // ALL different material types, only two different calls are made. One for fuel, // and one for all the other materials. As the fuel material has by far the most // isotopes, it takes much longer than the rest. //////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_3(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 3 - Fuel or Other Lookup Kernels"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); 
//////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); sampling_kernel<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); // Launch all material kernels individually xs_lookup_kernel_optimization_3<<<nblocks, nthreads>>>( in, GSD, 0 ); xs_lookup_kernel_optimization_3<<<nblocks, nthreads>>>( in, GSD, 1 ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_3(Inputs in, SimulationData GSD, int is_fuel ) { // The lookup ID. 
Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; int mat = GSD.mat_samples[i]; // If this is the fuel kernel, AND this is a fuel lookup, then perform a lookup // OR if this is not the fuel kernel, AND this is not a fuel lookup, then perform the lookup if( ((is_fuel == 1) && (mat == 0)) || ((is_fuel == 0) && (mat != 0 ) )) { double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. 
double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } } //////////////////////////////////////////////////////////////////////////////////// // Optimization 4 -- Kernel Splitting + All Material Lookups + Full Sort //////////////////////////////////////////////////////////////////////////////////// // This optimization builds on optimization 2, adding in a full sort before // hand so that the warps should be densely packed together. This should maximize // SIMD efficiency of the kernel, but may incur an added cost for the sort. //////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_4(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 4 - All Material Lookup Kernels + Material Sort"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized 
simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); sampling_kernel<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); // Count the number of fuel material lookups that need to be performed (fuel id = 0) int n_lookups_per_material[12]; for( int m = 0; m < 12; m++ ) n_lookups_per_material[m] = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, m); // Sort materials thrust::sort_by_key(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples); // Launch all material kernels individually int offset = 0; for( int m = 0; m < 12; m++ ) { nthreads = 32; nblocks = ceil((double) n_lookups_per_material[m] / (double) nthreads); xs_lookup_kernel_optimization_4<<<nblocks, nthreads>>>( in, GSD, m, n_lookups_per_material[m], offset ); offset += n_lookups_per_material[m]; } gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_4(Inputs in, SimulationData GSD, int m, int n_lookups, int offset ) { // The lookup ID. 
Used to set the seed, and to store the verification value int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= n_lookups ) return; i += offset; // Check that our material type matches the kernel material int mat = GSD.mat_samples[i]; if( mat != m ) return; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. 
double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } //////////////////////////////////////////////////////////////////////////////////// // Optimization 5 -- Kernel Splitting + Fuel/Other Lookups + Fuel/Other Partition //////////////////////////////////////////////////////////////////////////////////// // This optimization is similar to optimization 4, but instead of sorting // fully by material, we just sort by fuel or not fuel. Similarly, instead of // launching kernels for all materials, similar to optimization 3 we only launch // kernels for the fuel and other mateirals. //////////////////////////////////////////////////////////////////////////////////// // Comparator for partitioning stage struct is_mat_fuel{ __host__ __device__ bool operator()(const int & a) { return a == 0; } }; unsigned long long run_event_based_simulation_optimization_5(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 5 - Fuel/No Fuel Lookup Kernels + Fuel/No Fuel Sort"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); 
//////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); sampling_kernel<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); // Count the number of fuel material lookups that need to be performed (fuel id = 0) int n_fuel_lookups = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, 0); // Partition fuel into the first part of the array thrust::partition(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples, is_mat_fuel()); // Launch all material kernels individually (asynchronous is allowed) nblocks = ceil( (double) n_fuel_lookups / (double) nthreads); xs_lookup_kernel_optimization_5<<<nblocks, nthreads>>>( in, GSD, n_fuel_lookups, 0 ); nblocks = ceil( (double) (in.lookups - n_fuel_lookups) / (double) nthreads); xs_lookup_kernel_optimization_5<<<nblocks, nthreads>>>( in, GSD, in.lookups-n_fuel_lookups, n_fuel_lookups ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_5(Inputs in, SimulationData GSD, int n_lookups, int offset ) { // The lookup ID. 
Used to set the seed, and to store the verification value int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= n_lookups ) return; i += offset; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) GSD.mat_samples[i], // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. 
double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } //////////////////////////////////////////////////////////////////////////////////// // Optimization 6 -- Kernel Splitting + All Material Lookups + Full Sort // + Energy Sort //////////////////////////////////////////////////////////////////////////////////// // This optimization builds on optimization 4, adding in a second sort by energy. // It is extremely fast, as now most of the threads within a warp will be hitting // the same indices in the lookup grids. This greatly reduces thread divergence and // greatly improves cache efficiency and re-use. // // However, it is unlikely that this exact optimization would be possible in a real // application like OpenMC. One major difference is that particle objects are quite // large, often having 50+ variable fields, such that sorting them in memory becomes // rather expensive. Instead, the best possible option would probably be to create // intermediate indexing (per Hamilton et. al 2019), and run the kernels indirectly. 
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_6(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 6 - Material & Energy Sorts + Material-specific Kernels"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); sampling_kernel<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); // Count the number of fuel material lookups that need to be performed (fuel id = 0) int n_lookups_per_material[12]; for( int m = 0; m < 12; m++ ) n_lookups_per_material[m] = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, m); // Sort by material first thrust::sort_by_key(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples); // Now, sort each material by energy int offset 
= 0; for( int m = 0; m < 12; m++ ) { thrust::sort_by_key(thrust::device, GSD.p_energy_samples + offset, GSD.p_energy_samples + offset + n_lookups_per_material[m], GSD.mat_samples + offset); offset += n_lookups_per_material[m]; } // Launch all material kernels individually offset = 0; for( int m = 0; m < 12; m++ ) { nthreads = 32; nblocks = ceil((double) n_lookups_per_material[m] / (double) nthreads); xs_lookup_kernel_optimization_4<<<nblocks, nthreads>>>( in, GSD, m, n_lookups_per_material[m], offset ); offset += n_lookups_per_material[m]; } gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return verification_scalar; }
1b29bb00d3d4c5d21b4728750d60fa51f8568e9c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void AddLocalErrorKernel( int s1, float *distance, float *localError ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < 1) { localError[s1] += distance[s1] * distance[s1]; } }
1b29bb00d3d4c5d21b4728750d60fa51f8568e9c.cu
#include "includes.h" __global__ void AddLocalErrorKernel( int s1, float *distance, float *localError ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < 1) { localError[s1] += distance[s1] * distance[s1]; } }
4aa87574337d25ded072dc8227e0250c09d6d0aa.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021 * All rights reserved. */ #include "hip/hip_runtime.h" #include "hip_util.hip" #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #define DATA_SIZE 1048576 #define THREAD_NUM 256 #define BLOCK_NUM 32 #define BLOCK_SIZE 16 bool initCUDA() { int count = 0; // hipGetDeviceCount CUDA , count // CUDA hipGetDeviceCount(&count); if (count == 0) { fprintf(stderr, "This is no device.\n"); return false; } int i; for (i = 0; i < count; ++i) { // hipGetDeviceProperties CUDA struct hipDeviceProp_t prop; if (hipGetDeviceProperties(&prop, i) == hipSuccess) { if (prop.major >= 1) { printDeviceProp(prop); break; } } } if (i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x .\n"); return false; } // Devicehost_global_RuntimeAPI hipSetDevice(i); return true; } // // CUDA clock // timestamp GPU // __global__ static void sumOfSquares(int *num, int *result, clock_t *time) { int sum = 0; int i; clock_t start = clock(); for (i = 0; i < DATA_SIZE; ++i) { sum += num[i] * num[i]; } *result = sum; *time = clock() - start; } // __global__ static void sumOfSquaresParallel(int *num, int *result, clock_t *time) { const int tid = threadIdx.x; int sum = 0; int i; clock_t start; if(tid == 0) { start = clock(); } for(i = tid; i < DATA_SIZE; i = i + THREAD_NUM) { sum += num[i] * num[i]; } result[tid] = sum; if(tid == 0) *time = clock() - start; } // __global__ static void sumOfSquaresMultiBlocks(int *num, int *result, clock_t *time) { const int tid = threadIdx.x; const int bid = blockIdx.x; int sum = 0; int i; if(tid == 0) { time[bid] = clock(); } for(i = bid * THREAD_NUM + tid; i < DATA_SIZE; i = i + BLOCK_NUM * THREAD_NUM) { sum += num[i] * num[i]; } result[bid * THREAD_NUM + tid] = sum; if(tid == 0) time[bid + BLOCK_NUM] = clock(); } // Thread __global__ static void sumOfSquaresSync(int *num, int *result, clock_t *time) { // shared memoryblockthread extern __shared__ int shared[]; const int 
tid = threadIdx.x; const int bid = blockIdx.x; int i; int offset = THREAD_NUM >> 1; if(tid == 0) { time[bid] = clock(); } shared[tid] = 0; for(i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM) { shared[tid] += num[i] * num[i]; } // blockthread __syncthreads(); /*** if(tid == 0) { for(int i = 1; i < THREAD_NUM; ++i) { shared[0] += shared[i]; } result[bid] = shared[0]; } ***/ while(offset > 0) { if(tid < offset) { shared[tid] += shared[tid + offset]; } offset >>= 1; __syncthreads(); } if(tid == 0) { result[bid] = shared[0]; time[bid + BLOCK_NUM] = clock(); } } void calculateOnCPU(int *data) { // cpu int final_sum = 0; for(int i = 0; i < DATA_SIZE; ++i) { final_sum += data[i] * data[i]; } printf("sum(CPU): %d\n", final_sum); } void calculateOnGPU(int *data) { // int *gpu_data, *result; clock_t *time; hipMalloc((void **)&gpu_data, sizeof(int) * DATA_SIZE); hipMalloc((void **)&result, sizeof(int)); hipMalloc((void **)&time, sizeof(clock_t)); // hipMemcpy // hipMemcpyHostToDevice - // hipMemcpyDeviceToHost - hipMemcpy(gpu_data, data, sizeof(int) * DATA_SIZE, hipMemcpyHostToDevice); // <<<block , thread , shared memory >>>(...); hipLaunchKernelGGL(( sumOfSquares), dim3(1), dim3(1), 0, 0, gpu_data, result, time); int sum; clock_t time_cost; hipMemcpy(&sum, result, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&time_cost, time, sizeof(clock_t), hipMemcpyDeviceToHost); hipFree(gpu_data); hipFree(result); hipFree(time); printf("sum(GPU): %d time cost: %ld\n", sum, time_cost); } void calculateUsingMultiThreads(int *data) { int *gpu_data, *result; clock_t *time; hipMalloc((void **)&gpu_data, sizeof(int) * DATA_SIZE); hipMalloc((void **)&result, sizeof(int) * THREAD_NUM); hipMalloc((void **)&time, sizeof(clock_t)); hipMemcpy(gpu_data, data, sizeof(int) * DATA_SIZE, hipMemcpyHostToDevice); // hipLaunchKernelGGL(( sumOfSquaresParallel), dim3(1), dim3(THREAD_NUM), 0, 0, gpu_data, result, time); int sum[THREAD_NUM]; clock_t time_cost; hipMemcpy(&sum, result, 
sizeof(int) * THREAD_NUM, hipMemcpyDeviceToHost); hipMemcpy(&time_cost, time, sizeof(clock_t), hipMemcpyDeviceToHost); hipFree(gpu_data); hipFree(result); hipFree(time); // cpu int final_sum = 0; for(int i = 0; i < THREAD_NUM; ++i) { final_sum += sum[i]; } printf("sum(GPU, multi threads): %d time: %ld\n", final_sum, time_cost); } void calculateUsingMultiBlocks(int *data) { int *gpu_data, *result; clock_t *time; hipMalloc((void **)&gpu_data, sizeof(int) * DATA_SIZE); hipMalloc((void **)&result, sizeof(int) * THREAD_NUM * BLOCK_NUM); hipMalloc((void **)&time, sizeof(clock_t) * BLOCK_NUM * 2); hipMemcpy(gpu_data, data, sizeof(int) * DATA_SIZE, hipMemcpyHostToDevice); // <<<block , thread , shared memory >>>(...); hipLaunchKernelGGL(( sumOfSquaresMultiBlocks), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, gpu_data, result, time); int sum[THREAD_NUM * BLOCK_NUM]; clock_t time_cost[BLOCK_NUM * 2]; hipMemcpy(&sum, result, sizeof(int) * THREAD_NUM * BLOCK_NUM, hipMemcpyDeviceToHost); hipMemcpy(&time_cost, time, sizeof(clock_t) * BLOCK_NUM * 2, hipMemcpyDeviceToHost); hipFree(gpu_data); hipFree(result); hipFree(time); // cpu int final_sum = 0; for(int i = 0; i < THREAD_NUM * BLOCK_NUM; ++i) { final_sum += sum[i]; } clock_t min_start, max_end; min_start = time_cost[0]; max_end = time_cost[BLOCK_NUM]; for(int i = 0; i < BLOCK_NUM; ++i) { if(min_start > time_cost[i]) { min_start = time_cost[i]; } if(max_end < time_cost[i + BLOCK_NUM]) { max_end = time_cost[i + BLOCK_NUM]; } } printf("sum(GPU, multi blocks): %d time: %ld\n", final_sum, max_end - min_start); } void calculateUsingSync(int *data) { int *gpu_data, *result; clock_t *time; hipMalloc((void **)&gpu_data, sizeof(int) * DATA_SIZE); hipMalloc((void **)&result, sizeof(int) * BLOCK_NUM); hipMalloc((void **)&time, sizeof(clock_t) * BLOCK_NUM * 2); hipMemcpy(gpu_data, data, sizeof(int) * DATA_SIZE, hipMemcpyHostToDevice); // <<<block , thread , shared memory >>>(...); hipLaunchKernelGGL(( sumOfSquaresSync), dim3(BLOCK_NUM), 
dim3(THREAD_NUM), THREAD_NUM * sizeof(int), 0, gpu_data, result, time); int sum[BLOCK_NUM]; clock_t time_cost[BLOCK_NUM * 2]; hipMemcpy(&sum, result, sizeof(int) * BLOCK_NUM, hipMemcpyDeviceToHost); hipMemcpy(&time_cost, time, sizeof(clock_t) * BLOCK_NUM * 2, hipMemcpyDeviceToHost); hipFree(gpu_data); hipFree(result); hipFree(time); int final_sum = 0; for(int i = 0; i < BLOCK_NUM; ++i) { final_sum += sum[i]; } clock_t min_start, max_end; min_start = time_cost[0]; max_end = time_cost[BLOCK_NUM]; for(int i = 0; i < BLOCK_NUM; ++i) { if(min_start > time_cost[i]) { min_start = time_cost[i]; } if(max_end < time_cost[i + BLOCK_NUM]) { max_end = time_cost[i + BLOCK_NUM]; } } printf("sum(GPU, Sync): %d time: %ld\n", final_sum, max_end - min_start); } bool calculateSumOfSquares() { // int data[DATA_SIZE]; generateNumbers(data, DATA_SIZE); // CPU calculateOnCPU(data); // GPUone thread one block calculateOnGPU(data); // GPUmulti threads one block calculateUsingMultiThreads(data); // GPUmulti threads multi blocks calculateUsingMultiBlocks(data); // GPUmulti threads multi blocks, using sync calculateUsingSync(data); return true; } __global__ static void matMultCUDA(const float* a, size_t id_a, const float *b, size_t id_b, float *c, size_t id_c, int n) { /* const int tid = threadIdx.x; const int bid = blockIdx.x; const int idx = bid * blockDim.x + tid; const int row = idx / n; const int col = idx % n; int i; if(row < n && col < n) { float t = 0; for(i = 0; i < n; ++i) { t += a[row*id_a+i] * b[i*id_b+col]; } c[row*id_c+col] = t; } */ extern __shared__ float data[]; const int tid = threadIdx.x; const int row = blockIdx.x; int i, j; for(i = tid; i < n; i += blockDim.x) { data[i] = a[row * id_a + i]; } __syncthreads(); for(j = tid; j < n; j += blockDim.x) { float t = 0; float y = 0; for(i = 0; i < n; ++i) { float r; y -= data[i] * b[i*id_b+j]; r = t - y; y = (r -t) +y; t = r; } c[row*id_c+j] = t; } } __global__ static void matMultCUDAMultiBlocks(const float* a, size_t id_a, const 
float *b, size_t id_b, float *c, size_t id_c, int n) { } clock_t matmultCUDA(const float *a, int id_a, const float *b, int id_b, float *c, int id_c, int n) { float *ac, *bc, *cc; clock_t start, end; start = clock(); // hipMalloc((void**)&ac, sizeof(float) * n * n); // hipMalloc((void**)&bc, sizeof(float) * n * n); // hipMalloc((void**)&cc, sizeof(float) * n * n); // hipMemcpy2D pitch(id_a, id_b, id_c) // hipMemcpy2D(ac, sizeof(float) * n, a, sizeof(float) * id_a, sizeof(float) * n, n, hipMemcpyHostToDevice); // hipMemcpy2D(bc, sizeof(float) * n, b, sizeof(float) * id_b, sizeof(float) * n, n, hipMemcpyHostToDevice); size_t pitch_a, pitch_b, pitch_c; // hipMallocPitch hipMallocPitch((void**)&ac, &pitch_a, sizeof(float) * n, n); hipMallocPitch((void**)&bc, &pitch_b, sizeof(float) * n, n); hipMallocPitch((void**)&cc, &pitch_c, sizeof(float) * n, n); hipMemcpy2D(ac, pitch_a, a, sizeof(float) * id_a, sizeof(float) * n, n, hipMemcpyHostToDevice); hipMemcpy2D(bc, pitch_b, b, sizeof(float) * id_b, sizeof(float) * n, n, hipMemcpyHostToDevice); // int blocks = (n + THREAD_NUM - 1) / THREAD_NUM; // matMultCUDA<<<blocks*n, THREAD_NUM>>>(ac, n, bc, n, cc, n, n); // matMultCUDA<<<n, THREAD_NUM, sizeof(float)*n>>>(ac, n, bc, n, cc, n, n); //matMultCUDA<<<n, THREAD_NUM, sizeof(float)*n>>>(ac, pitch_a / sizeof(float), bc, pitch_b / sizeof(float), cc, pitch_c / sizeof(float), n); int bx = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 blocks(bx, bx); dim3 threads(BLOCK_SIZE, BLOCK_SIZE); hipLaunchKernelGGL(( matMultCUDA), dim3(blocks), dim3(threads), 0, 0, ac, pitch_a/sizeof(float), bc, pitch_b / sizeof(float), cc, pitch_c / sizeof(float), n ); //hipMemcpy2D(c, sizeof(float) * id_c, cc, sizeof(float)*n, sizeof(float)*n, n, hipMemcpyDeviceToHost); hipMemcpy2D(c, sizeof(float) * id_c, cc, pitch_c, sizeof(float)*n, n, hipMemcpyDeviceToHost); hipFree(ac); hipFree(bc); hipFree(cc); end = clock(); return end - start; } bool calculateMatrixMultiply() { // float *a, *b, *c, *d; int n = 1000; a = 
(float*)malloc(sizeof(float) * n * n); b = (float*)malloc(sizeof(float) * n * n); c = (float*)malloc(sizeof(float) * n * n); d = (float*)malloc(sizeof(float) * n * n); // () srand(0); generateMatrix(a, n, n); generateMatrix(b, n, n); clock_t time = matmultCUDA(a, n, b, n, c, n, n); multiplyOnCPU(a, n, b, n, d, n, n); compareMatrixError(c, n, d, n, n); double sec = (double)time/CLOCKS_PER_SEC; printf("Time Cost: %.2f(%.2lf GFLOPS)\n", sec, 2.0*n*n*n/(sec*1E9)); return true; }
4aa87574337d25ded072dc8227e0250c09d6d0aa.cu
/* * Copyright (c) 2021 * All rights reserved. */ #include "cuda.h" #include "cuda_util.cu" #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #define DATA_SIZE 1048576 #define THREAD_NUM 256 #define BLOCK_NUM 32 #define BLOCK_SIZE 16 bool initCUDA() { int count = 0; // 可以通过 cudaGetDeviceCount 函数获取 CUDA 的设备数, 函数通过引用传递 count // 值,获取当前支持的 CUDA 设备数。 cudaGetDeviceCount(&count); if (count == 0) { fprintf(stderr, "This is no device.\n"); return false; } int i; for (i = 0; i < count; ++i) { // 可以通过 cudaGetDeviceProperties 函数获取 CUDA 设备的属性 struct cudaDeviceProp prop; if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if (prop.major >= 1) { printDeviceProp(prop); break; } } } if (i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x .\n"); return false; } // 设置某一块的Device作为这个主机host上某一个运行线程的设备,这个函数必须在使用_global_的函数或者Runtime的其他API调用之前才能生效 cudaSetDevice(i); return true; } // 计算平方和的函式 // CUDA 提供了一个 clock 函式,可以取得目前的 // timestamp,很适合用来判断一段程序执行所花费的时间(单位为 GPU // 执行单元的频率)。 __global__ static void sumOfSquares(int *num, int *result, clock_t *time) { int sum = 0; int i; clock_t start = clock(); for (i = 0; i < DATA_SIZE; ++i) { sum += num[i] * num[i]; } *result = sum; *time = clock() - start; } // 计算平方和的函式(多个线程) __global__ static void sumOfSquaresParallel(int *num, int *result, clock_t *time) { const int tid = threadIdx.x; int sum = 0; int i; clock_t start; if(tid == 0) { start = clock(); } for(i = tid; i < DATA_SIZE; i = i + THREAD_NUM) { sum += num[i] * num[i]; } result[tid] = sum; if(tid == 0) *time = clock() - start; } // 计算平方和的函式(多个块) __global__ static void sumOfSquaresMultiBlocks(int *num, int *result, clock_t *time) { const int tid = threadIdx.x; const int bid = blockIdx.x; int sum = 0; int i; if(tid == 0) { time[bid] = clock(); } for(i = bid * THREAD_NUM + tid; i < DATA_SIZE; i = i + BLOCK_NUM * THREAD_NUM) { sum += num[i] * num[i]; } result[bid * THREAD_NUM + tid] = sum; if(tid == 0) time[bid + BLOCK_NUM] = clock(); } // 计算平方和的函式(加上Thread的同步) 
__global__ static void sumOfSquaresSync(int *num, int *result, clock_t *time) { // 声明变量是shared memory,是一个block中的每个thread都共享的内存。 extern __shared__ int shared[]; const int tid = threadIdx.x; const int bid = blockIdx.x; int i; int offset = THREAD_NUM >> 1; if(tid == 0) { time[bid] = clock(); } shared[tid] = 0; for(i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM) { shared[tid] += num[i] * num[i]; } // block中所有的thread都要同步到这个点,才能继续执行。 __syncthreads(); /*** if(tid == 0) { for(int i = 1; i < THREAD_NUM; ++i) { shared[0] += shared[i]; } result[bid] = shared[0]; } ***/ while(offset > 0) { if(tid < offset) { shared[tid] += shared[tid + offset]; } offset >>= 1; __syncthreads(); } if(tid == 0) { result[bid] = shared[0]; time[bid + BLOCK_NUM] = clock(); } } void calculateOnCPU(int *data) { // 在cpu端直接计算各个数的平方和 int final_sum = 0; for(int i = 0; i < DATA_SIZE; ++i) { final_sum += data[i] * data[i]; } printf("sum(CPU): %d\n", final_sum); } void calculateOnGPU(int *data) { // 在显卡中开辟内存,并将数据从主存中复制到显卡内存 int *gpu_data, *result; clock_t *time; cudaMalloc((void **)&gpu_data, sizeof(int) * DATA_SIZE); cudaMalloc((void **)&result, sizeof(int)); cudaMalloc((void **)&time, sizeof(clock_t)); // cudaMemcpy 将产生的随机数复制到显卡内存中 // cudaMemcpyHostToDevice - 从内存复制到显卡内存 // cudaMemcpyDeviceToHost - 从显卡内存复制到内存 cudaMemcpy(gpu_data, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice); // 函式名称<<<block 数目, thread 数目, shared memory 大小>>>(参数...); sumOfSquares<<<1, 1, 0>>>(gpu_data, result, time); int sum; clock_t time_cost; cudaMemcpy(&sum, result, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&time_cost, time, sizeof(clock_t), cudaMemcpyDeviceToHost); cudaFree(gpu_data); cudaFree(result); cudaFree(time); printf("sum(GPU): %d time cost: %ld\n", sum, time_cost); } void calculateUsingMultiThreads(int *data) { int *gpu_data, *result; clock_t *time; cudaMalloc((void **)&gpu_data, sizeof(int) * DATA_SIZE); cudaMalloc((void **)&result, sizeof(int) * THREAD_NUM); cudaMalloc((void **)&time, 
sizeof(clock_t)); cudaMemcpy(gpu_data, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice); // 函式名称中标明线程数量 sumOfSquaresParallel<<<1, THREAD_NUM, 0>>>(gpu_data, result, time); int sum[THREAD_NUM]; clock_t time_cost; cudaMemcpy(&sum, result, sizeof(int) * THREAD_NUM, cudaMemcpyDeviceToHost); cudaMemcpy(&time_cost, time, sizeof(clock_t), cudaMemcpyDeviceToHost); cudaFree(gpu_data); cudaFree(result); cudaFree(time); // 在cpu端把个部分数据的平方和进行加总 int final_sum = 0; for(int i = 0; i < THREAD_NUM; ++i) { final_sum += sum[i]; } printf("sum(GPU, multi threads): %d time: %ld\n", final_sum, time_cost); } void calculateUsingMultiBlocks(int *data) { int *gpu_data, *result; clock_t *time; cudaMalloc((void **)&gpu_data, sizeof(int) * DATA_SIZE); cudaMalloc((void **)&result, sizeof(int) * THREAD_NUM * BLOCK_NUM); cudaMalloc((void **)&time, sizeof(clock_t) * BLOCK_NUM * 2); cudaMemcpy(gpu_data, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice); // 函式名称<<<block 数目, thread 数目, shared memory 大小>>>(参数...); sumOfSquaresMultiBlocks<<<BLOCK_NUM, THREAD_NUM, 0>>>(gpu_data, result, time); int sum[THREAD_NUM * BLOCK_NUM]; clock_t time_cost[BLOCK_NUM * 2]; cudaMemcpy(&sum, result, sizeof(int) * THREAD_NUM * BLOCK_NUM, cudaMemcpyDeviceToHost); cudaMemcpy(&time_cost, time, sizeof(clock_t) * BLOCK_NUM * 2, cudaMemcpyDeviceToHost); cudaFree(gpu_data); cudaFree(result); cudaFree(time); // 在cpu端把个部分数据的平方和进行加总 int final_sum = 0; for(int i = 0; i < THREAD_NUM * BLOCK_NUM; ++i) { final_sum += sum[i]; } clock_t min_start, max_end; min_start = time_cost[0]; max_end = time_cost[BLOCK_NUM]; for(int i = 0; i < BLOCK_NUM; ++i) { if(min_start > time_cost[i]) { min_start = time_cost[i]; } if(max_end < time_cost[i + BLOCK_NUM]) { max_end = time_cost[i + BLOCK_NUM]; } } printf("sum(GPU, multi blocks): %d time: %ld\n", final_sum, max_end - min_start); } void calculateUsingSync(int *data) { int *gpu_data, *result; clock_t *time; cudaMalloc((void **)&gpu_data, sizeof(int) * DATA_SIZE); cudaMalloc((void 
**)&result, sizeof(int) * BLOCK_NUM); cudaMalloc((void **)&time, sizeof(clock_t) * BLOCK_NUM * 2); cudaMemcpy(gpu_data, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice); // 函式名称<<<block 数目, thread 数目, shared memory 大小>>>(参数...); sumOfSquaresSync<<<BLOCK_NUM, THREAD_NUM, THREAD_NUM * sizeof(int)>>>(gpu_data, result, time); int sum[BLOCK_NUM]; clock_t time_cost[BLOCK_NUM * 2]; cudaMemcpy(&sum, result, sizeof(int) * BLOCK_NUM, cudaMemcpyDeviceToHost); cudaMemcpy(&time_cost, time, sizeof(clock_t) * BLOCK_NUM * 2, cudaMemcpyDeviceToHost); cudaFree(gpu_data); cudaFree(result); cudaFree(time); int final_sum = 0; for(int i = 0; i < BLOCK_NUM; ++i) { final_sum += sum[i]; } clock_t min_start, max_end; min_start = time_cost[0]; max_end = time_cost[BLOCK_NUM]; for(int i = 0; i < BLOCK_NUM; ++i) { if(min_start > time_cost[i]) { min_start = time_cost[i]; } if(max_end < time_cost[i + BLOCK_NUM]) { max_end = time_cost[i + BLOCK_NUM]; } } printf("sum(GPU, Sync): %d time: %ld\n", final_sum, max_end - min_start); } bool calculateSumOfSquares() { // 产生随机数 int data[DATA_SIZE]; generateNumbers(data, DATA_SIZE); // 在CPU上计算 calculateOnCPU(data); // 在GPU上计算,one thread, one block calculateOnGPU(data); // 在GPU上计算,multi threads, one block calculateUsingMultiThreads(data); // 在GPU上计算,multi threads, multi blocks calculateUsingMultiBlocks(data); // 在GPU上计算,multi threads, multi blocks, using sync calculateUsingSync(data); return true; } __global__ static void matMultCUDA(const float* a, size_t id_a, const float *b, size_t id_b, float *c, size_t id_c, int n) { /* const int tid = threadIdx.x; const int bid = blockIdx.x; const int idx = bid * blockDim.x + tid; const int row = idx / n; const int col = idx % n; int i; if(row < n && col < n) { float t = 0; for(i = 0; i < n; ++i) { t += a[row*id_a+i] * b[i*id_b+col]; } c[row*id_c+col] = t; } */ extern __shared__ float data[]; const int tid = threadIdx.x; const int row = blockIdx.x; int i, j; for(i = tid; i < n; i += blockDim.x) { data[i] = a[row * 
id_a + i]; } __syncthreads(); for(j = tid; j < n; j += blockDim.x) { float t = 0; float y = 0; for(i = 0; i < n; ++i) { float r; y -= data[i] * b[i*id_b+j]; r = t - y; y = (r -t) +y; t = r; } c[row*id_c+j] = t; } } __global__ static void matMultCUDAMultiBlocks(const float* a, size_t id_a, const float *b, size_t id_b, float *c, size_t id_c, int n) { } clock_t matmultCUDA(const float *a, int id_a, const float *b, int id_b, float *c, int id_c, int n) { float *ac, *bc, *cc; clock_t start, end; start = clock(); // cudaMalloc((void**)&ac, sizeof(float) * n * n); // cudaMalloc((void**)&bc, sizeof(float) * n * n); // cudaMalloc((void**)&cc, sizeof(float) * n * n); // cudaMemcpy2D 函式,它是用来复制二维数组,可以指定数组的 pitch(即id_a, id_b, id_c) // cudaMemcpy2D(ac, sizeof(float) * n, a, sizeof(float) * id_a, sizeof(float) * n, n, cudaMemcpyHostToDevice); // cudaMemcpy2D(bc, sizeof(float) * n, b, sizeof(float) * id_b, sizeof(float) * n, n, cudaMemcpyHostToDevice); size_t pitch_a, pitch_b, pitch_c; // cudaMallocPitch 的函式,可以自动以最佳的倍数来配置内存。 cudaMallocPitch((void**)&ac, &pitch_a, sizeof(float) * n, n); cudaMallocPitch((void**)&bc, &pitch_b, sizeof(float) * n, n); cudaMallocPitch((void**)&cc, &pitch_c, sizeof(float) * n, n); cudaMemcpy2D(ac, pitch_a, a, sizeof(float) * id_a, sizeof(float) * n, n, cudaMemcpyHostToDevice); cudaMemcpy2D(bc, pitch_b, b, sizeof(float) * id_b, sizeof(float) * n, n, cudaMemcpyHostToDevice); // int blocks = (n + THREAD_NUM - 1) / THREAD_NUM; // matMultCUDA<<<blocks*n, THREAD_NUM>>>(ac, n, bc, n, cc, n, n); // matMultCUDA<<<n, THREAD_NUM, sizeof(float)*n>>>(ac, n, bc, n, cc, n, n); //matMultCUDA<<<n, THREAD_NUM, sizeof(float)*n>>>(ac, pitch_a / sizeof(float), bc, pitch_b / sizeof(float), cc, pitch_c / sizeof(float), n); int bx = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 blocks(bx, bx); dim3 threads(BLOCK_SIZE, BLOCK_SIZE); matMultCUDA<<<blocks, threads>>>(ac, pitch_a/sizeof(float), bc, pitch_b / sizeof(float), cc, pitch_c / sizeof(float), n ); //cudaMemcpy2D(c, sizeof(float) * 
id_c, cc, sizeof(float)*n, sizeof(float)*n, n, cudaMemcpyDeviceToHost); cudaMemcpy2D(c, sizeof(float) * id_c, cc, pitch_c, sizeof(float)*n, n, cudaMemcpyDeviceToHost); cudaFree(ac); cudaFree(bc); cudaFree(cc); end = clock(); return end - start; } bool calculateMatrixMultiply() { // 产生矩阵 float *a, *b, *c, *d; int n = 1000; a = (float*)malloc(sizeof(float) * n * n); b = (float*)malloc(sizeof(float) * n * n); c = (float*)malloc(sizeof(float) * n * n); d = (float*)malloc(sizeof(float) * n * n); // 随机数产生器(种子值) srand(0); generateMatrix(a, n, n); generateMatrix(b, n, n); clock_t time = matmultCUDA(a, n, b, n, c, n, n); multiplyOnCPU(a, n, b, n, d, n, n); compareMatrixError(c, n, d, n, n); double sec = (double)time/CLOCKS_PER_SEC; printf("Time Cost: %.2f(%.2lf GFLOPS)\n", sec, 2.0*n*n*n/(sec*1E9)); return true; }
d41032264c7fe941b0e6ef9713870b1b6070e19f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are not permit- * ted. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "masked_multihead_attention.h" #include "masked_multihead_attention_utils.h" #include <assert.h> #include <float.h> //#define MMHA_USE_HMMA_FOR_REDUCTION // Below are knobs to extend FP32 accumulation for higher FP16 accuracy // Does not seem to affect the accuracy that much //#define MMHA_USE_FP32_ACUM_FOR_FMA // Seems to slightly improve the accuracy #define MMHA_USE_FP32_ACUM_FOR_OUT #if 0 && defined(MMHA_USE_FP32_ACUM_FOR_OUT) // Does not seem to improve the accuracy //#define MMHA_USE_FP32_ACUM_FOR_LOGITS #endif namespace mmha { //////////////////////////////////////////////////////////////////////////////////////////////////// // // We use the following terminology to describe the different dimensions. 
// // B: Batch size (number of sequences), // L: Sequence length, // D: Hidden dimension, // H: Number of heads, // Dh: Hidden dimension per head - Dh = D / H. // // The different kernels assign a threadblock for B x H pair. The grid has size (1, B, H). We use // 64, 128 and 256 threads per block. // // Each threadblock loads Dh values from Q and its associated bias. The kernels run a loop to // compute Q * K^T where K is loaded from a cache buffer -- except for the current timestep. The // cache buffer helps with memory accesses and contains keys with bias. // // The layout of the cache buffer for the keys is [B, H, Dh/x, L, x] where x == 8 for FP16 and // x == 4 for FP32 where the fastest moving dimension (contiguous data) is the rightmost one. The // values for x are chosen to create chunks of 16 bytes. // // The different kernels use 1, 2 or 4 threads per key (THREADS_PER_KEY). The size of the LDGs // depends on the number of threads per key. Each thread sums Dh / THREADS_PER_KEY elements. At // the end of each iteration of the Q * K^T loop, we perform a reduction between lanes using an // HMMA instruction (Tensor Core). Each Q * K^T valuey is stored in shared memory in FP32. // // After that loop, a parallel softmax is computed accross the different Q * K^T values stored in // shared memory. // // The kernel ends with a loop over the values in V. We use THREADS_PER_VALUE to control how many // timesteps are computed by loop iteration. As with the keys, the values are read from a cache // except for the current timestep. The layout of the cache buffer for the values is much simpler // as it is [B, H, L, Dh]. 
// //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T, int Dh > struct Qk_vec_ {}; template<> struct Qk_vec_<float, 32> { using Type = float; }; template<> struct Qk_vec_<float, 64> { using Type = float2; }; template<> struct Qk_vec_<float, 128> { using Type = float4; }; template<> struct Qk_vec_<uint16_t, 32> { using Type = uint32_t; }; template<> struct Qk_vec_<uint16_t, 64> { using Type = uint32_t; }; template<> struct Qk_vec_<uint16_t, 128> { using Type = uint2; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T, int THREADS_PER_KEY > struct K_vec_ {}; template<> struct K_vec_<float, 4> { using Type = float; }; template<> struct K_vec_<float, 2> { using Type = float2; }; template<> struct K_vec_<float, 1> { using Type = float4; }; template<> struct K_vec_<uint16_t, 4> { using Type = uint32_t; }; template<> struct K_vec_<uint16_t, 2> { using Type = uint2; }; template<> struct K_vec_<uint16_t, 1> { using Type = uint4; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T, int V_VEC_SIZE > struct V_vec_ {}; template<> struct V_vec_<float, 1> { using Type = float; }; template<> struct V_vec_<float, 2> { using Type = float2; }; template<> struct V_vec_<float, 4> { using Type = float4; }; template<> struct V_vec_<uint16_t, 2> { using Type = uint32_t; }; template<> struct V_vec_<uint16_t, 4> { using Type = uint2; }; template<> struct V_vec_<uint16_t, 8> { using Type = uint4; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef MMHA_USE_FP32_ACUM_FOR_FMA template< typename T> struct Qk_vec_acum_fp32_ {}; template<> struct Qk_vec_acum_fp32_<float > { using Type = float; }; template<> struct Qk_vec_acum_fp32_<float2 > { using Type = float2; }; template<> struct Qk_vec_acum_fp32_<float4 > { using Type = 
float4; }; //template<> struct Qk_vec_acum_fp32_<uint16_t> { using Type = float; }; template<> struct Qk_vec_acum_fp32_<uint32_t> { using Type = float2; }; template<> struct Qk_vec_acum_fp32_<uint2 > { using Type = Float4_; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T> struct K_vec_acum_fp32_ {}; template<> struct K_vec_acum_fp32_<float > { using Type = float; }; template<> struct K_vec_acum_fp32_<float2 > { using Type = float2; }; template<> struct K_vec_acum_fp32_<float4 > { using Type = float4; }; template<> struct K_vec_acum_fp32_<uint32_t> { using Type = float2; }; template<> struct K_vec_acum_fp32_<uint2 > { using Type = Float4_; }; template<> struct K_vec_acum_fp32_<uint4 > { using Type = Float8_; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef MMHA_USE_FP32_ACUM_FOR_OUT template< typename T > struct V_vec_acum_fp32_ {}; template<> struct V_vec_acum_fp32_<float > { using Type = float; }; template<> struct V_vec_acum_fp32_<float2 > { using Type = float2; }; template<> struct V_vec_acum_fp32_<float4 > { using Type = float4; }; template<> struct V_vec_acum_fp32_<uint32_t> { using Type = float2; }; template<> struct V_vec_acum_fp32_<uint2 > { using Type = Float4_; }; template<> struct V_vec_acum_fp32_<uint4 > { using Type = Float8_; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template< int THREADS_PER_KEY, typename K_vec, int N > inline __device__ float qk_dot_(const K_vec (&q)[N], const K_vec (&k)[N]) { #ifdef MMHA_USE_FP32_ACUM_FOR_FMA using K_vec_acum = typename K_vec_acum_fp32_<K_vec>::Type; #else using K_vec_acum = K_vec; #endif // Compute the parallel products for Q*K^T (treat vector lanes separately). 
K_vec_acum qk_vec = mul<K_vec_acum, K_vec, K_vec>(q[0], k[0]); #pragma unroll for( int ii = 1; ii < N; ++ii ) { qk_vec = fma(q[ii], k[ii], qk_vec); } // Finalize the reduction across lanes. float qk = sum(qk_vec); #pragma unroll for( int mask = THREADS_PER_KEY / 2; mask >= 1; mask /= 2 ) { qk += __shfl_xor_sync(uint32_t(-1), qk, mask); } return qk; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T, int THREADS_PER_KEY > struct Qk_dot { template< typename K_vec, int N > static inline __device__ float dot(const K_vec (&q)[N], const K_vec (&k)[N]) { return qk_dot_<THREADS_PER_KEY>(q, k); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ float4 hmma_fp32(const uint2 &a, uint32_t b) { float4 c; float zero = 0.f; asm volatile( \ "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 \n" \ " {%0, %1, %2, %3}, \n" \ " {%4, %5}, \n" \ " {%6}, \n" \ " {%7, %7, %7, %7}; \n" \ \ : "=f"(c.x), "=f"(c.y), "=f"(c.z), "=f"(c.w) : "r"(a.x) "r"(a.y) , "r"(b) , "f"(zero)); return c; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< int N > inline __device__ float qk_hmma_dot_(const uint32_t (&q)[N], const uint32_t (&k)[N]) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 750 #ifdef MMHA_USE_FP32_ACUM_FOR_FMA using K_vec_acum = typename K_vec_acum_fp32_<uint32_t>::Type; #else using K_vec_acum = uint32_t; #endif K_vec_acum qk_vec = mul<K_vec_acum, uint32_t, uint32_t>(q[0], k[0]); #pragma unroll for( int ii = 1; ii < N; ++ii ) { qk_vec = fma(q[ii], k[ii], qk_vec); } #ifdef MMHA_USE_FP32_ACUM_FOR_FMA uint32_t qk_vec_ = float2_to_half2(qk_vec); return hmma_fp32(make_uint2(qk_vec_, 0u), 0x3c003c00u).x; #else return hmma_fp32(make_uint2(qk_vec, 0u), 0x3c003c00u).x; #endif #else return 0.f; #endif } 
//////////////////////////////////////////////////////////////////////////////////////////////////// template<> struct Qk_dot<uint16_t, 4> { template< int N > static inline __device__ float dot(const uint32_t (&q)[N], const uint32_t (&k)[N]) { #if __CUDA_ARCH__ >= 750 && defined(MMHA_USE_HMMA_FOR_REDUCTION) return qk_hmma_dot_(q, k); #else return qk_dot_<4>(q, k); #endif // defined MMHA_USE_HMMA_FOR_REDUCTION } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template< int WARPS_PER_BLOCK, int WARP_SIZE = 32 > inline __device__ float block_sum(float *red_smem, float sum) { // Decompose the thread index into warp / lane. int warp = threadIdx.x / WARP_SIZE; int lane = threadIdx.x % WARP_SIZE; // Compute the sum per warp. #pragma unroll for( int mask = WARP_SIZE / 2; mask >= 1; mask /= 2 ) { sum += __shfl_xor_sync(uint32_t(-1), sum, mask); } // Warp leaders store the data to shared memory. if( lane == 0 ) { red_smem[warp] = sum; } // Make sure the data is in shared memory. __syncthreads(); // The warps compute the final sums. if( lane < WARPS_PER_BLOCK ) { sum = red_smem[lane]; } // Parallel reduction inside the warp. #pragma unroll for( int mask = WARPS_PER_BLOCK / 2; mask >= 1; mask /= 2 ) { sum += __shfl_xor_sync(uint32_t(-1), sum, mask); } // Broadcast to other threads. 
return __shfl_sync(uint32_t(-1), sum, 0); } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ void convert_from_float(float &dst, float src) { dst = src; } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ void convert_from_float(uint16_t &dst, float src) { dst = float_to_half(src); } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ void convert_from_float(uint32_t &dst, float2 src) { dst = float2_to_half2(src); } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ void convert_from_float(uint2 &dst, Float4_ src) { dst.x = float2_to_half2(src.x); dst.y = float2_to_half2(src.y); } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ void convert_from_float(uint4 &dst, Float8_ src) { dst.x = float2_to_half2(src.x); dst.y = float2_to_half2(src.y); dst.z = float2_to_half2(src.z); dst.w = float2_to_half2(src.w); } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ void convert_from_float(float2 &dst, float2 src) { dst = src; } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ void convert_from_float(float4 &dst, float4 src) { dst = src; } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ float convert_to_float(float4 u) { return u.x; } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ float convert_to_float(uint4 u) { float2 tmp = half2_to_float2(u.x); return tmp.x; } #if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS) 
//////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ float cast_to_float(float u) { return u; } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ float2 cast_to_float(float2 u) { return u; } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ float4 cast_to_float(float4 u) { return u; } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ Float4_ cast_to_float(Float4_ u) { return u; } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ Float8_ cast_to_float(Float8_ u) { return u; } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ float2 cast_to_float(uint32_t u) { return half2_to_float2(u); } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ Float4_ cast_to_float(uint2 u) { Float4_ tmp; tmp.x = half2_to_float2(u.x); tmp.y = half2_to_float2(u.y); return tmp; } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ Float8_ cast_to_float(uint4 u) { Float8_ tmp; tmp.x = half2_to_float2(u.x); tmp.y = half2_to_float2(u.y); tmp.z = half2_to_float2(u.z); tmp.w = half2_to_float2(u.w); return tmp; } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T > inline __device__ __host__ T div_up(T m, T n) { return (m + n-1) / n; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T > inline size_t smem_size_in_bytes(const Masked_multihead_attention_params<T> &params, int threads_per_value, int 
threads_per_block) { // The amount of shared memory needed to store the Q*K^T values in float. size_t qk_sz = div_up(params.timestep + 1, 4) * 16; // The extra memory needed if we are not using floats for the final logits. size_t logits_sz = 0; #ifndef MMHA_USE_FP32_ACUM_FOR_LOGITS if( sizeof(T) != 4 ) { logits_sz = div_up(params.seq_length, 4) * 4 * sizeof(T); } #endif // The total size needed during softmax. size_t softmax_sz = qk_sz + logits_sz; // The number of partial rows to reduce in the final reduction. int rows_per_red = threads_per_block / threads_per_value; // The amount of storage needed to finalize the outputs. size_t red_sz = rows_per_red * params.hidden_size_per_head * sizeof(T) / 2; // The max. return max(softmax_sz, red_sz); } //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ constexpr uint32_t shfl_mask(int threads) { return threads == 32 ? uint32_t(-1) : (1u << threads) - 1u; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< // The type of the inputs. Supported types: float and half. typename T, // The hidden dimension per head. int Dh, // The number of threads per key. int THREADS_PER_KEY, // The number of threads per value. int THREADS_PER_VALUE, // The number of threads in a threadblock. int THREADS_PER_BLOCK > __global__ void masked_multihead_attention_kernel(Masked_multihead_attention_params<T> params) { // Make sure the hidden dimension per head is a multiple of the number of threads per key. static_assert(Dh % THREADS_PER_KEY == 0, ""); // Make sure the hidden dimension per head is a multiple of the number of threads per value. static_assert(Dh % THREADS_PER_VALUE == 0, ""); // The size of a warp. constexpr int WARP_SIZE = 32; // The number of warps in a threadblock. constexpr int WARPS_PER_BLOCK = THREADS_PER_BLOCK / WARP_SIZE; // Use smem_size_in_bytes (above) to determine the amount of shared memory. 
extern __shared__ char smem_[]; // The shared memory for the Q*K^T values and partial logits in softmax. float *qk_smem = reinterpret_cast<float*>(smem_); // The shared memory for the logits. For FP32, that's the same buffer as qk_smem. char *logits_smem_ = smem_; #ifndef MMHA_USE_FP32_ACUM_FOR_LOGITS if( sizeof(T) != 4 ) { logits_smem_ += div_up(params.timestep + 1, 4) * 16; //sizeof(float); } T *logits_smem = reinterpret_cast<T*>(logits_smem_); #else float *logits_smem = reinterpret_cast<float*>(logits_smem_); #endif // The shared memory to do the final reduction for the output values. Reuse qk_smem. T *out_smem = reinterpret_cast<T*>(smem_); // The shared memory buffers for the block-wide reductions. One for max, one for sum. __shared__ float red_smem[WARPS_PER_BLOCK * 2]; // Shared memory to store Q inputs. __shared__ T q_smem[Dh]; // A vector of Q or K elements for the current timestep. using Qk_vec = typename Qk_vec_<T, Dh>::Type; // The number of elements per vector. constexpr int QK_VEC_SIZE = sizeof(Qk_vec) / sizeof(T); // Make sure the hidden size per head is a multiple of the vector size. static_assert(Dh % QK_VEC_SIZE == 0 && Dh / QK_VEC_SIZE <= WARP_SIZE, ""); // The number of vectors per warp. constexpr int QK_VECS_PER_WARP = Dh / QK_VEC_SIZE; // The layout of the cache is [B, H, Dh/x, L, x] with x == 4/8 for FP32/FP16. Since each thread // owns x elements, we have to decompose the linear index into chunks of x values and the posi- // tion of the thread in that chunk. // The number of elements in a chunk of 16B (that's the x in the above formula). constexpr int QK_ELTS_IN_16B = 16 / sizeof(T); // The number of K vectors in 16B. constexpr int QK_VECS_IN_16B = 16 / sizeof(Qk_vec); // The batch. const int bi = blockIdx.y; if(params.finished != nullptr && params.finished[bi] == true) return; // The head. const int hi = blockIdx.x; // Combine the batch and the head indices. const int bhi = bi * params.num_heads + hi; // The thread in the block. 
const int tidx = threadIdx.x; // While doing the product Q*K^T for the different keys we track the max. float qk_max = -FLT_MAX; int qkv_base_offset = (params.stride == 0)? bhi*Dh : bi*params.stride + hi*Dh; // First QK_VECS_PER_WARP load Q and K + the bias values for the current timestep. if( tidx < QK_VECS_PER_WARP ) { // The offset in the Q and K buffer also accounts for the batch. int qk_offset = qkv_base_offset + tidx*QK_VEC_SIZE; // The offset in the bias buffer. int qk_bias_offset = hi*Dh + tidx*QK_VEC_SIZE; // Trigger the loads from the Q and K buffers. Qk_vec q = *reinterpret_cast<const Qk_vec*>(&params.q[qk_offset]); Qk_vec k = *reinterpret_cast<const Qk_vec*>(&params.k[qk_offset]); // Trigger the loads from the Q and K bias buffers. Qk_vec q_bias = *reinterpret_cast<const Qk_vec*>(&params.q_bias[qk_bias_offset]); Qk_vec k_bias = *reinterpret_cast<const Qk_vec*>(&params.k_bias[qk_bias_offset]); // Computes the Q/K values with bias. q = add(q, q_bias); k = add(k, k_bias); // Store the Q values to shared memory. *reinterpret_cast<Qk_vec*>(&q_smem[tidx*QK_VEC_SIZE]) = q; // Write the K values to the global memory cache. // // NOTE: The stores are uncoalesced as we have multiple chunks of 16B spread across the memory // system. We designed it this way as it allows much better memory loads (and there are many // more loads) + the stores are really "write and forget" since we won't need the ack before // the end of the kernel. There's plenty of time for the transactions to complete. // The 16B chunk written by the thread. int co = tidx / QK_VECS_IN_16B; // The position of the thread in that 16B chunk. int ci = tidx % QK_VECS_IN_16B * QK_VEC_SIZE; // Two chunks are separated by L * x elements. A thread write QK_VEC_SIZE elements. int offset = bhi*params.seq_length*Dh + co*params.seq_length*QK_ELTS_IN_16B + params.timestep*QK_ELTS_IN_16B + ci; // Trigger the stores to global memory. 
*reinterpret_cast<Qk_vec*>(&params.k_cache[offset]) = k; // Compute \sum_i Q[i] * K^T[i] for the current timestep. #ifdef MMHA_USE_FP32_ACUM_FOR_FMA using Qk_vec_acum = typename Qk_vec_acum_fp32_<Qk_vec>::Type; #else using Qk_vec_acum = Qk_vec; #endif float qk = dot<Qk_vec_acum, Qk_vec>(q, k); #pragma unroll for( int mask = QK_VECS_PER_WARP / 2; mask >= 1; mask /= 2 ) { qk += __shfl_xor_sync(shfl_mask(QK_VECS_PER_WARP), qk, mask); } // Normalize qk. qk *= params.inv_sqrt_dh; // Store that value in shared memory. Keep the Q*K^T value in register for softmax. if( tidx == 0 ) { qk_max = qk; qk_smem[params.timestep] = qk; } } // Make sure the data is in shared memory. __syncthreads(); // The type of queries and keys for the math in the Q*K^T product. using K_vec = typename K_vec_<T, THREADS_PER_KEY>::Type; // The number of elements per vector. constexpr int K_VEC_SIZE = sizeof(K_vec) / sizeof(T); // Make sure the hidden size per head is a multiple of the vector size. static_assert(Dh % K_VEC_SIZE == 0, ""); // The number of elements per thread. constexpr int K_ELTS_PER_THREAD = Dh / THREADS_PER_KEY; // The number of vectors per thread. constexpr int K_VECS_PER_THREAD = K_ELTS_PER_THREAD / K_VEC_SIZE; // The position the first key loaded by each thread from the cache buffer (for this B * H). int ko = tidx / THREADS_PER_KEY; // The position of the thread in the chunk of keys. int ki = tidx % THREADS_PER_KEY * K_VEC_SIZE; // Load the Q values from shared memory. The values are reused during the loop on K. K_vec q[K_VECS_PER_THREAD]; #pragma unroll for( int ii = 0; ii < K_VECS_PER_THREAD; ++ii ) { q[ii] = *reinterpret_cast<const K_vec*>(&q_smem[ki + ii*THREADS_PER_KEY*K_VEC_SIZE]); } // The number of timesteps loaded per iteration. constexpr int K_PER_ITER = THREADS_PER_BLOCK / THREADS_PER_KEY; // The number of keys per warp. constexpr int K_PER_WARP = WARP_SIZE / THREADS_PER_KEY; // The base pointer for the key in the cache buffer. 
T *k_cache = &params.k_cache[bhi*params.seq_length*Dh + ki]; // Pick a number of keys to make sure all the threads of a warp enter (due to shfl_sync). int ti_end = div_up(params.timestep, K_PER_WARP) * K_PER_WARP; // Iterate over the keys/timesteps to compute the various (Q*K^T)_{ti} values. for( int ti = ko; ti < ti_end; ti += K_PER_ITER ) { // The keys loaded from the key cache. K_vec k[K_VECS_PER_THREAD]; #pragma unroll for( int ii = 0; ii < K_VECS_PER_THREAD; ++ii ) { int jj = ii * params.seq_length + ti; if( ti < params.timestep ) { k[ii] = *reinterpret_cast<const K_vec*>(&k_cache[jj*QK_ELTS_IN_16B]); } } // Perform the dot product and normalize qk. // // WARNING: ALL THE THREADS OF A WARP MUST ENTER!!! float qk = Qk_dot<T, THREADS_PER_KEY>::dot(q, k) * params.inv_sqrt_dh; bool is_mask = params.is_mask? (ti >= params.input_lengths[bi] && ti < params.max_input_len) : false; // Store the product to shared memory. There's one qk value per timestep. Update the max. if( ti < params.timestep && tidx % THREADS_PER_KEY == 0 ) { qk_max = is_mask? qk_max : fmaxf(qk_max, qk); qk_smem[ti] = qk; } } // Perform the final reduction to compute the max inside each warp. // // NOTE: In a group of THREADS_PER_KEY threads, the leader already has the max value for the // group so it's not needed to run the reduction inside the group (again). #pragma unroll for( int mask = WARP_SIZE / 2; mask >= THREADS_PER_KEY; mask /= 2 ) { qk_max = fmaxf(qk_max, __shfl_xor_sync(uint32_t(-1), qk_max, mask)); } // Decompose the thread index into warp and lane. const int warp = tidx / WARP_SIZE; const int lane = tidx % WARP_SIZE; // The warp leader writes the max to shared memory. if( lane == 0 ) { red_smem[warp] = qk_max; } // Make sure the products are in shared memory. __syncthreads(); // The warps finalize the reduction. qk_max = lane < WARPS_PER_BLOCK ? 
red_smem[lane] : -FLT_MAX; #pragma unroll for( int mask = WARPS_PER_BLOCK / 2; mask >= 1; mask /= 2 ) { qk_max = fmaxf(qk_max, __shfl_xor_sync(uint32_t(-1), qk_max, mask)); } // Broadcast to all the threads in the warp. qk_max = __shfl_sync(uint32_t(-1), qk_max, 0); // Compute the logits and start the sum. float sum = 0.f; for( int ti = tidx; ti <= params.timestep; ti += THREADS_PER_BLOCK ) { bool is_mask = params.is_mask? (ti >= params.input_lengths[bi] && ti < params.max_input_len) : false; float logit = is_mask? 0.f : __expf(qk_smem[ti] - qk_max); sum += logit; qk_smem[ti] = logit; } // Compute the sum. sum = block_sum<WARPS_PER_BLOCK>(&red_smem[WARPS_PER_BLOCK], sum); // Normalize the logits. float inv_sum = __fdividef(1.f, sum + 1.e-6f); for( int ti = tidx; ti <= params.timestep; ti += THREADS_PER_BLOCK ) { convert_from_float(logits_smem[ti], qk_smem[ti] * inv_sum); } // Make sure the logits are in shared memory. __syncthreads(); // The number of elements per vector. constexpr int V_VEC_SIZE = Dh / THREADS_PER_VALUE; // A vector of V elements for the current timestep. using V_vec = typename V_vec_<T, V_VEC_SIZE>::Type; // The value computed by this thread. int vo = tidx / THREADS_PER_VALUE; // The hidden dimensions computed by this particular thread. int vi = tidx % THREADS_PER_VALUE * V_VEC_SIZE; // The base pointer for the value in the cache buffer. T *v_cache = &params.v_cache[bhi*params.seq_length*Dh + vi]; #ifdef MMHA_USE_FP32_ACUM_FOR_OUT using V_vec_acum = typename V_vec_acum_fp32_<V_vec>::Type; #else using V_vec_acum = V_vec; #endif // The partial outputs computed by each thread. V_vec_acum out; zero(out); // The number of values processed per iteration of the loop. constexpr int V_PER_ITER = THREADS_PER_BLOCK / THREADS_PER_VALUE; // Loop over the timesteps to compute the partial outputs. for( int ti = vo; ti < params.timestep; ti += V_PER_ITER ) { // Load the values from the cache. 
V_vec v = *reinterpret_cast<const V_vec*>(&v_cache[ti*Dh]); // Load the logits from shared memory. #if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS) float logit = logits_smem[ti]; out = fma(logit, cast_to_float(v), out); #else T logit = logits_smem[ti]; // Update the partial sums. out = fma(logit, v, out); #endif } // One group of threads computes the product(s) for the current timestep. if( vo == params.timestep % V_PER_ITER ) { // Trigger the loads from the V buffer. V_vec v = *reinterpret_cast<const V_vec*>(&params.v[qkv_base_offset + vi]); // Trigger the loads from the V bias buffer. V_vec v_bias = *reinterpret_cast<const V_vec*>(&params.v_bias[hi*Dh + vi]); // Compute the V values with bias. v = add(v, v_bias); // Store the values with bias back to global memory in the cache for V. *reinterpret_cast<V_vec*>(&v_cache[params.timestep*Dh]) = v; // Initialize the output value with the current timestep. #if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS) out = fma(logits_smem[params.timestep], cast_to_float(v), out); #else out = fma(logits_smem[params.timestep], v, out); #endif } // Make sure we can start writing to shared memory. __syncthreads(); // Run the final reduction amongst the different groups computing different partial outputs. #pragma unroll for( int active_groups = V_PER_ITER; active_groups >= 2; active_groups /= 2 ) { // The midpoint in the number of active groups. int midpoint = active_groups / 2; // The upper part of active threads store to shared memory. if( vo >= midpoint && vo < active_groups ) { #ifdef MMHA_USE_FP32_ACUM_FOR_OUT convert_from_float(*reinterpret_cast<V_vec*>(&out_smem[(vo - midpoint)*Dh + vi]), out); #else *reinterpret_cast<V_vec*>(&out_smem[(vo - midpoint)*Dh + vi]) = out; #endif } __syncthreads(); // The bottom warps update their values. if( vo < midpoint ) { out = add(*reinterpret_cast<const V_vec*>(&out_smem[vo*Dh + vi]), out); } __syncthreads(); } // Output the final values. 
if( vo == 0 ) { #ifdef MMHA_USE_FP32_ACUM_FOR_OUT convert_from_float(*reinterpret_cast<V_vec*>(&params.out[bhi*Dh + vi]), out); #else *reinterpret_cast<V_vec*>(&params.out[bhi*Dh + vi]) = out; #endif } } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace mmha //////////////////////////////////////////////////////////////////////////////////////////////////// #define MMHA_LAUNCH_KERNEL(T, Dh, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK, stream) \ size_t smem_sz = mmha::smem_size_in_bytes<T>(params, THDS_PER_VALUE, THDS_PER_BLOCK); \ dim3 grid(params.num_heads, params.batch_size); \ hipLaunchKernelGGL(( mmha::masked_multihead_attention_kernel<T, Dh, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK>) \ , dim3(grid), dim3(THDS_PER_BLOCK), smem_sz, stream, params) //////////////////////////////////////////////////////////////////////////////////////////////////// template < typename T, int Dh > void mmha_launch_kernel(const Masked_multihead_attention_params<T> &params, const hipStream_t &stream) { constexpr int THREADS_PER_VALUE = Dh * sizeof(T) / 16; if( params.timestep < 32 ) { MMHA_LAUNCH_KERNEL(T, Dh, 4, THREADS_PER_VALUE, 64, stream); } else if( params.timestep < 2048 ) { MMHA_LAUNCH_KERNEL(T, Dh, 2, THREADS_PER_VALUE, 128, stream); } else { MMHA_LAUNCH_KERNEL(T, Dh, 1, THREADS_PER_VALUE, 256, stream); } } //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T > void masked_multihead_attention_(const Masked_multihead_attention_params<T> &params, const hipStream_t &stream) { switch ( params.hidden_size_per_head ) { case 32: mmha_launch_kernel<T, 32>(params, stream); break; case 64: mmha_launch_kernel<T, 64>(params, stream); break; case 128: mmha_launch_kernel<T, 128>(params, stream); break; default: assert(false); } } //////////////////////////////////////////////////////////////////////////////////////////////////// void 
masked_multihead_attention(const Masked_multihead_attention_params<float> &params, const hipStream_t &stream) { masked_multihead_attention_(params, stream); } //////////////////////////////////////////////////////////////////////////////////////////////////// void masked_multihead_attention(const Masked_multihead_attention_params<uint16_t> &params, const hipStream_t &stream) { masked_multihead_attention_(params, stream); } //////////////////////////////////////////////////////////////////////////////////////////////////// #undef MMHA_LAUNCH_KERNEL
d41032264c7fe941b0e6ef9713870b1b6070e19f.cu
/*************************************************************************************************** * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are not permit- * ted. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "masked_multihead_attention.h" #include "masked_multihead_attention_utils.h" #include <assert.h> #include <float.h> //#define MMHA_USE_HMMA_FOR_REDUCTION // Below are knobs to extend FP32 accumulation for higher FP16 accuracy // Does not seem to affect the accuracy that much //#define MMHA_USE_FP32_ACUM_FOR_FMA // Seems to slightly improve the accuracy #define MMHA_USE_FP32_ACUM_FOR_OUT #if 0 && defined(MMHA_USE_FP32_ACUM_FOR_OUT) // Does not seem to improve the accuracy //#define MMHA_USE_FP32_ACUM_FOR_LOGITS #endif namespace mmha { //////////////////////////////////////////////////////////////////////////////////////////////////// // // We use the following terminology to describe the different dimensions. 
// // B: Batch size (number of sequences), // L: Sequence length, // D: Hidden dimension, // H: Number of heads, // Dh: Hidden dimension per head - Dh = D / H. // // The different kernels assign a threadblock for B x H pair. The grid has size (1, B, H). We use // 64, 128 and 256 threads per block. // // Each threadblock loads Dh values from Q and its associated bias. The kernels run a loop to // compute Q * K^T where K is loaded from a cache buffer -- except for the current timestep. The // cache buffer helps with memory accesses and contains keys with bias. // // The layout of the cache buffer for the keys is [B, H, Dh/x, L, x] where x == 8 for FP16 and // x == 4 for FP32 where the fastest moving dimension (contiguous data) is the rightmost one. The // values for x are chosen to create chunks of 16 bytes. // // The different kernels use 1, 2 or 4 threads per key (THREADS_PER_KEY). The size of the LDGs // depends on the number of threads per key. Each thread sums Dh / THREADS_PER_KEY elements. At // the end of each iteration of the Q * K^T loop, we perform a reduction between lanes using an // HMMA instruction (Tensor Core). Each Q * K^T valuey is stored in shared memory in FP32. // // After that loop, a parallel softmax is computed accross the different Q * K^T values stored in // shared memory. // // The kernel ends with a loop over the values in V. We use THREADS_PER_VALUE to control how many // timesteps are computed by loop iteration. As with the keys, the values are read from a cache // except for the current timestep. The layout of the cache buffer for the values is much simpler // as it is [B, H, L, Dh]. 
// //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T, int Dh > struct Qk_vec_ {}; template<> struct Qk_vec_<float, 32> { using Type = float; }; template<> struct Qk_vec_<float, 64> { using Type = float2; }; template<> struct Qk_vec_<float, 128> { using Type = float4; }; template<> struct Qk_vec_<uint16_t, 32> { using Type = uint32_t; }; template<> struct Qk_vec_<uint16_t, 64> { using Type = uint32_t; }; template<> struct Qk_vec_<uint16_t, 128> { using Type = uint2; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T, int THREADS_PER_KEY > struct K_vec_ {}; template<> struct K_vec_<float, 4> { using Type = float; }; template<> struct K_vec_<float, 2> { using Type = float2; }; template<> struct K_vec_<float, 1> { using Type = float4; }; template<> struct K_vec_<uint16_t, 4> { using Type = uint32_t; }; template<> struct K_vec_<uint16_t, 2> { using Type = uint2; }; template<> struct K_vec_<uint16_t, 1> { using Type = uint4; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T, int V_VEC_SIZE > struct V_vec_ {}; template<> struct V_vec_<float, 1> { using Type = float; }; template<> struct V_vec_<float, 2> { using Type = float2; }; template<> struct V_vec_<float, 4> { using Type = float4; }; template<> struct V_vec_<uint16_t, 2> { using Type = uint32_t; }; template<> struct V_vec_<uint16_t, 4> { using Type = uint2; }; template<> struct V_vec_<uint16_t, 8> { using Type = uint4; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef MMHA_USE_FP32_ACUM_FOR_FMA template< typename T> struct Qk_vec_acum_fp32_ {}; template<> struct Qk_vec_acum_fp32_<float > { using Type = float; }; template<> struct Qk_vec_acum_fp32_<float2 > { using Type = float2; }; template<> struct Qk_vec_acum_fp32_<float4 > { using Type = 
float4; }; //template<> struct Qk_vec_acum_fp32_<uint16_t> { using Type = float; }; template<> struct Qk_vec_acum_fp32_<uint32_t> { using Type = float2; }; template<> struct Qk_vec_acum_fp32_<uint2 > { using Type = Float4_; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T> struct K_vec_acum_fp32_ {}; template<> struct K_vec_acum_fp32_<float > { using Type = float; }; template<> struct K_vec_acum_fp32_<float2 > { using Type = float2; }; template<> struct K_vec_acum_fp32_<float4 > { using Type = float4; }; template<> struct K_vec_acum_fp32_<uint32_t> { using Type = float2; }; template<> struct K_vec_acum_fp32_<uint2 > { using Type = Float4_; }; template<> struct K_vec_acum_fp32_<uint4 > { using Type = Float8_; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef MMHA_USE_FP32_ACUM_FOR_OUT template< typename T > struct V_vec_acum_fp32_ {}; template<> struct V_vec_acum_fp32_<float > { using Type = float; }; template<> struct V_vec_acum_fp32_<float2 > { using Type = float2; }; template<> struct V_vec_acum_fp32_<float4 > { using Type = float4; }; template<> struct V_vec_acum_fp32_<uint32_t> { using Type = float2; }; template<> struct V_vec_acum_fp32_<uint2 > { using Type = Float4_; }; template<> struct V_vec_acum_fp32_<uint4 > { using Type = Float8_; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template< int THREADS_PER_KEY, typename K_vec, int N > inline __device__ float qk_dot_(const K_vec (&q)[N], const K_vec (&k)[N]) { #ifdef MMHA_USE_FP32_ACUM_FOR_FMA using K_vec_acum = typename K_vec_acum_fp32_<K_vec>::Type; #else using K_vec_acum = K_vec; #endif // Compute the parallel products for Q*K^T (treat vector lanes separately). 
K_vec_acum qk_vec = mul<K_vec_acum, K_vec, K_vec>(q[0], k[0]); #pragma unroll for( int ii = 1; ii < N; ++ii ) { qk_vec = fma(q[ii], k[ii], qk_vec); } // Finalize the reduction across lanes. float qk = sum(qk_vec); #pragma unroll for( int mask = THREADS_PER_KEY / 2; mask >= 1; mask /= 2 ) { qk += __shfl_xor_sync(uint32_t(-1), qk, mask); } return qk; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T, int THREADS_PER_KEY > struct Qk_dot { template< typename K_vec, int N > static inline __device__ float dot(const K_vec (&q)[N], const K_vec (&k)[N]) { return qk_dot_<THREADS_PER_KEY>(q, k); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// inline __device__ float4 hmma_fp32(const uint2 &a, uint32_t b) { float4 c; float zero = 0.f; asm volatile( \ "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 \n" \ " {%0, %1, %2, %3}, \n" \ " {%4, %5}, \n" \ " {%6}, \n" \ " {%7, %7, %7, %7}; \n" \ \ : "=f"(c.x), "=f"(c.y), "=f"(c.z), "=f"(c.w) : "r"(a.x) "r"(a.y) , "r"(b) , "f"(zero)); return c; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< int N > inline __device__ float qk_hmma_dot_(const uint32_t (&q)[N], const uint32_t (&k)[N]) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 750 #ifdef MMHA_USE_FP32_ACUM_FOR_FMA using K_vec_acum = typename K_vec_acum_fp32_<uint32_t>::Type; #else using K_vec_acum = uint32_t; #endif K_vec_acum qk_vec = mul<K_vec_acum, uint32_t, uint32_t>(q[0], k[0]); #pragma unroll for( int ii = 1; ii < N; ++ii ) { qk_vec = fma(q[ii], k[ii], qk_vec); } #ifdef MMHA_USE_FP32_ACUM_FOR_FMA uint32_t qk_vec_ = float2_to_half2(qk_vec); return hmma_fp32(make_uint2(qk_vec_, 0u), 0x3c003c00u).x; #else return hmma_fp32(make_uint2(qk_vec, 0u), 0x3c003c00u).x; #endif #else return 0.f; #endif } 
////////////////////////////////////////////////////////////////////////////////////////////////////

// Specialization for half (uint16_t) with 4 threads per key: uses the HMMA-based
// reduction on SM75+ when enabled, otherwise the generic shuffle-based dot product.
template<>
struct Qk_dot<uint16_t, 4> {
    template< int N >
    static inline __device__ float dot(const uint32_t (&q)[N], const uint32_t (&k)[N])
    {
#if __CUDA_ARCH__ >= 750 && defined(MMHA_USE_HMMA_FOR_REDUCTION)
        return qk_hmma_dot_(q, k);
#else
        return qk_dot_<4>(q, k);
#endif // defined MMHA_USE_HMMA_FOR_REDUCTION
    }
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// Block-wide sum of "sum" across all threads of the block; red_smem must hold at
// least WARPS_PER_BLOCK floats. The result is broadcast so every thread returns it.
template< int WARPS_PER_BLOCK, int WARP_SIZE = 32 >
inline __device__ float block_sum(float *red_smem, float sum)
{

    // Decompose the thread index into warp / lane.
    int warp = threadIdx.x / WARP_SIZE;
    int lane = threadIdx.x % WARP_SIZE;

    // Compute the sum per warp.
#pragma unroll
    for( int mask = WARP_SIZE / 2; mask >= 1; mask /= 2 ) {
        sum += __shfl_xor_sync(uint32_t(-1), sum, mask);
    }

    // Warp leaders store the data to shared memory.
    if( lane == 0 ) {
        red_smem[warp] = sum;
    }

    // Make sure the data is in shared memory.
    __syncthreads();

    // The warps compute the final sums.
    if( lane < WARPS_PER_BLOCK ) {
        sum = red_smem[lane];
    }

    // Parallel reduction inside the warp.
#pragma unroll
    for( int mask = WARPS_PER_BLOCK / 2; mask >= 1; mask /= 2 ) {
        sum += __shfl_xor_sync(uint32_t(-1), sum, mask);
    }

    // Broadcast to other threads.
    return __shfl_sync(uint32_t(-1), sum, 0);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// convert_from_float: stores a float (or float-vector) accumulator into the element
// type T of the destination, converting to packed half where needed.
inline __device__ void convert_from_float(float &dst, float src)
{
    dst = src;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ void convert_from_float(uint16_t &dst, float src)
{
    dst = float_to_half(src);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ void convert_from_float(uint32_t &dst, float2 src)
{
    dst = float2_to_half2(src);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ void convert_from_float(uint2 &dst, Float4_ src)
{
    dst.x = float2_to_half2(src.x);
    dst.y = float2_to_half2(src.y);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ void convert_from_float(uint4 &dst, Float8_ src)
{
    dst.x = float2_to_half2(src.x);
    dst.y = float2_to_half2(src.y);
    dst.z = float2_to_half2(src.z);
    dst.w = float2_to_half2(src.w);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ void convert_from_float(float2 &dst, float2 src)
{
    dst = src;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ void convert_from_float(float4 &dst, float4 src)
{
    dst = src;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// convert_to_float: extracts the first scalar of a vector as float.
inline __device__ float convert_to_float(float4 u)
{
    return u.x;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ float convert_to_float(uint4 u)
{
    float2 tmp = half2_to_float2(u.x);
    return tmp.x;
}

#if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS)
////////////////////////////////////////////////////////////////////////////////////////////////////

// cast_to_float: widens any supported vector type to its float counterpart
// (identity for the float types, half2 -> float2 conversion for the packed types).
inline __device__ float cast_to_float(float u)
{
    return u;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ float2 cast_to_float(float2 u)
{
    return u;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ float4 cast_to_float(float4 u)
{
    return u;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ Float4_ cast_to_float(Float4_ u)
{
    return u;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ Float8_ cast_to_float(Float8_ u)
{
    return u;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ float2 cast_to_float(uint32_t u)
{
    return half2_to_float2(u);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ Float4_ cast_to_float(uint2 u)
{
    Float4_ tmp;
    tmp.x = half2_to_float2(u.x);
    tmp.y = half2_to_float2(u.y);
    return tmp;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ Float8_ cast_to_float(uint4 u)
{
    Float8_ tmp;
    tmp.x = half2_to_float2(u.x);
    tmp.y = half2_to_float2(u.y);
    tmp.z = half2_to_float2(u.z);
    tmp.w = half2_to_float2(u.w);
    return tmp;
}

#endif

////////////////////////////////////////////////////////////////////////////////////////////////////

// Ceiling division: smallest multiple count such that div_up(m, n) * n >= m.
template< typename T >
inline __device__ __host__ T div_up(T m, T n)
{
    return (m + n-1) / n;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Dynamic shared memory required by masked_multihead_attention_kernel: the max of
// the softmax working set and the final output-reduction scratch.
template< typename T >
inline size_t smem_size_in_bytes(const Masked_multihead_attention_params<T> &params, int threads_per_value, int
threads_per_block) {

    // The amount of shared memory needed to store the Q*K^T values in float.
    size_t qk_sz = div_up(params.timestep + 1, 4) * 16;

    // The extra memory needed if we are not using floats for the final logits.
    size_t logits_sz = 0;
#ifndef MMHA_USE_FP32_ACUM_FOR_LOGITS
    if( sizeof(T) != 4 ) {
        logits_sz = div_up(params.seq_length, 4) * 4 * sizeof(T);
    }
#endif

    // The total size needed during softmax.
    size_t softmax_sz = qk_sz + logits_sz;

    // The number of partial rows to reduce in the final reduction.
    int rows_per_red = threads_per_block / threads_per_value;
    // The amount of storage needed to finalize the outputs.
    size_t red_sz = rows_per_red * params.hidden_size_per_head * sizeof(T) / 2;

    // The max.
    return max(softmax_sz, red_sz);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Participation mask covering the low "threads" lanes, for partial-warp shuffles.
inline __device__ constexpr uint32_t shfl_mask(int threads)
{
    return threads == 32 ? uint32_t(-1) : (1u << threads) - 1u;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Single-step masked multi-head attention for incremental decoding.
// Launch layout: grid = (num_heads, batch_size), one block per (head, batch) pair;
// dynamic shared memory sized by smem_size_in_bytes (same template arguments).
template<
    // The type of the inputs. Supported types: float and half.
    typename T,
    // The hidden dimension per head.
    int Dh,
    // The number of threads per key.
    int THREADS_PER_KEY,
    // The number of threads per value.
    int THREADS_PER_VALUE,
    // The number of threads in a threadblock.
    int THREADS_PER_BLOCK
>
__global__ void masked_multihead_attention_kernel(Masked_multihead_attention_params<T> params)
{

    // Make sure the hidden dimension per head is a multiple of the number of threads per key.
    static_assert(Dh % THREADS_PER_KEY == 0, "");
    // Make sure the hidden dimension per head is a multiple of the number of threads per value.
    static_assert(Dh % THREADS_PER_VALUE == 0, "");

    // The size of a warp.
    constexpr int WARP_SIZE = 32;
    // The number of warps in a threadblock.
    constexpr int WARPS_PER_BLOCK = THREADS_PER_BLOCK / WARP_SIZE;

    // Use smem_size_in_bytes (above) to determine the amount of shared memory.
    extern __shared__ char smem_[];

    // The shared memory for the Q*K^T values and partial logits in softmax.
    float *qk_smem = reinterpret_cast<float*>(smem_);

    // The shared memory for the logits. For FP32, that's the same buffer as qk_smem.
    char *logits_smem_ = smem_;
#ifndef MMHA_USE_FP32_ACUM_FOR_LOGITS
    if( sizeof(T) != 4 ) {
        logits_smem_ += div_up(params.timestep + 1, 4) * 16; //sizeof(float);
    }
    T *logits_smem = reinterpret_cast<T*>(logits_smem_);
#else
    float *logits_smem = reinterpret_cast<float*>(logits_smem_);
#endif

    // The shared memory to do the final reduction for the output values. Reuse qk_smem.
    T *out_smem = reinterpret_cast<T*>(smem_);

    // The shared memory buffers for the block-wide reductions. One for max, one for sum.
    __shared__ float red_smem[WARPS_PER_BLOCK * 2];

    // Shared memory to store Q inputs.
    __shared__ T q_smem[Dh];

    // A vector of Q or K elements for the current timestep.
    using Qk_vec = typename Qk_vec_<T, Dh>::Type;
    // The number of elements per vector.
    constexpr int QK_VEC_SIZE = sizeof(Qk_vec) / sizeof(T);
    // Make sure the hidden size per head is a multiple of the vector size.
    static_assert(Dh % QK_VEC_SIZE == 0 && Dh / QK_VEC_SIZE <= WARP_SIZE, "");
    // The number of vectors per warp.
    constexpr int QK_VECS_PER_WARP = Dh / QK_VEC_SIZE;

    // The layout of the cache is [B, H, Dh/x, L, x] with x == 4/8 for FP32/FP16. Since each thread
    // owns x elements, we have to decompose the linear index into chunks of x values and the posi-
    // tion of the thread in that chunk.

    // The number of elements in a chunk of 16B (that's the x in the above formula).
    constexpr int QK_ELTS_IN_16B = 16 / sizeof(T);
    // The number of K vectors in 16B.
    constexpr int QK_VECS_IN_16B = 16 / sizeof(Qk_vec);

    // The batch.
    const int bi = blockIdx.y;
    // Early-out for sequences that have already finished decoding.
    if(params.finished != nullptr && params.finished[bi] == true) return;
    // The head.
    const int hi = blockIdx.x;
    // Combine the batch and the head indices.
    const int bhi = bi * params.num_heads + hi;
    // The thread in the block.
    const int tidx = threadIdx.x;

    // While doing the product Q*K^T for the different keys we track the max.
    float qk_max = -FLT_MAX;

    // Base offset of this (batch, head) slice in the Q/K/V buffers (stride == 0
    // means the tensors are packed head-contiguously).
    int qkv_base_offset = (params.stride == 0)? bhi*Dh : bi*params.stride + hi*Dh;

    // First QK_VECS_PER_WARP load Q and K + the bias values for the current timestep.
    if( tidx < QK_VECS_PER_WARP ) {

        // The offset in the Q and K buffer also accounts for the batch.
        int qk_offset = qkv_base_offset + tidx*QK_VEC_SIZE;
        // The offset in the bias buffer.
        int qk_bias_offset = hi*Dh + tidx*QK_VEC_SIZE;

        // Trigger the loads from the Q and K buffers.
        Qk_vec q = *reinterpret_cast<const Qk_vec*>(&params.q[qk_offset]);
        Qk_vec k = *reinterpret_cast<const Qk_vec*>(&params.k[qk_offset]);

        // Trigger the loads from the Q and K bias buffers.
        Qk_vec q_bias = *reinterpret_cast<const Qk_vec*>(&params.q_bias[qk_bias_offset]);
        Qk_vec k_bias = *reinterpret_cast<const Qk_vec*>(&params.k_bias[qk_bias_offset]);

        // Computes the Q/K values with bias.
        q = add(q, q_bias);
        k = add(k, k_bias);

        // Store the Q values to shared memory.
        *reinterpret_cast<Qk_vec*>(&q_smem[tidx*QK_VEC_SIZE]) = q;

        // Write the K values to the global memory cache.
        //
        // NOTE: The stores are uncoalesced as we have multiple chunks of 16B spread across the memory
        // system. We designed it this way as it allows much better memory loads (and there are many
        // more loads) + the stores are really "write and forget" since we won't need the ack before
        // the end of the kernel. There's plenty of time for the transactions to complete.

        // The 16B chunk written by the thread.
        int co = tidx / QK_VECS_IN_16B;
        // The position of the thread in that 16B chunk.
        int ci = tidx % QK_VECS_IN_16B * QK_VEC_SIZE;

        // Two chunks are separated by L * x elements. A thread write QK_VEC_SIZE elements.
        int offset = bhi*params.seq_length*Dh + co*params.seq_length*QK_ELTS_IN_16B +
                     params.timestep*QK_ELTS_IN_16B + ci;

        // Trigger the stores to global memory.
        *reinterpret_cast<Qk_vec*>(&params.k_cache[offset]) = k;

        // Compute \sum_i Q[i] * K^T[i] for the current timestep.
#ifdef MMHA_USE_FP32_ACUM_FOR_FMA
        using Qk_vec_acum = typename Qk_vec_acum_fp32_<Qk_vec>::Type;
#else
        using Qk_vec_acum = Qk_vec;
#endif
        float qk = dot<Qk_vec_acum, Qk_vec>(q, k);
#pragma unroll
        for( int mask = QK_VECS_PER_WARP / 2; mask >= 1; mask /= 2 ) {
            qk += __shfl_xor_sync(shfl_mask(QK_VECS_PER_WARP), qk, mask);
        }

        // Normalize qk.
        qk *= params.inv_sqrt_dh;

        // Store that value in shared memory. Keep the Q*K^T value in register for softmax.
        if( tidx == 0 ) {
            qk_max = qk;
            qk_smem[params.timestep] = qk;
        }
    }

    // Make sure the data is in shared memory.
    __syncthreads();

    // The type of queries and keys for the math in the Q*K^T product.
    using K_vec = typename K_vec_<T, THREADS_PER_KEY>::Type;
    // The number of elements per vector.
    constexpr int K_VEC_SIZE = sizeof(K_vec) / sizeof(T);
    // Make sure the hidden size per head is a multiple of the vector size.
    static_assert(Dh % K_VEC_SIZE == 0, "");
    // The number of elements per thread.
    constexpr int K_ELTS_PER_THREAD = Dh / THREADS_PER_KEY;
    // The number of vectors per thread.
    constexpr int K_VECS_PER_THREAD = K_ELTS_PER_THREAD / K_VEC_SIZE;

    // The position the first key loaded by each thread from the cache buffer (for this B * H).
    int ko = tidx / THREADS_PER_KEY;
    // The position of the thread in the chunk of keys.
    int ki = tidx % THREADS_PER_KEY * K_VEC_SIZE;

    // Load the Q values from shared memory. The values are reused during the loop on K.
    K_vec q[K_VECS_PER_THREAD];
#pragma unroll
    for( int ii = 0; ii < K_VECS_PER_THREAD; ++ii ) {
        q[ii] = *reinterpret_cast<const K_vec*>(&q_smem[ki + ii*THREADS_PER_KEY*K_VEC_SIZE]);
    }

    // The number of timesteps loaded per iteration.
    constexpr int K_PER_ITER = THREADS_PER_BLOCK / THREADS_PER_KEY;
    // The number of keys per warp.
    constexpr int K_PER_WARP = WARP_SIZE / THREADS_PER_KEY;

    // The base pointer for the key in the cache buffer.
    T *k_cache = &params.k_cache[bhi*params.seq_length*Dh + ki];

    // Pick a number of keys to make sure all the threads of a warp enter (due to shfl_sync).
    int ti_end = div_up(params.timestep, K_PER_WARP) * K_PER_WARP;

    // Iterate over the keys/timesteps to compute the various (Q*K^T)_{ti} values.
    for( int ti = ko; ti < ti_end; ti += K_PER_ITER ) {

        // The keys loaded from the key cache.
        K_vec k[K_VECS_PER_THREAD];
#pragma unroll
        for( int ii = 0; ii < K_VECS_PER_THREAD; ++ii ) {
            int jj = ii * params.seq_length + ti;
            if( ti < params.timestep ) {
                k[ii] = *reinterpret_cast<const K_vec*>(&k_cache[jj*QK_ELTS_IN_16B]);
            }
        }

        // Perform the dot product and normalize qk.
        //
        // WARNING: ALL THE THREADS OF A WARP MUST ENTER!!!
        float qk = Qk_dot<T, THREADS_PER_KEY>::dot(q, k) * params.inv_sqrt_dh;
        // Padding positions between the input length and the max input length are masked out.
        bool is_mask = params.is_mask? (ti >= params.input_lengths[bi] && ti < params.max_input_len) : false;

        // Store the product to shared memory. There's one qk value per timestep. Update the max.
        if( ti < params.timestep && tidx % THREADS_PER_KEY == 0 ) {
            qk_max = is_mask? qk_max : fmaxf(qk_max, qk);
            qk_smem[ti] = qk;
        }
    }

    // Perform the final reduction to compute the max inside each warp.
    //
    // NOTE: In a group of THREADS_PER_KEY threads, the leader already has the max value for the
    // group so it's not needed to run the reduction inside the group (again).
#pragma unroll
    for( int mask = WARP_SIZE / 2; mask >= THREADS_PER_KEY; mask /= 2 ) {
        qk_max = fmaxf(qk_max, __shfl_xor_sync(uint32_t(-1), qk_max, mask));
    }

    // Decompose the thread index into warp and lane.
    const int warp = tidx / WARP_SIZE;
    const int lane = tidx % WARP_SIZE;

    // The warp leader writes the max to shared memory.
    if( lane == 0 ) {
        red_smem[warp] = qk_max;
    }

    // Make sure the products are in shared memory.
    __syncthreads();

    // The warps finalize the reduction.
    qk_max = lane < WARPS_PER_BLOCK ? red_smem[lane] : -FLT_MAX;
#pragma unroll
    for( int mask = WARPS_PER_BLOCK / 2; mask >= 1; mask /= 2 ) {
        qk_max = fmaxf(qk_max, __shfl_xor_sync(uint32_t(-1), qk_max, mask));
    }

    // Broadcast to all the threads in the warp.
    qk_max = __shfl_sync(uint32_t(-1), qk_max, 0);

    // Compute the logits and start the sum.
    float sum = 0.f;
    for( int ti = tidx; ti <= params.timestep; ti += THREADS_PER_BLOCK ) {
        bool is_mask = params.is_mask? (ti >= params.input_lengths[bi] && ti < params.max_input_len) : false;
        float logit = is_mask? 0.f : __expf(qk_smem[ti] - qk_max);
        sum += logit;
        qk_smem[ti] = logit;
    }

    // Compute the sum.
    sum = block_sum<WARPS_PER_BLOCK>(&red_smem[WARPS_PER_BLOCK], sum);

    // Normalize the logits.
    float inv_sum = __fdividef(1.f, sum + 1.e-6f);
    for( int ti = tidx; ti <= params.timestep; ti += THREADS_PER_BLOCK ) {
        convert_from_float(logits_smem[ti], qk_smem[ti] * inv_sum);
    }

    // Make sure the logits are in shared memory.
    __syncthreads();

    // The number of elements per vector.
    constexpr int V_VEC_SIZE = Dh / THREADS_PER_VALUE;
    // A vector of V elements for the current timestep.
    using V_vec = typename V_vec_<T, V_VEC_SIZE>::Type;

    // The value computed by this thread.
    int vo = tidx / THREADS_PER_VALUE;
    // The hidden dimensions computed by this particular thread.
    int vi = tidx % THREADS_PER_VALUE * V_VEC_SIZE;

    // The base pointer for the value in the cache buffer.
    T *v_cache = &params.v_cache[bhi*params.seq_length*Dh + vi];

#ifdef MMHA_USE_FP32_ACUM_FOR_OUT
    using V_vec_acum = typename V_vec_acum_fp32_<V_vec>::Type;
#else
    using V_vec_acum = V_vec;
#endif
    // The partial outputs computed by each thread.
    V_vec_acum out;
    zero(out);

    // The number of values processed per iteration of the loop.
    constexpr int V_PER_ITER = THREADS_PER_BLOCK / THREADS_PER_VALUE;

    // Loop over the timesteps to compute the partial outputs.
    for( int ti = vo; ti < params.timestep; ti += V_PER_ITER ) {

        // Load the values from the cache.
        V_vec v = *reinterpret_cast<const V_vec*>(&v_cache[ti*Dh]);
        // Load the logits from shared memory.
#if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS)
        float logit = logits_smem[ti];
        out = fma(logit, cast_to_float(v), out);
#else
        T logit = logits_smem[ti];

        // Update the partial sums.
        out = fma(logit, v, out);
#endif
    }

    // One group of threads computes the product(s) for the current timestep.
    if( vo == params.timestep % V_PER_ITER ) {

        // Trigger the loads from the V buffer.
        V_vec v = *reinterpret_cast<const V_vec*>(&params.v[qkv_base_offset + vi]);
        // Trigger the loads from the V bias buffer.
        V_vec v_bias = *reinterpret_cast<const V_vec*>(&params.v_bias[hi*Dh + vi]);

        // Compute the V values with bias.
        v = add(v, v_bias);

        // Store the values with bias back to global memory in the cache for V.
        *reinterpret_cast<V_vec*>(&v_cache[params.timestep*Dh]) = v;

        // Initialize the output value with the current timestep.
#if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS)
        out = fma(logits_smem[params.timestep], cast_to_float(v), out);
#else
        out = fma(logits_smem[params.timestep], v, out);
#endif
    }

    // Make sure we can start writing to shared memory.
    __syncthreads();

    // Run the final reduction amongst the different groups computing different partial outputs.
#pragma unroll
    for( int active_groups = V_PER_ITER; active_groups >= 2; active_groups /= 2 ) {

        // The midpoint in the number of active groups.
        int midpoint = active_groups / 2;

        // The upper part of active threads store to shared memory.
        if( vo >= midpoint && vo < active_groups ) {
#ifdef MMHA_USE_FP32_ACUM_FOR_OUT
            convert_from_float(*reinterpret_cast<V_vec*>(&out_smem[(vo - midpoint)*Dh + vi]), out);
#else
            *reinterpret_cast<V_vec*>(&out_smem[(vo - midpoint)*Dh + vi]) = out;
#endif
        }
        __syncthreads();

        // The bottom warps update their values.
        if( vo < midpoint ) {
            out = add(*reinterpret_cast<const V_vec*>(&out_smem[vo*Dh + vi]), out);
        }
        __syncthreads();
    }

    // Output the final values.
    if( vo == 0 ) {
#ifdef MMHA_USE_FP32_ACUM_FOR_OUT
        convert_from_float(*reinterpret_cast<V_vec*>(&params.out[bhi*Dh + vi]), out);
#else
        *reinterpret_cast<V_vec*>(&params.out[bhi*Dh + vi]) = out;
#endif
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace mmha

////////////////////////////////////////////////////////////////////////////////////////////////////

#define MMHA_LAUNCH_KERNEL(T, Dh, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK, stream)            \
    size_t smem_sz = mmha::smem_size_in_bytes<T>(params, THDS_PER_VALUE, THDS_PER_BLOCK);          \
    dim3 grid(params.num_heads, params.batch_size);                                                \
    mmha::masked_multihead_attention_kernel<T, Dh, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK>   \
        <<<grid, THDS_PER_BLOCK, smem_sz, stream>>>(params)

////////////////////////////////////////////////////////////////////////////////////////////////////

// Picks the launch configuration from the current timestep: fewer threads per key
// and bigger blocks as the attended sequence grows.
template < typename T, int Dh >
void mmha_launch_kernel(const Masked_multihead_attention_params<T> &params, const cudaStream_t &stream)
{
    constexpr int THREADS_PER_VALUE = Dh * sizeof(T) / 16;
    if( params.timestep < 32 ) {
        MMHA_LAUNCH_KERNEL(T, Dh, 4, THREADS_PER_VALUE, 64, stream);
    } else if( params.timestep < 2048 ) {
        MMHA_LAUNCH_KERNEL(T, Dh, 2, THREADS_PER_VALUE, 128, stream);
    } else {
        MMHA_LAUNCH_KERNEL(T, Dh, 1, THREADS_PER_VALUE, 256, stream);
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Dispatches on the per-head hidden size; only 32/64/128 are supported.
template< typename T >
void masked_multihead_attention_(const Masked_multihead_attention_params<T> &params, const cudaStream_t &stream)
{
    switch ( params.hidden_size_per_head ) {
        case 32:
            mmha_launch_kernel<T, 32>(params, stream);
            break;
        case 64:
            mmha_launch_kernel<T, 64>(params, stream);
            break;
        case 128:
            mmha_launch_kernel<T, 128>(params, stream);
            break;
        default:
            assert(false);
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////

void masked_multihead_attention(const
Masked_multihead_attention_params<float> &params, const cudaStream_t &stream)
{
    // FP32 entry point.
    masked_multihead_attention_(params, stream);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// FP16 entry point (halves are passed as raw uint16_t bit patterns).
void masked_multihead_attention(const Masked_multihead_attention_params<uint16_t> &params, const cudaStream_t &stream)
{
    masked_multihead_attention_(params, stream);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

#undef MMHA_LAUNCH_KERNEL
similar.hip
// !!! This is a file automatically generated by hipify!!!
#include "kernels_hip.cuh"

// Forward pass of the patch-similarity op. For each batch sample it launches
// f_cc2k on the current HIP stream to fill one (height, width, kH*kW) output slice.
// NOTE(review): f_cc2k presumably scores each position of x_ori against the kH*kW
// neighbourhood of x_loc — confirm against kernels_hip.cuh.
torch::Tensor similar_cuda_forward(
    const torch::Tensor &x_ori,
    const torch::Tensor &x_loc,
    const int kH, const int kW
) {
    TypeCheck(x_ori);
    TypeCheck(x_loc);
    // Input layout is (batch, channels, height, width).
    const int batch    = x_ori.size(0);
    const int channels = x_ori.size(1);
    const int height   = x_ori.size(2);
    const int width    = x_ori.size(3);

    // Patch radii (floor of half the kernel size) and patch area.
    const int rH = kH >> 1;
    const int rW = kW >> 1;
    const int patch = kH * kW;

    // Flat element strides: one channel / one input sample / one output sample.
    const int per_channel = height * width;
    const int per_input   = per_channel * channels;
    const int per_output  = height * width * patch;

    auto output = torch::empty({batch, height, width, patch}, x_ori.options());

    // Process the batch one sample at a time by advancing raw float pointers.
    int start_inp = 0, start_out = 0;
    for (int i = 0; i < batch; ++i)
    {
        f_cc2k<float, double>(
            at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
            x_ori.data_ptr<float>() + start_inp,
            x_loc.data_ptr<float>() + start_inp,
            kH, kW, rH, rW, patch,
            channels, height, width,
            per_channel,
            output.data_ptr<float>() + start_out
        );
        start_inp += per_input;
        start_out += per_output;
    }
    return output;
}
//////////////////////////////////////////////////////////////
// Backward pass: routes the incoming gradient through f_ck2c_ori (gradient w.r.t.
// the "ori" input) or f_ck2c_loc (gradient w.r.t. the "loc" input), per sample.
torch::Tensor similar_cuda_backward(
    const torch::Tensor &x,
    const torch::Tensor &grad_out,
    const int kH, const int kW,
    const bool is_ori
) {
    TypeCheck(x);
    const int batch    = x.size(0);
    const int channels = x.size(1);
    const int height   = x.size(2);
    const int width    = x.size(3);

    const int rH = kH >> 1;
    const int rW = kW >> 1;
    const int patch = kH * kW;

    const int per_channel = height * width;
    const int per_input   = per_channel * channels;

    auto grad_inp = torch::empty({batch, channels, height, width}, x.options());

    int start_inp = 0;
    for (int i = 0; i < batch; ++i)
    {
        // Gradient slice for sample i; shape (height, width, patch).
        auto grad_out_row = grad_out.select(0, i);
        if (is_ori)
        {
            f_ck2c_ori<float, double>(
                at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                x.data_ptr<float>() + start_inp,
                grad_out_row.data_ptr<float>(),
                kH, kW, rH, rW, patch,
                channels, height, width,
                per_channel, per_input,
                grad_inp.data_ptr<float>() + start_inp
            );
        }
        else
        {
            f_ck2c_loc<float, double>(
                at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                x.data_ptr<float>() + start_inp,
                grad_out_row.data_ptr<float>(),
                kH, kW, rH, rW, patch,
                channels, height, width,
                per_channel, per_input,
                grad_inp.data_ptr<float>() + start_inp
            );
        }
        start_inp += per_input;
    }
    return grad_inp;
}
similar.cu
#include "kernels.cuh"

// Forward pass of the patch-similarity op (CUDA twin of similar.hip). For each
// batch sample it launches f_cc2k on the current CUDA stream to fill one
// (height, width, kH*kW) output slice.
// NOTE(review): f_cc2k presumably scores each position of x_ori against the kH*kW
// neighbourhood of x_loc — confirm against kernels.cuh.
torch::Tensor similar_cuda_forward(
    const torch::Tensor &x_ori,
    const torch::Tensor &x_loc,
    const int kH, const int kW
) {
    TypeCheck(x_ori);
    TypeCheck(x_loc);
    // Input layout is (batch, channels, height, width).
    const int batch    = x_ori.size(0);
    const int channels = x_ori.size(1);
    const int height   = x_ori.size(2);
    const int width    = x_ori.size(3);

    // Patch radii (floor of half the kernel size) and patch area.
    const int rH = kH >> 1;
    const int rW = kW >> 1;
    const int patch = kH * kW;

    // Flat element strides: one channel / one input sample / one output sample.
    const int per_channel = height * width;
    const int per_input   = per_channel * channels;
    const int per_output  = height * width * patch;

    auto output = torch::empty({batch, height, width, patch}, x_ori.options());

    // Process the batch one sample at a time by advancing raw float pointers.
    int start_inp = 0, start_out = 0;
    for (int i = 0; i < batch; ++i)
    {
        f_cc2k<float, double>(
            at::cuda::getCurrentCUDAStream(),
            x_ori.data_ptr<float>() + start_inp,
            x_loc.data_ptr<float>() + start_inp,
            kH, kW, rH, rW, patch,
            channels, height, width,
            per_channel,
            output.data_ptr<float>() + start_out
        );
        start_inp += per_input;
        start_out += per_output;
    }
    return output;
}
//////////////////////////////////////////////////////////////
// Backward pass: routes the incoming gradient through f_ck2c_ori (gradient w.r.t.
// the "ori" input) or f_ck2c_loc (gradient w.r.t. the "loc" input), per sample.
torch::Tensor similar_cuda_backward(
    const torch::Tensor &x,
    const torch::Tensor &grad_out,
    const int kH, const int kW,
    const bool is_ori
) {
    TypeCheck(x);
    const int batch    = x.size(0);
    const int channels = x.size(1);
    const int height   = x.size(2);
    const int width    = x.size(3);

    const int rH = kH >> 1;
    const int rW = kW >> 1;
    const int patch = kH * kW;

    const int per_channel = height * width;
    const int per_input   = per_channel * channels;

    auto grad_inp = torch::empty({batch, channels, height, width}, x.options());

    int start_inp = 0;
    for (int i = 0; i < batch; ++i)
    {
        // Gradient slice for sample i; shape (height, width, patch).
        auto grad_out_row = grad_out.select(0, i);
        if (is_ori)
        {
            f_ck2c_ori<float, double>(
                at::cuda::getCurrentCUDAStream(),
                x.data_ptr<float>() + start_inp,
                grad_out_row.data_ptr<float>(),
                kH, kW, rH, rW, patch,
                channels, height, width,
                per_channel, per_input,
                grad_inp.data_ptr<float>() + start_inp
            );
        }
        else
        {
            f_ck2c_loc<float, double>(
                at::cuda::getCurrentCUDAStream(),
                x.data_ptr<float>() + start_inp,
                grad_out_row.data_ptr<float>(),
                kH, kW, rH, rW, patch,
                channels, height, width,
                per_channel, per_input,
                grad_inp.data_ptr<float>() + start_inp
            );
        }
        start_inp += per_input;
    }
    return grad_inp;
}
8c0a0f0305e5f85fb9a84c41b478ad2d20fb5b4b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ============================================================================ name : TS-GAP.cu Author : Version : Copyright : Your copyright notice Description : CUDA compute reciprocals ============================================================================ */ #include <stdio.h> #include <stdlib.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <sys/time.h> #include "gpulib/types.h" #include "gpulib/gpu.cuh" #include "Instance.h" #include "Solution.h" #include "gSolution.cuh" #include "guloso.h" const int nThreads = 64; const int nBlocks = 10; const int maxChain = 10; int main(int argc, char *argv[]) { //Variable with GPU's number int deviceCount = 0; //Commands for verify use correct of GPU hipError_t error_id = hipGetDeviceCount(&deviceCount); if (error_id != hipSuccess) { printf("hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id)); printf("Result = FAIL\n"); exit(1); } if(deviceCount == 0) { printf("No GPU found :("); exit(1); } else { printf("Found %d GPUs!\n", deviceCount); gpuSetDevice(0); printf("GPU 0 initialized!\n"); } //iterator of use in for int i, j; //Pointer of states for use in hiprand (GPU) hiprandState_t *states; hipMalloc((void**)&states, (nThreads*nBlocks)*sizeof(hiprandState_t)); //Pointer of seed for use with hiprand (host) unsigned int *h_seed = (unsigned int*)malloc(sizeof(unsigned int)*(nThreads*nBlocks)); srand(time(NULL)); for(i=0;i<(nThreads*nBlocks);i++){ h_seed[i] = rand()%100000; } //Pointers of intance, solution and ejection for use in CPU(Host) and GPU(Device) Instance *h_instance, *d_instance; Solution *h_solution, *d_solution, *best_solution; EjectionChain *h_ejection, *d_ejection; //Load Instance char nameAux[50] = "../Instances/"; const char *temp_teste = argv[1]; strcat(nameAux,argv[1]); printf("%s \n",nameAux); const char *fileName = nameAux; //argv[1]; // 
strcat(fileName,argv[1]); h_instance = loadInstance(fileName); int ti = atoi(argv[5]); //showInstance(h_instance); //Allocation Solution and Ejection best_solution = allocationPointersSolution(h_instance); h_solution = allocationPointersSolution(h_instance); h_ejection = allocationPointerEjectionChain(h_instance); //weight greedy float w1,w2; struct timeval time_rand; //Generate Initial Solution from greedy method for(i=0;i<nBlocks;i++){ gettimeofday(&time_rand,NULL); srand(time_rand.tv_usec); //memset(h_solution,0,size_solution); if(temp_teste[0]=='e'){ do{ printf("Teste"); for(j=0;j<h_instance->mAgents;j++){ h_solution->resUsage[j+i*h_instance->mAgents] = 0; } w1 = (float)(rand())/(float)(RAND_MAX) + 0.5; w2 = 19 + w1; }while(guloso(h_instance,h_solution,w1,w2,i)==0); }else{ do{ for(j=0;j<h_instance->mAgents;j++){ h_solution->resUsage[j+i*h_instance->mAgents] = 0; } w1 = (float)(rand())/(float)(RAND_MAX) + 0.5; w2 = 1 + w1; }while(guloso(h_instance,h_solution,w1,w2,i)==0); } } //best_solution = h_solution; //Size Struct Solution size_t size_solution = sizeof(Solution) + sizeof(TcostFinal)*nBlocks + sizeof(Ts)*(h_instance->nJobs*nBlocks) + sizeof(TresUsage)*(h_instance->mAgents*nBlocks); for(i=0;i<nBlocks;i++){ best_solution->costFinal[i] = h_solution->costFinal[i]; for(j=0;j<h_instance->nJobs;j++){ best_solution->s[j+i*h_instance->nJobs] = h_solution->costFinal[j+i*h_instance->nJobs]; } for(j=0;j<h_instance->mAgents;j++){ best_solution->resUsage[j+i*h_instance->mAgents] = h_solution->resUsage[j+i*h_instance->mAgents]; } } //Size Struct of Ejection Chain size_t size_ejection = sizeof(EjectionChain) + sizeof(Tpos)*(nBlocks*nThreads*maxChain) + sizeof(Top)*(nBlocks*nThreads) + sizeof(TSizeChain)*(nBlocks*nThreads) + sizeof(Tdelta)*(nBlocks*nThreads); //Size Struct of Instance size_t size_instance = sizeof(Instance) + sizeof(Tcost)*(h_instance->nJobs*h_instance->mAgents) //cost + sizeof(TresourcesAgent)*(h_instance->nJobs*h_instance->mAgents) + 
sizeof(Tcapacity)*h_instance->mAgents; int *h_short_list = (int*)malloc(sizeof(int)*(nBlocks*h_instance->nJobs)); int *h_long_list = (int*)malloc(sizeof(int)*(h_instance->nJobs*h_instance->mAgents)); memset(h_short_list,0,sizeof(int)*(nBlocks*h_instance->nJobs)); memset(h_long_list,0,sizeof(int)*(h_instance->nJobs*h_instance->mAgents)); int cost_saida = 1000000; for(i=0;i<nBlocks;i++){ for(j=0;j<h_instance->nJobs;j++){ h_long_list[j + h_solution->s[j+i*h_instance->nJobs]*h_instance->nJobs]++; } } for(i=0;i<nBlocks;i++){ printf("Initial cost: %d\n", h_solution->costFinal[i]); if(cost_saida>h_solution->costFinal[i]){ cost_saida=h_solution->costFinal[i]; } // if(i==0){ // for(j=0;j<h_instance->nJobs;j++){ // printf("job %d agent %d\n",j, h_solution->s[j+i*h_instance->nJobs]); // } // } } int nJ = h_instance->nJobs; int *d_short_list; gpuMalloc((void*)&d_short_list,sizeof(int)*(nBlocks*h_instance->nJobs) ); gpuMemcpy(d_short_list, h_short_list,sizeof(int)*(nBlocks*h_instance->nJobs), hipMemcpyHostToDevice); // int blockSize; // The launch configurator returned block size // int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch // int gridSize; // int N = 1000000; // hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize,TS_GAP, 0, N); // printf("block size %d\n",blockSize); // printf("Min Grid %d\n",minGridSize); // getchar(); //Reallocation of pointers Instance and Solution for GPU (device) d_instance = createGPUInstance(h_instance, h_instance->nJobs, h_instance->mAgents); d_solution = createGPUsolution(h_solution,h_instance->nJobs, h_instance->mAgents); d_ejection = createGPUejection(h_ejection,h_instance->nJobs, h_instance->mAgents); //Pointers seed in device (GPU) unsigned int *d_seed; //Event and gpu for contability time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Allocation of pointer and copy value in d_seed (Device) gpuMalloc((void*)&d_seed, sizeof(unsigned 
int)*(nThreads*nBlocks)); gpuMemcpy(d_seed, h_seed, sizeof(unsigned int)*(nThreads*nBlocks), hipMemcpyHostToDevice); hipEventRecord(start); int n_iteration = atoi(argv[2]); int ite=1; int n_busca = atoi(argv[3]); int b_solution = atoi(argv[4]); int ite_b = 0; int sizeTabu; int menor,aux1,t1,m1,m2,aux; int *v_menor_pos = (int*)malloc(sizeof(int)*nBlocks); int b_aux; struct timeval inicio; struct timeval t_inicio; struct timeval fim; struct timeval t_fim; int tmili = 0; int tmelhora = 0; nJ =1.25*( nJ/(maxChain+1) ); // nJ = 15; gettimeofday(&inicio, NULL); size_t freeMem, totalMem; gettimeofday(&t_inicio,NULL); while((ite<=n_iteration)&&(tmili<=(ti*60000))&&(tmelhora<15000)){ sizeTabu = rand()%nJ + 1; // printf("Size tabu: %d\n", sizeTabu); hipLaunchKernelGGL(( TS_GAP), dim3(nBlocks),dim3(nThreads), 0, 0, d_instance, d_solution,d_ejection, d_short_list, d_seed, states, ite, n_busca); gpuDeviceSynchronize(); hipMemGetInfo(&freeMem, &totalMem); printf("Free = %zu, Total = %zu\n, size_intance = %zu", freeMem, totalMem,size_instance); gpuMemcpy(h_instance, d_instance, size_instance, hipMemcpyDeviceToHost); gpuMemcpy(h_solution, d_solution, size_solution, hipMemcpyDeviceToHost); gpuMemcpy(h_ejection, d_ejection, size_ejection, hipMemcpyDeviceToHost); gpuMemcpy(h_short_list, d_short_list,sizeof(int)*(nBlocks*h_instance->nJobs), hipMemcpyDeviceToHost); gpuMemcpy(h_seed, d_seed, sizeof(unsigned int)*(nThreads*nBlocks), hipMemcpyDeviceToHost); //reallocation pointers of Instance h_instance->cost = (Tcost*)(h_instance+1); h_instance->resourcesAgent =(TresourcesAgent*) (h_instance->cost +(h_instance->nJobs*h_instance->mAgents)); h_instance->capacity =(Tcapacity*) (h_instance->resourcesAgent + (h_instance->nJobs*h_instance->mAgents)); //reallocation pointers of Solution h_solution->costFinal = (TcostFinal*)(h_solution+1); h_solution->s = (Ts*)(h_solution->costFinal + nBlocks); h_solution->resUsage = (TresUsage*)(h_solution->s + (h_instance->nJobs*nBlocks)); //reallocation 
pointers of Ejection h_ejection->pos=(Tpos*)(h_ejection + 1); h_ejection->op = (Top*)(h_ejection->pos+ (nBlocks*nThreads*maxChain)); h_ejection->sizeChain = (TSizeChain*)(h_ejection->op + (nBlocks*nThreads)); h_ejection->delta = (Tdelta*)(h_ejection->sizeChain + (nBlocks*nThreads)); // printf("%d time %d \n",ite,tmili); for(i=0;i<nBlocks;i++){ menor = 100000; for(j=0;j<nThreads;j++){ if(h_ejection->delta[j + i*nThreads]<menor){ menor = h_ejection->delta[j + i*nThreads]; } //printf("value of delta for thread %d in block %d: :%d \n", j, i, h_ejection->delta[j + i*nThreads]); } menor = returnIndice(h_solution,h_ejection,i,nBlocks,nThreads,menor,h_long_list,h_instance->nJobs,h_instance->mAgents); // printf("menor delta do bloco %d: %d\n",i,menor); if(h_ejection->op[menor + i*nThreads]==1){ aux1 = h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads]; //aux2 = ejection->pos[0 + menor*maxChain + i*maxChain*nThreads]; h_short_list[aux1 + i*h_instance->nJobs] = ite + sizeTabu; }else{ b_aux = rand()%h_ejection->sizeChain[menor+i*nThreads]; //for(j = 0; j<h_ejection->sizeChain[menor + i*nThreads];j++){ aux1 = h_ejection->pos[b_aux + menor*maxChain + i*maxChain*nThreads]; h_short_list[aux1 + i*h_instance->nJobs] = ite + sizeTabu; //} } h_solution->costFinal[i] += h_ejection->delta[menor+i*nThreads]; if(h_ejection->op[menor + i*nThreads]==1){ t1 = h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads]; m2 = h_ejection->pos[1 + menor*maxChain + i*maxChain*nThreads]; m1 = ((int)h_solution->s[t1 + i*h_instance->nJobs]); h_solution->resUsage[m1 + i*h_instance->mAgents] -= h_instance->resourcesAgent[t1*h_instance->mAgents + m1]; h_solution->resUsage[m2 + i*h_instance->mAgents] += h_instance->resourcesAgent[t1*h_instance->mAgents + m2]; h_solution->s[t1 + i*h_instance->nJobs] = ((char)m2); // if(m2>4){ // printf("op 1"); // } }else{ h_solution->resUsage[((int)h_solution->s[h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs]) + 
i*h_instance->mAgents] += h_instance->resourcesAgent[h_ejection->pos[(h_ejection->sizeChain[menor + i*nThreads]-1) + menor*maxChain + i*maxChain*nThreads]*h_instance->mAgents + ((int)h_solution->s[h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs])]; h_solution->resUsage[((int)h_solution->s[h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs]) + i*h_instance->mAgents] -= h_instance->resourcesAgent[h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads]*h_instance->mAgents + ((int)h_solution->s[h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs])]; aux = ((int)h_solution->s[h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads]+ i*h_instance->nJobs]); for(j=1; j<h_ejection->sizeChain[menor + i*nThreads]; j++){ h_solution->resUsage[((int)h_solution->s[h_ejection->pos[j + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs]) + i*h_instance->mAgents] += h_instance->resourcesAgent[h_ejection->pos[(j-1) + menor*maxChain + i*maxChain*nThreads]*h_instance->mAgents + ((int)h_solution->s[h_ejection->pos[j + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs])]; h_solution->resUsage[((int)h_solution->s[h_ejection->pos[j + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs]) + i*h_instance->mAgents] -= h_instance->resourcesAgent[h_ejection->pos[j + menor*maxChain + i*maxChain*nThreads]*h_instance->mAgents + ((int)h_solution->s[h_ejection->pos[j + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs])]; h_solution->s[h_ejection->pos[(j-1) + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs] = h_solution->s[h_ejection->pos[j + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs]; } h_solution->s[h_ejection->pos[(h_ejection->sizeChain[menor + i*nThreads]-1) + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs] = ((char)aux); } // printf("cost: %d\n", h_solution->costFinal[i]); if(h_solution->costFinal[i]<cost_saida){ cost_saida = 
h_solution->costFinal[i]; tmelhora = 0; gettimeofday(&t_inicio,NULL); printf("tempo/ custo: %d - %d \n", tmili, cost_saida); } if(h_solution->costFinal[i] < best_solution->costFinal[i]){ best_solution->costFinal[i] = h_solution->costFinal[i]; for(j=0;j<h_instance->nJobs;j++){ best_solution->s[j + i*h_instance->nJobs] = h_solution->s[j + i*h_instance->nJobs]; } for(j=0;j<h_instance->mAgents;j++){ best_solution->resUsage[j + i*h_instance->mAgents] = h_solution->resUsage[j + i*h_instance->mAgents]; } } } for(i=0;i<nBlocks;i++){ for(j=0;j<h_instance->nJobs;j++){ h_long_list[j + h_solution->s[j+i*h_instance->nJobs]*h_instance->nJobs]++; } } /* for(i=0;i<nBlocks;i++){ for(j=0;j<h_instance->nJobs;j++){ h_long_list[j + h_solution->s[j+i*h_instance->nJobs]*h_instance->nJobs]++; if(h_solution->s[j + i*h_instance->nJobs]>4){ printf("cpu teste: %d\n",h_solution->s[j + i*h_instance->nJobs]); } } }*/ gettimeofday(&fim, NULL); gettimeofday(&t_fim, NULL); tmili = (int) (1000 * (fim.tv_sec - inicio.tv_sec) + (fim.tv_usec - inicio.tv_usec) / 1000); // tmelhora = (int) (1000 * (t_fim.tv_sec - t_inicio.tv_sec) + (t_fim.tv_usec - t_inicio.tv_usec) / 1000); if((ite!=n_iteration)&&(tmili<ti*(60000))&&(tmelhora<15000)){ //reallocation pointers of Instanc tmelhora=0; h_instance->cost = (Tcost*)(d_instance+1); h_instance->resourcesAgent =(TresourcesAgent*) (h_instance->cost +(h_instance->nJobs*h_instance->mAgents)); h_instance->capacity =(Tcapacity*) (h_instance->resourcesAgent + (h_instance->nJobs*h_instance->mAgents)); gpuMemcpy(d_instance, h_instance,size_instance, hipMemcpyHostToDevice); //reallocation pointers of Solution h_solution->costFinal = (TcostFinal*)(d_solution+1); h_solution->s = (Ts*)(h_solution->costFinal + nBlocks); h_solution->resUsage = (TresUsage*)(h_solution->s + (h_instance->nJobs*nBlocks)); gpuMemcpy(d_solution, h_solution, size_solution, hipMemcpyHostToDevice); //reallocation pointers of Ejection memset(h_ejection,0,size_ejection); h_ejection->pos=(Tpos*)(d_ejection 
+ 1); h_ejection->op = (Top*)(h_ejection->pos+ (nBlocks*nThreads*maxChain)); h_ejection->sizeChain = (TSizeChain*)(h_ejection->op + (nBlocks*nThreads)); h_ejection->delta = (Tdelta*)(h_ejection->sizeChain + (nBlocks*nThreads)); gpuMemcpy(d_ejection, h_ejection, size_ejection, hipMemcpyHostToDevice); gettimeofday(&time_rand,NULL); srand(time_rand.tv_usec); for(i=0;i<(nThreads*nBlocks);i++){ h_seed[i] = rand()%100000; } gpuMemcpy(d_seed, h_seed, sizeof(unsigned int)*(nThreads*nBlocks), hipMemcpyHostToDevice); gpuMemcpy(d_short_list, h_short_list,sizeof(int)*(nBlocks*h_instance->nJobs), hipMemcpyHostToDevice); }else{ printf("time:%d\n",tmili); } if((ite_b==0)&&(cost_saida<= 1.01 * b_solution)){ ite_b = ite; } ite++; // gettimeofday(&fim, NULL); // tmili = (int) (1000 * (fim.tv_sec - inicio.tv_sec) + (fim.tv_usec - inicio.tv_usec) / 1000); } printf("cost: %d\n ite: %d\n", cost_saida, ite_b); int k; int *cont_similarity = (int*)malloc(sizeof(int)*(h_instance->nJobs*nBlocks)); int *total_similarity = (int*)malloc(sizeof(int)*nBlocks); memset(total_similarity,0,sizeof(int)*nBlocks); memset(cont_similarity,0,sizeof(int)*(h_instance->nJobs*nBlocks)); //h_solution->costFinal[0]=cost_saida; //for(i=0;i<h_instance->nJobs;i++){ // h_solution->s[i] = sol_best[0]; //} // printf("ok1\n"); int* cont_freq = (int*)malloc(sizeof(int)*h_instance->nJobs*h_instance->mAgents); memset(cont_freq,0,h_instance->nJobs*h_instance->mAgents); for(i=0;i<h_instance->nJobs;i++){ for(j=0;j<h_instance->mAgents;j++){ cont_freq[i+j*h_instance->nJobs]=0; } } for(i=0;i<nBlocks;i++){ // printf("pelo block: %d\n",i); for(j=i+1;j<nBlocks;j++){ for(k=0;k<h_instance->nJobs;k++){ if(best_solution->s[k + i*h_instance->nJobs] == best_solution->s[k + j*h_instance->nJobs]){ total_similarity[i]++; total_similarity[j]++; cont_similarity[k + i*h_instance->nJobs]++; cont_similarity[k + j*h_instance->nJobs]++; } } } } // printf("ok2\n"); for(i=0;i<nBlocks;i++){ printf("Solution: %d - %d\n",best_solution->costFinal[i], 
h_solution->costFinal[i]); printf("Similarity Total:%d\n",total_similarity[i]); } aux = 0; // k = total_similarity[0]; k = best_solution->costFinal[0]; for(i=1;i<nBlocks;i++){ //if(total_similarity[i]>k){ if(best_solution->costFinal[i]<k){ aux = i; // k=total_similarity[i]; k= best_solution->costFinal[i]; } } for(i=0;i<nBlocks;i++){ for(j=0;j<h_instance->nJobs;j++){ cont_freq[j+best_solution->s[j+i*h_instance->nJobs]*h_instance->nJobs]++; } } printf("Solution with most similarity is %d with %d, cost: %d\n",aux,total_similarity[aux],best_solution->costFinal[aux] ); create_solution(best_solution,h_instance,aux,temp_teste); create_frequency(best_solution,h_instance,cont_similarity,aux,temp_teste); create_frequency_2(best_solution,h_instance,cont_freq,aux,temp_teste); hipFree(states); hipFree(d_instance); hipFree(d_solution); hipFree(d_ejection); hipFree(d_seed); hipFree(d_short_list); // free(sol_best); free(h_short_list); free(h_seed); free(h_instance); free(h_solution); free(h_ejection); free(best_solution); free(cont_similarity); free(total_similarity); return 0; }
8c0a0f0305e5f85fb9a84c41b478ad2d20fb5b4b.cu
/* ============================================================================ name : TS-GAP.cu Author : Version : Copyright : Your copyright notice Description : CUDA compute reciprocals ============================================================================ */ #include <stdio.h> #include <stdlib.h> #include <curand.h> #include <curand_kernel.h> #include <sys/time.h> #include "gpulib/types.h" #include "gpulib/gpu.cuh" #include "Instance.h" #include "Solution.h" #include "gSolution.cuh" #include "guloso.h" const int nThreads = 64; const int nBlocks = 10; const int maxChain = 10; int main(int argc, char *argv[]) { //Variable with GPU's number int deviceCount = 0; //Commands for verify use correct of GPU cudaError_t error_id = cudaGetDeviceCount(&deviceCount); if (error_id != cudaSuccess) { printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id)); printf("Result = FAIL\n"); exit(1); } if(deviceCount == 0) { printf("No GPU found :("); exit(1); } else { printf("Found %d GPUs!\n", deviceCount); gpuSetDevice(0); printf("GPU 0 initialized!\n"); } //iterator of use in for int i, j; //Pointer of states for use in curand (GPU) curandState_t *states; cudaMalloc((void**)&states, (nThreads*nBlocks)*sizeof(curandState_t)); //Pointer of seed for use with curand (host) unsigned int *h_seed = (unsigned int*)malloc(sizeof(unsigned int)*(nThreads*nBlocks)); srand(time(NULL)); for(i=0;i<(nThreads*nBlocks);i++){ h_seed[i] = rand()%100000; } //Pointers of intance, solution and ejection for use in CPU(Host) and GPU(Device) Instance *h_instance, *d_instance; Solution *h_solution, *d_solution, *best_solution; EjectionChain *h_ejection, *d_ejection; //Load Instance char nameAux[50] = "../Instances/"; const char *temp_teste = argv[1]; strcat(nameAux,argv[1]); printf("%s \n",nameAux); const char *fileName = nameAux; //argv[1]; // strcat(fileName,argv[1]); h_instance = loadInstance(fileName); int ti = atoi(argv[5]); //showInstance(h_instance); 
//Allocation Solution and Ejection best_solution = allocationPointersSolution(h_instance); h_solution = allocationPointersSolution(h_instance); h_ejection = allocationPointerEjectionChain(h_instance); //weight greedy float w1,w2; struct timeval time_rand; //Generate Initial Solution from greedy method for(i=0;i<nBlocks;i++){ gettimeofday(&time_rand,NULL); srand(time_rand.tv_usec); //memset(h_solution,0,size_solution); if(temp_teste[0]=='e'){ do{ printf("Teste"); for(j=0;j<h_instance->mAgents;j++){ h_solution->resUsage[j+i*h_instance->mAgents] = 0; } w1 = (float)(rand())/(float)(RAND_MAX) + 0.5; w2 = 19 + w1; }while(guloso(h_instance,h_solution,w1,w2,i)==0); }else{ do{ for(j=0;j<h_instance->mAgents;j++){ h_solution->resUsage[j+i*h_instance->mAgents] = 0; } w1 = (float)(rand())/(float)(RAND_MAX) + 0.5; w2 = 1 + w1; }while(guloso(h_instance,h_solution,w1,w2,i)==0); } } //best_solution = h_solution; //Size Struct Solution size_t size_solution = sizeof(Solution) + sizeof(TcostFinal)*nBlocks + sizeof(Ts)*(h_instance->nJobs*nBlocks) + sizeof(TresUsage)*(h_instance->mAgents*nBlocks); for(i=0;i<nBlocks;i++){ best_solution->costFinal[i] = h_solution->costFinal[i]; for(j=0;j<h_instance->nJobs;j++){ best_solution->s[j+i*h_instance->nJobs] = h_solution->costFinal[j+i*h_instance->nJobs]; } for(j=0;j<h_instance->mAgents;j++){ best_solution->resUsage[j+i*h_instance->mAgents] = h_solution->resUsage[j+i*h_instance->mAgents]; } } //Size Struct of Ejection Chain size_t size_ejection = sizeof(EjectionChain) + sizeof(Tpos)*(nBlocks*nThreads*maxChain) + sizeof(Top)*(nBlocks*nThreads) + sizeof(TSizeChain)*(nBlocks*nThreads) + sizeof(Tdelta)*(nBlocks*nThreads); //Size Struct of Instance size_t size_instance = sizeof(Instance) + sizeof(Tcost)*(h_instance->nJobs*h_instance->mAgents) //cost + sizeof(TresourcesAgent)*(h_instance->nJobs*h_instance->mAgents) + sizeof(Tcapacity)*h_instance->mAgents; int *h_short_list = (int*)malloc(sizeof(int)*(nBlocks*h_instance->nJobs)); int *h_long_list = 
(int*)malloc(sizeof(int)*(h_instance->nJobs*h_instance->mAgents)); memset(h_short_list,0,sizeof(int)*(nBlocks*h_instance->nJobs)); memset(h_long_list,0,sizeof(int)*(h_instance->nJobs*h_instance->mAgents)); int cost_saida = 1000000; for(i=0;i<nBlocks;i++){ for(j=0;j<h_instance->nJobs;j++){ h_long_list[j + h_solution->s[j+i*h_instance->nJobs]*h_instance->nJobs]++; } } for(i=0;i<nBlocks;i++){ printf("Initial cost: %d\n", h_solution->costFinal[i]); if(cost_saida>h_solution->costFinal[i]){ cost_saida=h_solution->costFinal[i]; } // if(i==0){ // for(j=0;j<h_instance->nJobs;j++){ // printf("job %d agent %d\n",j, h_solution->s[j+i*h_instance->nJobs]); // } // } } int nJ = h_instance->nJobs; int *d_short_list; gpuMalloc((void*)&d_short_list,sizeof(int)*(nBlocks*h_instance->nJobs) ); gpuMemcpy(d_short_list, h_short_list,sizeof(int)*(nBlocks*h_instance->nJobs), cudaMemcpyHostToDevice); // int blockSize; // The launch configurator returned block size // int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch // int gridSize; // int N = 1000000; // cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize,TS_GAP, 0, N); // printf("block size %d\n",blockSize); // printf("Min Grid %d\n",minGridSize); // getchar(); //Reallocation of pointers Instance and Solution for GPU (device) d_instance = createGPUInstance(h_instance, h_instance->nJobs, h_instance->mAgents); d_solution = createGPUsolution(h_solution,h_instance->nJobs, h_instance->mAgents); d_ejection = createGPUejection(h_ejection,h_instance->nJobs, h_instance->mAgents); //Pointers seed in device (GPU) unsigned int *d_seed; //Event and gpu for contability time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Allocation of pointer and copy value in d_seed (Device) gpuMalloc((void*)&d_seed, sizeof(unsigned int)*(nThreads*nBlocks)); gpuMemcpy(d_seed, h_seed, sizeof(unsigned int)*(nThreads*nBlocks), cudaMemcpyHostToDevice); cudaEventRecord(start); int 
n_iteration = atoi(argv[2]); int ite=1; int n_busca = atoi(argv[3]); int b_solution = atoi(argv[4]); int ite_b = 0; int sizeTabu; int menor,aux1,t1,m1,m2,aux; int *v_menor_pos = (int*)malloc(sizeof(int)*nBlocks); int b_aux; struct timeval inicio; struct timeval t_inicio; struct timeval fim; struct timeval t_fim; int tmili = 0; int tmelhora = 0; nJ =1.25*( nJ/(maxChain+1) ); // nJ = 15; gettimeofday(&inicio, NULL); size_t freeMem, totalMem; gettimeofday(&t_inicio,NULL); while((ite<=n_iteration)&&(tmili<=(ti*60000))&&(tmelhora<15000)){ sizeTabu = rand()%nJ + 1; // printf("Size tabu: %d\n", sizeTabu); TS_GAP<<<nBlocks,nThreads>>>(d_instance, d_solution,d_ejection, d_short_list, d_seed, states, ite, n_busca); gpuDeviceSynchronize(); cudaMemGetInfo(&freeMem, &totalMem); printf("Free = %zu, Total = %zu\n, size_intance = %zu", freeMem, totalMem,size_instance); gpuMemcpy(h_instance, d_instance, size_instance, cudaMemcpyDeviceToHost); gpuMemcpy(h_solution, d_solution, size_solution, cudaMemcpyDeviceToHost); gpuMemcpy(h_ejection, d_ejection, size_ejection, cudaMemcpyDeviceToHost); gpuMemcpy(h_short_list, d_short_list,sizeof(int)*(nBlocks*h_instance->nJobs), cudaMemcpyDeviceToHost); gpuMemcpy(h_seed, d_seed, sizeof(unsigned int)*(nThreads*nBlocks), cudaMemcpyDeviceToHost); //reallocation pointers of Instance h_instance->cost = (Tcost*)(h_instance+1); h_instance->resourcesAgent =(TresourcesAgent*) (h_instance->cost +(h_instance->nJobs*h_instance->mAgents)); h_instance->capacity =(Tcapacity*) (h_instance->resourcesAgent + (h_instance->nJobs*h_instance->mAgents)); //reallocation pointers of Solution h_solution->costFinal = (TcostFinal*)(h_solution+1); h_solution->s = (Ts*)(h_solution->costFinal + nBlocks); h_solution->resUsage = (TresUsage*)(h_solution->s + (h_instance->nJobs*nBlocks)); //reallocation pointers of Ejection h_ejection->pos=(Tpos*)(h_ejection + 1); h_ejection->op = (Top*)(h_ejection->pos+ (nBlocks*nThreads*maxChain)); h_ejection->sizeChain = 
(TSizeChain*)(h_ejection->op + (nBlocks*nThreads)); h_ejection->delta = (Tdelta*)(h_ejection->sizeChain + (nBlocks*nThreads)); // printf("%d time %d \n",ite,tmili); for(i=0;i<nBlocks;i++){ menor = 100000; for(j=0;j<nThreads;j++){ if(h_ejection->delta[j + i*nThreads]<menor){ menor = h_ejection->delta[j + i*nThreads]; } //printf("value of delta for thread %d in block %d: :%d \n", j, i, h_ejection->delta[j + i*nThreads]); } menor = returnIndice(h_solution,h_ejection,i,nBlocks,nThreads,menor,h_long_list,h_instance->nJobs,h_instance->mAgents); // printf("menor delta do bloco %d: %d\n",i,menor); if(h_ejection->op[menor + i*nThreads]==1){ aux1 = h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads]; //aux2 = ejection->pos[0 + menor*maxChain + i*maxChain*nThreads]; h_short_list[aux1 + i*h_instance->nJobs] = ite + sizeTabu; }else{ b_aux = rand()%h_ejection->sizeChain[menor+i*nThreads]; //for(j = 0; j<h_ejection->sizeChain[menor + i*nThreads];j++){ aux1 = h_ejection->pos[b_aux + menor*maxChain + i*maxChain*nThreads]; h_short_list[aux1 + i*h_instance->nJobs] = ite + sizeTabu; //} } h_solution->costFinal[i] += h_ejection->delta[menor+i*nThreads]; if(h_ejection->op[menor + i*nThreads]==1){ t1 = h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads]; m2 = h_ejection->pos[1 + menor*maxChain + i*maxChain*nThreads]; m1 = ((int)h_solution->s[t1 + i*h_instance->nJobs]); h_solution->resUsage[m1 + i*h_instance->mAgents] -= h_instance->resourcesAgent[t1*h_instance->mAgents + m1]; h_solution->resUsage[m2 + i*h_instance->mAgents] += h_instance->resourcesAgent[t1*h_instance->mAgents + m2]; h_solution->s[t1 + i*h_instance->nJobs] = ((char)m2); // if(m2>4){ // printf("op 1"); // } }else{ h_solution->resUsage[((int)h_solution->s[h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs]) + i*h_instance->mAgents] += h_instance->resourcesAgent[h_ejection->pos[(h_ejection->sizeChain[menor + i*nThreads]-1) + menor*maxChain + i*maxChain*nThreads]*h_instance->mAgents + 
((int)h_solution->s[h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs])]; h_solution->resUsage[((int)h_solution->s[h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs]) + i*h_instance->mAgents] -= h_instance->resourcesAgent[h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads]*h_instance->mAgents + ((int)h_solution->s[h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs])]; aux = ((int)h_solution->s[h_ejection->pos[0 + menor*maxChain + i*maxChain*nThreads]+ i*h_instance->nJobs]); for(j=1; j<h_ejection->sizeChain[menor + i*nThreads]; j++){ h_solution->resUsage[((int)h_solution->s[h_ejection->pos[j + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs]) + i*h_instance->mAgents] += h_instance->resourcesAgent[h_ejection->pos[(j-1) + menor*maxChain + i*maxChain*nThreads]*h_instance->mAgents + ((int)h_solution->s[h_ejection->pos[j + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs])]; h_solution->resUsage[((int)h_solution->s[h_ejection->pos[j + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs]) + i*h_instance->mAgents] -= h_instance->resourcesAgent[h_ejection->pos[j + menor*maxChain + i*maxChain*nThreads]*h_instance->mAgents + ((int)h_solution->s[h_ejection->pos[j + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs])]; h_solution->s[h_ejection->pos[(j-1) + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs] = h_solution->s[h_ejection->pos[j + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs]; } h_solution->s[h_ejection->pos[(h_ejection->sizeChain[menor + i*nThreads]-1) + menor*maxChain + i*maxChain*nThreads] + i*h_instance->nJobs] = ((char)aux); } // printf("cost: %d\n", h_solution->costFinal[i]); if(h_solution->costFinal[i]<cost_saida){ cost_saida = h_solution->costFinal[i]; tmelhora = 0; gettimeofday(&t_inicio,NULL); printf("tempo/ custo: %d - %d \n", tmili, cost_saida); } if(h_solution->costFinal[i] < 
best_solution->costFinal[i]){ best_solution->costFinal[i] = h_solution->costFinal[i]; for(j=0;j<h_instance->nJobs;j++){ best_solution->s[j + i*h_instance->nJobs] = h_solution->s[j + i*h_instance->nJobs]; } for(j=0;j<h_instance->mAgents;j++){ best_solution->resUsage[j + i*h_instance->mAgents] = h_solution->resUsage[j + i*h_instance->mAgents]; } } } for(i=0;i<nBlocks;i++){ for(j=0;j<h_instance->nJobs;j++){ h_long_list[j + h_solution->s[j+i*h_instance->nJobs]*h_instance->nJobs]++; } } /* for(i=0;i<nBlocks;i++){ for(j=0;j<h_instance->nJobs;j++){ h_long_list[j + h_solution->s[j+i*h_instance->nJobs]*h_instance->nJobs]++; if(h_solution->s[j + i*h_instance->nJobs]>4){ printf("cpu teste: %d\n",h_solution->s[j + i*h_instance->nJobs]); } } }*/ gettimeofday(&fim, NULL); gettimeofday(&t_fim, NULL); tmili = (int) (1000 * (fim.tv_sec - inicio.tv_sec) + (fim.tv_usec - inicio.tv_usec) / 1000); // tmelhora = (int) (1000 * (t_fim.tv_sec - t_inicio.tv_sec) + (t_fim.tv_usec - t_inicio.tv_usec) / 1000); if((ite!=n_iteration)&&(tmili<ti*(60000))&&(tmelhora<15000)){ //reallocation pointers of Instanc tmelhora=0; h_instance->cost = (Tcost*)(d_instance+1); h_instance->resourcesAgent =(TresourcesAgent*) (h_instance->cost +(h_instance->nJobs*h_instance->mAgents)); h_instance->capacity =(Tcapacity*) (h_instance->resourcesAgent + (h_instance->nJobs*h_instance->mAgents)); gpuMemcpy(d_instance, h_instance,size_instance, cudaMemcpyHostToDevice); //reallocation pointers of Solution h_solution->costFinal = (TcostFinal*)(d_solution+1); h_solution->s = (Ts*)(h_solution->costFinal + nBlocks); h_solution->resUsage = (TresUsage*)(h_solution->s + (h_instance->nJobs*nBlocks)); gpuMemcpy(d_solution, h_solution, size_solution, cudaMemcpyHostToDevice); //reallocation pointers of Ejection memset(h_ejection,0,size_ejection); h_ejection->pos=(Tpos*)(d_ejection + 1); h_ejection->op = (Top*)(h_ejection->pos+ (nBlocks*nThreads*maxChain)); h_ejection->sizeChain = (TSizeChain*)(h_ejection->op + (nBlocks*nThreads)); 
h_ejection->delta = (Tdelta*)(h_ejection->sizeChain + (nBlocks*nThreads)); gpuMemcpy(d_ejection, h_ejection, size_ejection, cudaMemcpyHostToDevice); gettimeofday(&time_rand,NULL); srand(time_rand.tv_usec); for(i=0;i<(nThreads*nBlocks);i++){ h_seed[i] = rand()%100000; } gpuMemcpy(d_seed, h_seed, sizeof(unsigned int)*(nThreads*nBlocks), cudaMemcpyHostToDevice); gpuMemcpy(d_short_list, h_short_list,sizeof(int)*(nBlocks*h_instance->nJobs), cudaMemcpyHostToDevice); }else{ printf("time:%d\n",tmili); } if((ite_b==0)&&(cost_saida<= 1.01 * b_solution)){ ite_b = ite; } ite++; // gettimeofday(&fim, NULL); // tmili = (int) (1000 * (fim.tv_sec - inicio.tv_sec) + (fim.tv_usec - inicio.tv_usec) / 1000); } printf("cost: %d\n ite: %d\n", cost_saida, ite_b); int k; int *cont_similarity = (int*)malloc(sizeof(int)*(h_instance->nJobs*nBlocks)); int *total_similarity = (int*)malloc(sizeof(int)*nBlocks); memset(total_similarity,0,sizeof(int)*nBlocks); memset(cont_similarity,0,sizeof(int)*(h_instance->nJobs*nBlocks)); //h_solution->costFinal[0]=cost_saida; //for(i=0;i<h_instance->nJobs;i++){ // h_solution->s[i] = sol_best[0]; //} // printf("ok1\n"); int* cont_freq = (int*)malloc(sizeof(int)*h_instance->nJobs*h_instance->mAgents); memset(cont_freq,0,h_instance->nJobs*h_instance->mAgents); for(i=0;i<h_instance->nJobs;i++){ for(j=0;j<h_instance->mAgents;j++){ cont_freq[i+j*h_instance->nJobs]=0; } } for(i=0;i<nBlocks;i++){ // printf("pelo block: %d\n",i); for(j=i+1;j<nBlocks;j++){ for(k=0;k<h_instance->nJobs;k++){ if(best_solution->s[k + i*h_instance->nJobs] == best_solution->s[k + j*h_instance->nJobs]){ total_similarity[i]++; total_similarity[j]++; cont_similarity[k + i*h_instance->nJobs]++; cont_similarity[k + j*h_instance->nJobs]++; } } } } // printf("ok2\n"); for(i=0;i<nBlocks;i++){ printf("Solution: %d - %d\n",best_solution->costFinal[i], h_solution->costFinal[i]); printf("Similarity Total:%d\n",total_similarity[i]); } aux = 0; // k = total_similarity[0]; k = best_solution->costFinal[0]; 
for(i=1;i<nBlocks;i++){ //if(total_similarity[i]>k){ if(best_solution->costFinal[i]<k){ aux = i; // k=total_similarity[i]; k= best_solution->costFinal[i]; } } for(i=0;i<nBlocks;i++){ for(j=0;j<h_instance->nJobs;j++){ cont_freq[j+best_solution->s[j+i*h_instance->nJobs]*h_instance->nJobs]++; } } printf("Solution with most similarity is %d with %d, cost: %d\n",aux,total_similarity[aux],best_solution->costFinal[aux] ); create_solution(best_solution,h_instance,aux,temp_teste); create_frequency(best_solution,h_instance,cont_similarity,aux,temp_teste); create_frequency_2(best_solution,h_instance,cont_freq,aux,temp_teste); cudaFree(states); cudaFree(d_instance); cudaFree(d_solution); cudaFree(d_ejection); cudaFree(d_seed); cudaFree(d_short_list); // free(sol_best); free(h_short_list); free(h_seed); free(h_instance); free(h_solution); free(h_ejection); free(best_solution); free(cont_similarity); free(total_similarity); return 0; }
0585c307a16c7d1cd2474292eae47d4763e1e493.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_yvel_plus_4_back; int xdim0_update_halo_kernel2_yvel_plus_4_back_h = -1; __constant__ int ydim0_update_halo_kernel2_yvel_plus_4_back; int ydim0_update_halo_kernel2_yvel_plus_4_back_h = -1; __constant__ int xdim1_update_halo_kernel2_yvel_plus_4_back; int xdim1_update_halo_kernel2_yvel_plus_4_back_h = -1; __constant__ int ydim1_update_halo_kernel2_yvel_plus_4_back; int ydim1_update_halo_kernel2_yvel_plus_4_back_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_yvel_plus_4_back * (y) + \ xdim0_update_halo_kernel2_yvel_plus_4_back * \ ydim0_update_halo_kernel2_yvel_plus_4_back * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_yvel_plus_4_back * (y) + \ xdim1_update_halo_kernel2_yvel_plus_4_back * \ ydim1_update_halo_kernel2_yvel_plus_4_back * (z)) // user function __device__ inline void update_halo_kernel2_yvel_plus_4_back(double *yvel0, double *yvel1, const int *fields) { if (fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0, 0, 0)] = yvel0[OPS_ACC0(0, 0, 4)]; if (fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0, 0, 0)] = yvel1[OPS_ACC1(0, 0, 4)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_yvel_plus_4_back( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_back + idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_back * ydim0_update_halo_kernel2_yvel_plus_4_back; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_back + idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_back * 
ydim1_update_halo_kernel2_yvel_plus_4_back; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_yvel_plus_4_back(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_yvel_plus_4_back(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 89)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(89, "update_halo_kernel2_yvel_plus_4_back"); OPS_kernels[89].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_4_back_h || ydim0 != ydim0_update_halo_kernel2_yvel_plus_4_back_h || xdim1 != xdim1_update_halo_kernel2_yvel_plus_4_back_h || ydim1 != ydim1_update_halo_kernel2_yvel_plus_4_back_h) { 
hipMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_plus_4_back, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_yvel_plus_4_back_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_plus_4_back, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_yvel_plus_4_back_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_plus_4_back, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_yvel_plus_4_back_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_plus_4_back, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_yvel_plus_4_back_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = 
args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[89].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_plus_4_back), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[89].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[89].mpi_time += t2 - t1; OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
0585c307a16c7d1cd2474292eae47d4763e1e493.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_yvel_plus_4_back; int xdim0_update_halo_kernel2_yvel_plus_4_back_h = -1; __constant__ int ydim0_update_halo_kernel2_yvel_plus_4_back; int ydim0_update_halo_kernel2_yvel_plus_4_back_h = -1; __constant__ int xdim1_update_halo_kernel2_yvel_plus_4_back; int xdim1_update_halo_kernel2_yvel_plus_4_back_h = -1; __constant__ int ydim1_update_halo_kernel2_yvel_plus_4_back; int ydim1_update_halo_kernel2_yvel_plus_4_back_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_yvel_plus_4_back * (y) + \ xdim0_update_halo_kernel2_yvel_plus_4_back * \ ydim0_update_halo_kernel2_yvel_plus_4_back * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_yvel_plus_4_back * (y) + \ xdim1_update_halo_kernel2_yvel_plus_4_back * \ ydim1_update_halo_kernel2_yvel_plus_4_back * (z)) // user function __device__ inline void update_halo_kernel2_yvel_plus_4_back(double *yvel0, double *yvel1, const int *fields) { if (fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0, 0, 0)] = yvel0[OPS_ACC0(0, 0, 4)]; if (fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0, 0, 0)] = yvel1[OPS_ACC1(0, 0, 4)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_yvel_plus_4_back( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_back + idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_back * ydim0_update_halo_kernel2_yvel_plus_4_back; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_back + idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_back * ydim1_update_halo_kernel2_yvel_plus_4_back; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { 
update_halo_kernel2_yvel_plus_4_back(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_yvel_plus_4_back(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 89)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(89, "update_halo_kernel2_yvel_plus_4_back"); OPS_kernels[89].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_4_back_h || ydim0 != ydim0_update_halo_kernel2_yvel_plus_4_back_h || xdim1 != xdim1_update_halo_kernel2_yvel_plus_4_back_h || ydim1 != ydim1_update_halo_kernel2_yvel_plus_4_back_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_plus_4_back, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_yvel_plus_4_back_h = 
xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_plus_4_back, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_yvel_plus_4_back_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_plus_4_back, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_yvel_plus_4_back_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_plus_4_back, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_yvel_plus_4_back_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = 
base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[89].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_yvel_plus_4_back<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[89].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[89].mpi_time += t2 - t1; OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
5b0d827ec3333126a3f4659db7acc3397a70fb36.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cudaconv2.cuh> #include "tt.h" /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread. * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. 
* * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise * targets: (numColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * Number of filters must be divisible by 16. * Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. */ template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_color(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[numColors*16][16 + 1]; __shared__ float shHidActs[16][16*imgsPerThread]; const int blockCaseIdx = blockIdx.x * 16*imgsPerThread; const int numRegionsX = DIVUP(imgSizeX, 4); const int blockRegionIdx = blockIdx.y; const int blockRegionIdxX = blockRegionIdx % numRegionsX; const int blockRegionIdxY = blockRegionIdx / numRegionsX; const int blockRegionLeft = blockRegionIdxX * 4; const int blockRegionTop = blockRegionIdxY * 4; const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4; const int pxY = blockRegionTop + pxYInRegion; const int pxX = blockRegionLeft + pxXInRegion; const int pxIdx = pxY * imgSizeX + pxX; const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX; const int numModules = numModulesY * numModulesX; const int filterPixels = filterSize * filterSize; const 
int imgPixels = imgSizeX * imgSizeY; const int tidx = threadIdx.y * 16 + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; hidActs += blockCaseIdx + loadY * numImages * numModules + loadX; filters += threadIdx.x; targets += pxIdx * numImages + blockCaseIdx + threadIdx.x; float prod[numColors][imgsPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockRegionTop - paddingStart < filterSize ? 0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride); const int startX = blockRegionLeft - paddingStart < filterSize ? 0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride); float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x]; float* shHidActLoad = &shHidActs[loadY][loadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInModuleY = pxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInModuleX = pxX - moduleLeft; const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize; const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX; for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time // Now the threads split up into half-warps, and each half-warp decides if it's interested. 
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * 16; i += 32) { if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = 0; } } } if (isPxInImg && isPxInModule) { // This half-warp is interested, so it's going to load the weights from this module to its pixel. // Not fully coalesced read :( // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much. const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f] : &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f]; #pragma unroll for (int c = 0; c < numColors; c++) { shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation if (isPxInImg && isPxInModule) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16]; } } } } __syncthreads(); } } } // Not fully coalesced write :(... 
shmem (and fully coalesced) version is actually slightly slower, though if (isPxInImg) { if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i]; } } } } } } /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread) * blockIdx.x.y = 1..numImgColors/colorsPerThread * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * numImageColors/numGroups must be divisible by colorsPerThread. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. * * To be used when there are 4-16 color channels. 
*/ template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*16][16 + 1]; __shared__ float shHidActs[16][16*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,16*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int numRegionsX = DIVUP(imgSizeX, 4); const int blockRegionIdx = blockIdx.y; const int blockRegionIdxX = blockRegionIdx % numRegionsX; const int blockRegionIdxY = blockRegionIdx / numRegionsX; const int blockRegionLeft = blockRegionIdxX * 4; const int blockRegionTop = blockRegionIdxY * 4; const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4; const int pxY = blockRegionTop + pxYInRegion; const int pxX = blockRegionLeft + pxXInRegion; const int pxIdx = pxY * imgSizeX + pxX; const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX; const uint numModules = numModulesY * numModulesX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * 16 + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages 
* numModules + loadX; filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x; targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockRegionTop - paddingStart < filterSize ? 0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride); const int startX = blockRegionLeft - paddingStart < filterSize ? 0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x]; float* shHidActLoad = &shHidActs[loadY][loadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInModuleY = pxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInModuleX = pxX - moduleLeft; const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize; const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX; for (int f = 0; f < numFiltersPerGroup; f += 16) { // multipply with 16 filters at a time // Now the threads split up into half-warps, and each half-warp decides if it's interested. const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * 16; i += 32) { if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. 
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = 0; } } } if (isPxInImg && isPxInModule) { // This half-warp is interested, so it's going to load the weights from this module to its pixel. // Not fully coalesced read :( // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much. const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation if (isPxInImg && isPxInModule) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16]; } } } } __syncthreads(); } } } // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though if (isPxInImg) { if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i]; } } } } } } /* * Block size: B_YxB_X. 
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread) * blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread) * blockIdx.y determines image pixel in target image. * * threadIdx.x determines case. * threadIdx.y determines color. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false. * numFiltersPerGroup must be divisible by 16. * * B_X * imgsPerThread must be divisible by 32. * numFilterColors must be divisible by B_Y*colorsPerThread. * B_X*B_Y must be divisible by 32. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. * * To be used when there are >= 16 color channels. 
*/ template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv> __global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16 __shared__ float shHidActs[16][B_X*imgsPerThread]; const int numColorsPerBlock = B_Y*colorsPerThread; const int numImagesPerBlock = B_X*imgsPerThread; const int numImgBlocks = DIVUP(numImages, numImagesPerBlock); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * numImagesPerBlock; const int imgColorIdx = (blockIdx.x / numImgBlocks) * numColorsPerBlock; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32; const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16; const int numModules = numModulesY * numModulesX; /* //------------- { int i_img_l, j_l, f_l, moduleIdx_l; //numImgBlocks = DIVUP(numImages, numImagesPerBlock); //blocks = dim3(numImgBlocks * (numImgColors/numColorsPerBlock), imgPixels); //imgColorIdx = (blockIdx.x / numImgBlocks) * 
numColorsPerBlock //blockCaseIdx=(blockIdx.x % numImgBlocks) * numImagesPerBlock; //block.x = bx_img_block + numImgBlocks * bx_color_block //moduleIdx = my * numModulesX + mx; //imgColorIdx = bx_color_block * numColorsPerBlock; // color idx globally BaseIndex<3> hidIndex; hidIndex << Index(numFiltersPerGroup, blockGroupIdx) //= blockFilterIdx, blockGroupIdx = imgColorIdx / numFilterColors; >>f_l //16 < numFiltersPerGroup >>j_l //B_X*B_Y/32 < 16 << Index(1, hidActLoadY) // < B_X*B_Y/32 << numModules >>moduleIdx_l //=my * numModulesX + mx < numModules << numImages << Index(numImagesPerBlock, (blockIdx.x % numImgBlocks)) //blockCaseIdx >> i_img_l//step 32 < numImagesPerBlock << Index(1, hidActLoadX);//tidx % 32; //------------------ int pxIdxInFilter_l, i_clr_l; BaseIndex<3> filterIndex; filterIndex << Index(1, filterColorIdx) //= imgColorIdx % numFilterColors; // color idx within group //fLoad[i * filterPixels * numFilters]; >> i_clr_l // B_X*B_Y/16, < numColorsPerBlock << Index(1, filtersLoadY) //< B_X*B_Y/16 << filterPixels //filters[pxIdxInFilter * numFilters + f] >> pxIdxInFilter_l << numFilters << Index(numFiltersPerGroup, blockGroupIdx) //blockFilterIdx >> f_l // 16 < numFiltersPerGroup << Index(1, filtersLoadX); // < 16 //------------------ //c * B_Y * imgPixels * numImages + i * B_X int c_t_l, i_t_l; BaseIndex<2> targetIndex; targetIndex << Index(1, imgColorIdx) //(blockIdx.x / numImgBlocks) * numColorsPerBlock >> c_t_l // *B_Y << Index(1, threadIdx.y) << imgPixels << Index(1, blockPixelIdx) << numImages << Index(1, blockCaseIdx)// (blockIdx.x % numImgBlocks) * numImagesPerBlock; >> i_t_l // *B_X << Index(1, threadIdx.x); float prod[colorsPerThread][imgsPerThread]; memset(prod, 0, sizeof(prod)); const int startY = blockPixelIdxY - paddingStart < filterSize ? 
0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time for (int i = 0; i < numImagesPerBlock; i += 32) { if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) { #pragma unroll for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActs[j + hidActLoadY][i + hidActLoadX] = hidActs[hidIndex._offset + OFFSET(f, hidIndex) + OFFSET(j, hidIndex) + OFFSET_(moduleIdx, moduleIdx_l, hidIndex) + OFFSET_(i, i_img_l, hidIndex)]; } } else { #pragma unroll for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActs[j + hidActLoadY][i + hidActLoadX] = 0; } } } //no local connection here! // const float* fLoad = conv ? 
&filters[pxIdxInFilter * numFilters + f] // : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f]; #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) { if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilters[filtersLoadY + i][filtersLoadX] = filters[filterIndex._offset + OFFSET_(i, i_clr_l, filterIndex) + OFFSET(pxIdxInFilter,filterIndex) + OFFSET(f,filterIndex)]; } } __syncthreads(); #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X]; } } } __syncthreads(); }//f }//mx }//my if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { int toff = targetIndex._offset + OFFSET_(c, c_t_l, targetIndex) + OFFSET_(i, i_t_l, targetIndex); targets[toff] = scaleTargets * targets[toff] + scaleOutputs * prod[c][i]; } //c } //if } //i } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { int toff = targetIndex._offset + OFFSET_(c, c_t_l, targetIndex) + OFFSET_(i, i_t_l, targetIndex); targets[toff] = scaleOutputs * prod[c][i]; } //c }// if } // i }// if scale } */ //------------------ hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX; filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; memset(prod, 0, sizeof(prod)); const int 
startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * B_X; i += 32) { if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) { #pragma unroll for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * B_X * imgsPerThread + i] = 0; } } } const float* fLoad = conv ? 
&filters[pxIdxInFilter * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f]; #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) { if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread) * blockIdx.x.y = 1..numImgColors/colorsPerThread * blockIdx.y determines 4x4 image region in target image, also sample * In essence, blockIdx.y.x = 1..numRegions * blockIdx.y.y = 1..overSample * * threadIdx.x determines case. * threadIdx.y determines pixel. 
* * overSample := numFilterColors*numGroups/numImgColors * ^ this is the number of groups that each color channel is connected to * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (overSample, numImgColors, imgSizeY, imgSizeX, numImages) * * colorIndices: (numGroups, numFilterColors) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * numFilterColors must be divisible by colorsPerThread. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. * * To be used when there are 4-16 color channels. 
 */
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
                                                 const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                                 const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                                 const int numImgColors, const int numFilterColors, const int numGroups,
                                                 const float scaleTargets, const float scaleOutputs) {
    // Shared staging buffers: a 16-filter slab of weights for colorsPerThread colors
    // (the +1 column pad presumably avoids shared-memory bank conflicts -- TODO confirm),
    // and 16 hidAct rows of 16*imgsPerThread cases.
    __shared__ float shFilters[colorsPerThread*16][16 + 1];
    __shared__ float shHidActs[16][16*imgsPerThread];
    __shared__ int shColors[colorsPerThread]; // not really necessary -- can repurpose the other shmems

    // blockIdx.x encodes (image batch, color batch); blockIdx.y encodes (sample, 4x4 region).
    const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
    const int numRegionsX = DIVUP(imgSizeX, 4);
    const int numRegions = numRegionsX * numRegionsX;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;

    const int overSample = gridDim.y / numRegions;
    const int blockSample = blockIdx.y / numRegions;
    const int groupsPerSample = numGroups / overSample;
    const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    const int blockRegionIdx = blockIdx.y % numRegions;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    // Each of the 16 threadIdx.y values owns one pixel of this block's 4x4 region.
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSizeX + pxX;
    const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
    const uint numModules = numModulesY * numModulesX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    // hidActs are fetched cooperatively, 32 consecutive cases per row, for coalescing.
    const int loadY = tidx / 32, loadX = tidx % 32;

    // Advance the base pointers to this block's slice of each matrix.
    hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX;
    filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
    targets += blockSample * numImgColors * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image) pair.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Range of output modules whose receptive field can overlap this 4x4 region.
    const int startY = blockRegionTop - paddingStart < filterSize ? 0
                        : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0
                        : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
    float* shHidActLoad = &shHidActs[loadY][loadX];

    // Resolve this block's sparse filter-color -> image-color offsets once up front.
    // NOTE(review): there is no __syncthreads() between this write and the reads of
    // shColors at the bottom; ordering relies on the barriers inside the module loop.
    // If the loop body could ever execute zero times, that read would be unsynchronized
    // -- confirm valid launch parameters always give startY < endY and startX < endX.
    if (tidx < colorsPerThread) {
        shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
    }

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;

            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;

            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        // Out-of-range cases contribute zeros so the compute loop needs no bounds check.
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;
                        }
                    }
                }

                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.
                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }
                }
                // Barrier sits outside the divergent branch above, so every thread reaches it.
                __syncthreads();
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                // Second barrier keeps the next iteration's loads from racing this one's reads.
                __syncthreads();
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        // Blend: targets = scaleTargets * old + scaleOutputs * new.
                        targets[shColors[c] + i * 16] = scaleTargets * targets[shColors[c] + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[shColors[c] + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}

/*
 * Block size: B_YxB_X.
 * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
 *  In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
 *              blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
 * blockIdx.y determines image pixel in target image, sample idx.
 *  In essence, blockIdx.y.x = 1..imgPixels
 *              blockIdx.y.y = 1..overSample
 *
 * threadIdx.x determines case.
 * threadIdx.y determines color.
* * overSample := numFilterColors*numGroups/numImgColors * ^ this is the number of groups that each color channel is connected to * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (overSample, numImgColors, imgSizeY, imgSizeX, numImages) * * colorIndices: (numGroups, numFilterColors) * * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false. * numFiltersPerGroup must be divisible by 16. * numFilterColors*numGroups must be divisible by numImgColors. * * B_X * imgsPerThread must be divisible by 32. * numFilterColors must be divisible by B_Y*colorsPerThread. * B_X*B_Y must be divisible by 32. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. * * To be used when there are >= 16 color channels. 
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_manycolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
                                               const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                               const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                               const int numImgColors, const int numFilterColors, const int numGroups,
                                               const float scaleTargets, const float scaleOutputs) {
    // Shared staging: 16 filters' weights for B_Y*colorsPerThread colors (+1 column pad,
    // presumably to avoid shared-memory bank conflicts -- TODO confirm), and 16 hidAct
    // rows of B_X*imgsPerThread cases.
    __shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
    __shared__ float shHidActs[16][B_X*imgsPerThread];
    __shared__ int shColors[colorsPerThread * B_Y]; // not really necessary -- can repurpose the other shmems

    // blockIdx.x encodes (image batch, color batch); blockIdx.y encodes (sample, pixel).
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // Two cooperative-load layouts: hidActs as (row, 32 cases), filters as (row, 16 filters).
    const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
    const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
    const int numModules = numModulesY * numModulesX;

    const int overSample = gridDim.y / imgPixels;
    const int blockSample = blockIdx.y / imgPixels;
    const int groupsPerSample = numGroups / overSample;

//    const int overSample = (numFilterColors * numGroups) / numImgColors;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y * colorsPerThread; // color idx globally
    const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
//    const int filterColorsPerSample = numFilterColors / overSample;

    const int blockPixelIdx = blockIdx.y % imgPixels;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // Advance the base pointers to this block's slice of each matrix.
    hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += blockSample * numImgColors * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image) pair.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Range of output modules whose receptive field can overlap this pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];

    // Resolve this block's sparse filter-color -> image-color offsets once up front.
    // NOTE(review): no __syncthreads() between this write and the reads of shColors at
    // the bottom; ordering relies on the barriers inside the module loop. If the loop
    // body could ever execute zero times, that read would be unsynchronized -- confirm
    // valid launch parameters always give startY < endY and startX < endX.
    if (tidx < colorsPerThread * B_Y) {
        shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
    }

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * B_X; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        // Out-of-range cases contribute zeros so the compute loop needs no bounds check.
                        #pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = 0;
                        }
                    }
                }
                // Filter layout differs between convolutional (shared weights) and local (per-module weights).
                const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
                                          : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
                    // Guard only matters when the filter tile height isn't a multiple of the load stride.
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
                    }
                }

                // All threads reach this barrier; shared buffers are fully populated before the compute.
                __syncthreads();
                // Do some actual computation
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    #pragma unroll
                    for (int w = 0; w < 16; w++) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
                        }
                    }
                }
                // Second barrier keeps the next iteration's loads from racing this one's reads.
                __syncthreads();
            }
        }
    }
    if (scale) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    // Blend: targets = scaleTargets * old + scaleOutputs * new.
                    targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleTargets * targets[shColors[c * B_Y + threadIdx.y] + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}

/*
 * hidActs:     (numFilters, numModules, numImages)
 * filters:     (numFilterColors, filterPixels, numFilters)               if conv
 *              (numModules, numFilterColors, filterPixels, numFilters)   otherwise
 *
targets: (overSample, numImgColors, imgPixels, numImages) * * Note: all of these convolution routines are optimized for the case when * the number of images (i.e. the minibatch size) is a multiple of 128. * Other batch sizes will work, but but I made no attempt whatsoever * to make them work fast. */ void _imgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) { int numFilterColors = numImgColors / numGroups; int numImages = hidActs.getNumCols(); int numFilters = filters.getNumCols(); int numModules = hidActs.getNumRows() / numFilters; int filterModuleMult = conv ? 1 : numModules; int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors); int filterSize = sqrt(filterPixels); int imgPixels = imgSizeY * imgSizeX; int numModulesX = numModules / numModulesY; assert(numImgColors % numGroups == 0); assert(numFilters % (16*numGroups) == 0); assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); assert(numGroups == 1 || numFilterColors % 4 == 0); assert(filterPixels == filterSize * filterSize); assert(hidActs.getNumRows() == numModules * numFilters); assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels); assert(numModules == numModulesY * numModulesX); assert(hidActs.isContiguous()); assert(filters.isContiguous()); assert(!hidActs.isTrans()); assert(!filters.isTrans()); assert(!targets.isTrans()); // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0); assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX); assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY); assert(moduleStride <= filterSize); assert(targets.isContiguous()); // no stride support here! 
dim3 blocks; dim3 threads(16,16); int colorsPerThread; int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2; if (numFilterColors % 8 == 0) { threads = dim3(32, 4); colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2; imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; assert(numFilterColors % (threads.y * colorsPerThread) == 0); blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels); } else if (numFilterColors > 3) { colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2; blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } else { blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0; if (scaleTargets == 0) { // do not scale or use targets matrix targets.resize(numImgColors*imgPixels, numImages); } else { assert(targets.getNumRows() == numImgColors * imgPixels); assert(targets.getNumCols() == numImages); } if (conv) { // convolutional units if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, 
hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), 
targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, 
filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, 
numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 
4, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, 
hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else { if (imgsPerThread == 8) { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, 
scaleOutput); } } else { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<4, 1, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 1, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<4, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 2, false, true, true>), dim3(blocks), dim3(threads), 
0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<4, 3, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 3, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<4, 1, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 1, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<4, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<4, 3, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 3, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { 
hipFuncSetCacheConfig(img_acts_color<2, 1, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 1, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<2, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<2, 3, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 3, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<2, 1, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 1, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<2, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, 
numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<2, 3, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 3, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else { // do scale if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, 
paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, 
scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { 
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, true>, hipFuncCachePreferShared); 
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), 
filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, 
numGroups, scaleTargets, scaleOutput); } } } } else { if (imgsPerThread == 8) { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, true, false, true>), dim3(blocks), 
dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<4, 1, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 1, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<4, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<4, 3, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 3, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if 
(numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<4, 1, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 1, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<4, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<4, 3, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 3, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<2, 1, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 1, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<2, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), 
targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<2, 3, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 3, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<2, 1, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 1, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<2, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<2, 3, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 3, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } else { // local, unshared units if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 
0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if 
(checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { 
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { 
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
img_acts_mediumcolor<4, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), 
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else { if (imgsPerThread == 8) { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } 
else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), 
targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<4, 1, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 1, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<4, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<4, 3, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 3, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<4, 1, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 1, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { 
hipFuncSetCacheConfig(img_acts_color<4, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<4, 3, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 3, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<2, 1, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 1, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<2, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<2, 3, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 3, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), 
targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<2, 1, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 1, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<2, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<2, 3, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 3, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else { // do scale if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, 
numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 
scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 
32, 1, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, false>, 
hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, 
hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, 
moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else { if (imgsPerThread == 8) { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, true, true, false>, 
hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<4, 1, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 1, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, 
numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<4, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<4, 3, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 3, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<4, 1, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 1, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<4, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<4, 3, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<4, 3, true, false, 
false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<2, 1, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 1, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<2, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<2, 3, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 3, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<2, 1, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<2, 1, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if 
(numFilterColors == 2) {
    hipFuncSetCacheConfig(img_acts_color<2, 2, true, false, false>, hipFuncCachePreferShared);
    hipLaunchKernelGGL(( img_acts_color<2, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
        numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
    hipFuncSetCacheConfig(img_acts_color<2, 3, true, false, false>, hipFuncCachePreferShared);
    hipLaunchKernelGGL(( img_acts_color<2, 3, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
        numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} } } } } }
// Kernel launches above are asynchronous and do not report failures directly;
// surface any launch/configuration error from the dispatch tree here.
cutilCheckMsg("imgActs: kernel execution failed");
}

// Convolutional imgActs: computes image gradients from hidden activations and
// filters, overwriting `targets` (scaleTargets = 0, i.e. the "do not scale or
// use targets matrix" path inside _imgActs) with scaleOutput = 1 and conv = true.
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                 int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true);
}

// Convolutional imgActs with explicit accumulation coefficients:
// targets := scaleTargets * targets + scaleOutput * result (conv = true).
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                 int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
                 float scaleTargets, float scaleOutput) {
    _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true);
}

// Locally-connected (untied-weights) imgActs: same as convImgActs but with
// conv = false, so _imgActs treats `filters` as having per-module weights.
// Overwrites `targets` (scaleTargets = 0, scaleOutput = 1).
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                  int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false);
}

// Locally-connected imgActs with explicit accumulation coefficients:
// targets := scaleTargets * targets + scaleOutput * result (conv = false).
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                  int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
                  float scaleTargets, float scaleOutput) {
    _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false);
}

/*
 * hidActs:       (numFilters, numModulesY, numModulesX, numImages)
 * filters:       (numFilterColors, filterPixels, numFilters)                             if conv
 *                (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters)   otherwise
 * targets:       (overSample, numImgColors, imgSizeY, imgSizeX, numImages)
 * colorIndices:  (numGroups, numFilterColors)
 *
 * where overSample := (numFilterColors * numGroups) / numImgColors
 *
 * Note: all of these convolution routines are optimized for the case when
 * the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
 * to make them work fast.
 */
void _imgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                    int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride,
                    int numImgColors, int numFilterColors, int numGroups,
                    float scaleTargets, float scaleOutput, bool conv) {
    int numImages = hidActs.getNumCols();
    int numFilters = filters.getNumCols();
    // int numFiltersPerGroup = numFilters / numGroups;
    // Each column of hidActs is one image; rows are numFilters * numModules.
    int numModules = hidActs.getNumRows() / numFilters;
    // For conv layers the filter rows cover one module's weights; for local
    // (untied) layers they cover all numModules modules.
    int filterModuleMult = conv ?
1 : numModules;
    // Derive geometry from the matrix dimensions; filters are assumed square.
    int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
    int filterSize = sqrt(filterPixels);  // NOTE(review): implicit double->int truncation; validated by the assert below
    int imgPixels = imgSizeY * imgSizeX;
    int numModulesX = numModules / numModulesY;
    // How many times each image color is covered across all groups' filter colors.
    int overSample = (numFilterColors * numGroups) / numImgColors;

    // Sanity checks on the color/group configuration this sparse path supports.
    assert(numImgColors % numFilterColors == 0);
    assert(numFilters % (16*numGroups) == 0);
    assert((numFilterColors * numGroups) % numImgColors == 0);
    assert(numGroups > 1);
    assert(numFilterColors > 3 && numFilterColors % 2 == 0);

    // Consistency between the declared geometry and the actual matrix shapes.
    assert(filterPixels == filterSize * filterSize);
    assert(hidActs.getNumRows() == numModules * numFilters);
    assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
    assert(numModules == numModulesY * numModulesX);

    assert(hidActs.isContiguous());
    assert(filters.isContiguous());

    assert(!hidActs.isTrans());
    assert(!filters.isTrans());
    assert(!targets.isTrans());
    // These routines don't handle the case when only part of the image is visited in the convolution
    assert(paddingStart <= 0);
    assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
    assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
    assert(moduleStride <= filterSize);

    assert(targets.isContiguous()); // no stride support here!

    // Pick launch configuration: block shape and per-thread tiling depend on
    // how many filter colors there are and whether numImages divides nicely.
    dim3 blocks;
    dim3 threads;
    int colorsPerThread;
    int imgsPerThread;
    if (numFilterColors % 8 == 0) {
        // "many color" kernels: 32x4 blocks; each thread handles 2 or 4 colors
        // and 1/2/4 images depending on divisibility of the minibatch size.
        threads = dim3(32, 4);
        colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
        imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
        assert(numFilterColors % (threads.y * colorsPerThread) == 0);
        blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), overSample * imgPixels);
    } else if (numFilterColors > 3) {
        // "medium color" kernels: 16x16 blocks over 4x4 image-pixel regions.
        imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
        threads = dim3(16, 16);
        colorsPerThread = numFilterColors % 4 == 0 ?
4 : 2; blocks = dim3(DIVUP(numImages,16*imgsPerThread) * (numImgColors / colorsPerThread), overSample * DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0; if (scaleTargets == 0) { // do not scale or use targets matrix targets.resize(overSample*numImgColors*imgPixels, numImages); } else { assert(targets.getNumRows() == overSample * numImgColors * imgPixels); assert(targets.getNumCols() == numImages); } if (conv) { if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), 
dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { 
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, 
paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), 
dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, 
hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } } else { // do scale if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, 
true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, 
numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, 
numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, 
true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, true, true>, 
hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { 
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } } 
} else { if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), 
filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, false>, hipFuncCachePreferShared); 
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } 
else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, 
filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), 
filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, false, 
false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } } else { // do scale if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { 
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, 
paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), 
targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
img_acts_mediumcolor_sparse_rand<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, 
numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } } } cutilCheckMsg("imgActsSparse: kernel execution failed"); } void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) { _imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, 
numImgColors, numFilterColors, numGroups, 0, 1, true); } void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups, float scaleTargets, float scaleOutput) { _imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, true); } void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) { _imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, false); } void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups, float scaleTargets, float scaleOutput) { _imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, false); }
5b0d827ec3333126a3f4659db7acc3397a70fb36.cu
/* * Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cudaconv2.cuh> #include "tt.h" /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread. * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise * targets: (numColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * Number of filters must be divisible by 16. 
* Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. */ template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_color(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[numColors*16][16 + 1]; __shared__ float shHidActs[16][16*imgsPerThread]; const int blockCaseIdx = blockIdx.x * 16*imgsPerThread; const int numRegionsX = DIVUP(imgSizeX, 4); const int blockRegionIdx = blockIdx.y; const int blockRegionIdxX = blockRegionIdx % numRegionsX; const int blockRegionIdxY = blockRegionIdx / numRegionsX; const int blockRegionLeft = blockRegionIdxX * 4; const int blockRegionTop = blockRegionIdxY * 4; const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4; const int pxY = blockRegionTop + pxYInRegion; const int pxX = blockRegionLeft + pxXInRegion; const int pxIdx = pxY * imgSizeX + pxX; const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX; const int numModules = numModulesY * numModulesX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeX * imgSizeY; const int tidx = threadIdx.y * 16 + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; hidActs += blockCaseIdx + loadY * numImages * numModules + loadX; filters += threadIdx.x; targets += pxIdx * numImages + blockCaseIdx + threadIdx.x; float prod[numColors][imgsPerThread]; #pragma unroll for (int c = 0; c < numColors; 
c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockRegionTop - paddingStart < filterSize ? 0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride); const int startX = blockRegionLeft - paddingStart < filterSize ? 0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride); float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x]; float* shHidActLoad = &shHidActs[loadY][loadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInModuleY = pxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInModuleX = pxX - moduleLeft; const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize; const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX; for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time // Now the threads split up into half-warps, and each half-warp decides if it's interested. const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * 16; i += 32) { if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. 
shHidActLoad[j * 16 * imgsPerThread + i] = 0; } } } if (isPxInImg && isPxInModule) { // This half-warp is interested, so it's going to load the weights from this module to its pixel. // Not fully coalesced read :( // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much. const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f] : &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f]; #pragma unroll for (int c = 0; c < numColors; c++) { shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation if (isPxInImg && isPxInModule) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16]; } } } } __syncthreads(); } } } // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though if (isPxInImg) { if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i]; } } } } } } /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread) * blockIdx.x.y = 1..numImgColors/colorsPerThread * blockIdx.y determines 4x4 image region in target image. 
 *
 * threadIdx.x determines case.
 * threadIdx.y determines pixel.
 *
 * hidActs:     (numFilters, numModulesY, numModulesX, numImages)
 * filters:     (numFilterColors, filterPixels, numFilters)                               if conv
 *              (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters)     otherwise
 * targets:     (numImageColors, imgSizeY, imgSizeX, numImages)
 *
 * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
 *
 * numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
 * 16 * imgsPerThread must be divisible by 32.
 * numImageColors/numGroups must be divisible by colorsPerThread.
 *
 * This version loads 32 cases at a time, so it gets full coalescing on that load.
 * It only loads 16 weights at a time, so those aren't fully coalesced.
 * This version conserves shared memory by loading 16 filters at a time rather than 32.
 *
 * To be used when there are 4-16 color channels.
 */
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets,
                                     const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                     const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart,
                                     const int moduleStride, const int numImgColors, const int numGroups,
                                     const float scaleTargets, const float scaleOutputs) {
    // Shared tiles: 16 filters' worth of weights (inner dim padded to 16+1,
    // presumably to avoid shared-memory bank conflicts on column access) and
    // 16 rows of hidden activations for 16*imgsPerThread cases.
    __shared__ float shFilters[colorsPerThread*16][16 + 1];
    __shared__ float shHidActs[16][16*imgsPerThread];

    // blockIdx.x packs both the image-batch index and the color-block index.
    const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;

    const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // blockIdx.y selects a 4x4 region of the target image; threadIdx.y selects
    // the pixel within that region (threads 0..15 map to the 4x4 grid).
    const int numRegionsX = DIVUP(imgSizeX, 4);
    const int blockRegionIdx = blockIdx.y;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSizeX + pxX;
    // Edge regions can hang off the image; such threads still help load shmem
    // but skip the compute/write phases.
    const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
    const uint numModules = numModulesY * numModulesX; // NOTE: uint here, int in sibling kernels
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    // Loader coordinates: the 256 threads are re-flattened into 8 rows of 32
    // for coalesced 32-wide loads of hidActs.
    const int loadY = tidx / 32, loadX = tidx % 32;

    // Advance base pointers to this block's slice of each tensor.
    hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX;
    filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
    targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Range of modules whose receptive fields can touch this 4x4 region.
    const int startY = blockRegionTop - paddingStart < filterSize ? 0
                        : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0
                        : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
    float* shHidActLoad = &shHidActs[loadY][loadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;

            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;

            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        // Out-of-range cases contribute zero.
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;
                        }
                    }
                }

                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.
                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }
                }

                // Barrier is deliberately outside the isPxInImg/isPxInModule branch
                // so that every thread in the block reaches it.
                __syncthreads();
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                // Second barrier: protect shmem from being overwritten before all
                // threads finish reading it.
                __syncthreads();
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            // Blend into existing targets: scaleTargets * old + scaleOutputs * new.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            // Overwrite targets with the scaled result.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}

/*
 * Block size: B_YxB_X.
 * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
 *   In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
 *               blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
 * blockIdx.y determines image pixel in target image.
 *
 * threadIdx.x determines case.
 * threadIdx.y determines color.
 *
 * hidActs:     (numFilters, numModulesY, numModulesX, numImages)
 * filters:     (numFilterColors, filterPixels, numFilters)                               if conv
 *              (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters)     otherwise
 * targets:     (numImageColors, imgSizeY, imgSizeX, numImages)
 *
 * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
 *
 * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
 * numFiltersPerGroup must be divisible by 16.
 *
 * B_X * imgsPerThread must be divisible by 32.
 * numFilterColors must be divisible by B_Y*colorsPerThread.
 * B_X*B_Y must be divisible by 32.
 *
 * This version loads 32 cases at a time, so it gets full coalescing on that load.
 * It only loads 16 weights at a time, so those aren't fully coalesced.
 * This version conserves shared memory by loading 16 filters at a time rather than 32.
 *
 * To be used when there are >= 16 color channels.
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets,
                                        const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                        const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart,
                                        const int moduleStride, const int numImgColors, const int numGroups,
                                        const float scaleTargets, const float scaleOutputs) {
    // Weight tile (inner dim padded to 16+1, presumably to avoid shared-memory
    // bank conflicts) and hidden-activation tile.
    __shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
    __shared__ float shHidActs[16][B_X*imgsPerThread];

    const int numColorsPerBlock = B_Y*colorsPerThread;
    const int numImagesPerBlock = B_X*imgsPerThread;

    // blockIdx.x packs both the image-batch index and the color-block index.
    const int numImgBlocks = DIVUP(numImages, numImagesPerBlock);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * numImagesPerBlock;

    const int imgColorIdx = (blockIdx.x / numImgBlocks) * numColorsPerBlock; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // blockIdx.y selects a single target-image pixel.
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // Two loader layouts over the same B_X*B_Y threads: 32-wide rows for the
    // coalesced hidActs load, 16-wide rows for the filter load.
    const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
    const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
    const int numModules = numModulesY * numModulesX;

    /*
     * NOTE(review): the original source carried a large commented-out (never
     * compiled) experimental rewrite of the address arithmetic below using
     * BaseIndex/OFFSET bookkeeping helpers (hidIndex/filterIndex/targetIndex).
     * It duplicated the live code's indexing and has been condensed to this
     * note for readability.
     */
    //------------------
    // Advance base pointers to this block's slice of each tensor.
    hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators, zero-initialized.
    float prod[colorsPerThread][imgsPerThread];
    memset(prod, 0, sizeof(prod));

    // Range of modules whose receptive fields cover this pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * B_X; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        // Out-of-range cases contribute zero.
                        #pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = 0;
                        }
                    }
                }

                // conv: all modules share one filter bank; otherwise each module
                // has its own weights (locally connected layer).
                const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
                                          : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
                    // Guard is compile-time true when the row count divides evenly;
                    // otherwise the tail rows are bounds-checked.
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
                    }
                }

                __syncthreads();
                // Do some actual computation
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    #pragma unroll
                    for (int w = 0; w < 16; w++) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
                        }
                    }
                }
                // Protect shmem from being overwritten before all reads finish.
                __syncthreads();
            } // f
        } // mx
    } // my
    if (scale) {
        // Blend into existing targets: scaleTargets * old + scaleOutputs * new.
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        // Overwrite targets with the scaled result.
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}

/*
 * Block size: 16x16.
 * blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
 *   In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
 *               blockIdx.x.y = 1..numImgColors/colorsPerThread
 * blockIdx.y determines 4x4 image region in target image, also sample
 *   In essence, blockIdx.y.x = 1..numRegions
 *               blockIdx.y.y = 1..overSample
 *
 * threadIdx.x determines case.
 * threadIdx.y determines pixel.
 *
 * overSample := numFilterColors*numGroups/numImgColors
 *     ^ this is the number of groups that each color channel is connected to
 *
 * hidActs:     (numFilters, numModulesY, numModulesX, numImages)
 * filters:     (numFilterColors, filterPixels, numFilters)                               if conv
 *              (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters)     otherwise
 * targets:     (overSample, numImgColors, imgSizeY, imgSizeX, numImages)
 *
 * colorIndices: (numGroups, numFilterColors)
 *
 * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
 *
 * numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
 * 16 * imgsPerThread must be divisible by 32.
 * numFilterColors must be divisible by colorsPerThread.
 *
 * This version loads 32 cases at a time, so it gets full coalescing on that load.
 * It only loads 16 weights at a time, so those aren't fully coalesced.
 * This version conserves shared memory by loading 16 filters at a time rather than 32.
 *
 * To be used when there are 4-16 color channels.
 */
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
                                                 const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                                 const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart,
                                                 const int moduleStride, const int numImgColors, const int numFilterColors,
                                                 const int numGroups, const float scaleTargets, const float scaleOutputs) {
    // Weight tile (inner dim padded to 16+1, presumably to avoid shared-memory
    // bank conflicts), hidden-activation tile, and cached per-color target offsets.
    __shared__ float shFilters[colorsPerThread*16][16 + 1];
    __shared__ float shHidActs[16][16*imgsPerThread];
    __shared__ int shColors[colorsPerThread]; // not really necessary -- can repurpose the other shmems

    // blockIdx.x packs the image-batch index and the color-block index;
    // blockIdx.y packs the 4x4 region index and the sample index.
    const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
    const int numRegionsX = DIVUP(imgSizeX, 4);
    const int numRegions = numRegionsX * numRegionsX;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;

    const int overSample = gridDim.y / numRegions;
    const int blockSample = blockIdx.y / numRegions;
    const int groupsPerSample = numGroups / overSample;
    const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    const int blockRegionIdx = blockIdx.y % numRegions;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSizeX + pxX;
    // Edge regions can hang off the image; such threads still help load shmem
    // but skip the compute/write phases.
    const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
    const uint numModules = numModulesY * numModulesX; // NOTE: uint here, int in sibling kernels
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    // Loader coordinates: 256 threads re-flattened into 8 rows of 32 for
    // coalesced 32-wide loads of hidActs.
    const int loadY = tidx / 32, loadX = tidx % 32;

    // Advance base pointers to this block's slice of each tensor.
    hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX;
    filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
    targets += blockSample * numImgColors * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Range of modules whose receptive fields can touch this 4x4 region.
    const int startY = blockRegionTop - paddingStart < filterSize ? 0
                        : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0
                        : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
    float* shHidActLoad = &shHidActs[loadY][loadX];

    // Cache the randomized color -> target-row offsets (colorIndices provides
    // the sparse group-to-color mapping). No barrier here before first use;
    // preserved as-is from the original -- presumably safe because the first
    // read is far away, but worth confirming (NOTE(review)).
    if (tidx < colorsPerThread) {
        shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
    }

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;

            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;

            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        // Out-of-range cases contribute zero.
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;
                        }
                    }
                }

                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.
                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }
                }

                // Barrier kept outside the divergent predicate so every thread reaches it.
                __syncthreads();
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                // Protect shmem from being overwritten before all reads finish.
                __syncthreads();
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            // Blend into existing targets via the cached per-color offsets.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[shColors[c] + i * 16] = scaleTargets * targets[shColors[c] + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            // Overwrite targets with the scaled result.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[shColors[c] + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}

/*
 * Block size: B_YxB_X.
 * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
 *   In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
 *               blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
 * blockIdx.y determines image pixel in target image, sample idx.
 *   In essence, blockIdx.y.x = 1..imgPixels
 *               blockIdx.y.y = 1..overSample
 *
 * threadIdx.x determines case.
 * threadIdx.y determines color.
 *
 * overSample := numFilterColors*numGroups/numImgColors
 *     ^ this is the number of groups that each color channel is connected to
 *
 * hidActs:     (numFilters, numModulesY, numModulesX, numImages)
 * filters:     (numFilterColors, filterPixels, numFilters)                               if conv
 *              (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters)     otherwise
 * targets:     (overSample, numImgColors, imgSizeY, imgSizeX, numImages)
 *
 * colorIndices: (numGroups, numFilterColors)
 *
 * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
 *
 * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
 * numFiltersPerGroup must be divisible by 16.
 * numFilterColors*numGroups must be divisible by numImgColors.
 *
 * B_X * imgsPerThread must be divisible by 32.
 * numFilterColors must be divisible by B_Y*colorsPerThread.
 * B_X*B_Y must be divisible by 32.
 *
 * This version loads 32 cases at a time, so it gets full coalescing on that load.
 * It only loads 16 weights at a time, so those aren't fully coalesced.
 * This version conserves shared memory by loading 16 filters at a time rather than 32.
 *
 * To be used when there are >= 16 color channels.
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_manycolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
                                               const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                               const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart,
                                               const int moduleStride, const int numImgColors, const int numFilterColors,
                                               const int numGroups, const float scaleTargets, const float scaleOutputs) {
    // Weight tile (inner dim padded to 16+1, presumably to avoid shared-memory
    // bank conflicts), hidden-activation tile, and cached per-color target offsets.
    __shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
    __shared__ float shHidActs[16][B_X*imgsPerThread];
    __shared__ int shColors[colorsPerThread * B_Y]; // not really necessary -- can repurpose the other shmems

    // blockIdx.x packs the image-batch index and the color-block index;
    // blockIdx.y packs the target pixel and the sample index.
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // Two loader layouts over the same B_X*B_Y threads: 32-wide rows for the
    // coalesced hidActs load, 16-wide rows for the filter load.
    const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
    const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
    const int numModules = numModulesY * numModulesX;

    const int overSample = gridDim.y / imgPixels;
    const int blockSample = blockIdx.y / imgPixels;
    const int groupsPerSample = numGroups / overSample;

//    const int overSample = (numFilterColors * numGroups) / numImgColors;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y * colorsPerThread; // color idx globally
    const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
//    const int filterColorsPerSample = numFilterColors / overSample;

    const int blockPixelIdx = blockIdx.y % imgPixels;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // Advance base pointers to this block's slice of each tensor.
    hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += blockSample * numImgColors * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Range of modules whose receptive fields cover this pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];

    // Cache the randomized color -> target-row offsets (colorIndices provides
    // the sparse group-to-color mapping). No barrier here before first use;
    // preserved as-is from the original -- worth confirming (NOTE(review)).
    if (tidx < colorsPerThread * B_Y) {
        shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
    }

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * B_X; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        // Out-of-range cases contribute zero.
                        #pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = 0;
                        }
                    }
                }

                // conv: all modules share one filter bank; otherwise each module
                // has its own weights (locally connected layer).
                const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
                                          : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
                    // Guard is compile-time true when the row count divides evenly;
                    // otherwise the tail rows are bounds-checked.
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
                    }
                }

                __syncthreads();
                // Do some actual computation
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    #pragma unroll
                    for (int w = 0; w < 16; w++) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
                        }
                    }
                }
                // Protect shmem from being overwritten before all reads finish.
                __syncthreads();
            }
        }
    }
    if (scale) {
        // Blend into existing targets via the cached per-color offsets.
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleTargets * targets[shColors[c * B_Y + threadIdx.y] + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        // Overwrite targets with the scaled result.
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}

/*
 * hidActs:     (numFilters, numModules, numImages)
 * filters:     (numFilterColors, filterPixels, numFilters)               if conv
 *              (numModules, numFilterColors, filterPixels, numFilters)   otherwise
 *
targets: (overSample, numImgColors, imgPixels, numImages) * * Note: all of these convolution routines are optimized for the case when * the number of images (i.e. the minibatch size) is a multiple of 128. * Other batch sizes will work, but but I made no attempt whatsoever * to make them work fast. */ void _imgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) { int numFilterColors = numImgColors / numGroups; int numImages = hidActs.getNumCols(); int numFilters = filters.getNumCols(); int numModules = hidActs.getNumRows() / numFilters; int filterModuleMult = conv ? 1 : numModules; int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors); int filterSize = sqrt(filterPixels); int imgPixels = imgSizeY * imgSizeX; int numModulesX = numModules / numModulesY; assert(numImgColors % numGroups == 0); assert(numFilters % (16*numGroups) == 0); assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); assert(numGroups == 1 || numFilterColors % 4 == 0); assert(filterPixels == filterSize * filterSize); assert(hidActs.getNumRows() == numModules * numFilters); assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels); assert(numModules == numModulesY * numModulesX); assert(hidActs.isContiguous()); assert(filters.isContiguous()); assert(!hidActs.isTrans()); assert(!filters.isTrans()); assert(!targets.isTrans()); // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0); assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX); assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY); assert(moduleStride <= filterSize); assert(targets.isContiguous()); // no stride support here! 
dim3 blocks; dim3 threads(16,16); int colorsPerThread; int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2; if (numFilterColors % 8 == 0) { threads = dim3(32, 4); colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2; imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; assert(numFilterColors % (threads.y * colorsPerThread) == 0); blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels); } else if (numFilterColors > 3) { colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2; blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } else { blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0; if (scaleTargets == 0) { // do not scale or use targets matrix targets.resize(numImgColors*imgPixels, numImages); } else { assert(targets.getNumRows() == numImgColors * imgPixels); assert(targets.getNumCols() == numImages); } if (conv) { // convolutional units if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), 
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if 
(numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, false, false, true><<<blocks, 
threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 
scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, 
numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else { if (imgsPerThread == 8) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, false, true, 
true>, cudaFuncCachePreferShared); img_acts_color<8, 1, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, false, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 3, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, false, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 1, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, false, false, true>, cudaFuncCachePreferShared); 
img_acts_color<8, 3, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, false, true, true>, cudaFuncCachePreferShared); img_acts_color<4, 1, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_color<4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, false, true, true>, cudaFuncCachePreferShared); img_acts_color<4, 3, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, false, false, true>, cudaFuncCachePreferShared); img_acts_color<4, 1, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, false, false, true>, 
cudaFuncCachePreferShared); img_acts_color<4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, false, false, true>, cudaFuncCachePreferShared); img_acts_color<4, 3, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, false, true, true>, cudaFuncCachePreferShared); img_acts_color<2, 1, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<2, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_color<2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, false, true, true>, cudaFuncCachePreferShared); img_acts_color<2, 3, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, false, false, true>, 
cudaFuncCachePreferShared); img_acts_color<2, 1, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<2, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_color<2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, false, false, true>, cudaFuncCachePreferShared); img_acts_color<2, 3, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else { // do scale if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, 
numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, false, true>, 
cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, 
filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, true>, cudaFuncCachePreferShared); 
img_acts_mediumcolor<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 
scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else { if (imgsPerThread == 8) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, true, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 1, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), 
targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, true, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 3, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, true, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 1, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, true, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 3, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, 
numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, true, true, true>, cudaFuncCachePreferShared); img_acts_color<4, 1, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_color<4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, true, true, true>, cudaFuncCachePreferShared); img_acts_color<4, 3, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, true, false, true>, cudaFuncCachePreferShared); img_acts_color<4, 1, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_color<4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), 
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, true, false, true>, cudaFuncCachePreferShared); img_acts_color<4, 3, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, true, true, true>, cudaFuncCachePreferShared); img_acts_color<2, 1, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<2, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_color<2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, true, true, true>, cudaFuncCachePreferShared); img_acts_color<2, 3, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, true, false, true>, cudaFuncCachePreferShared); img_acts_color<2, 1, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, 
numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<2, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_color<2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, true, false, true>, cudaFuncCachePreferShared); img_acts_color<2, 3, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } else { // local, unshared units if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 
16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, false, false, 
false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, 
numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, false, false, false><<<blocks, 
threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if 
(checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else { if (imgsPerThread == 8) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, false, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 1, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), 
targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, false, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 3, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, false, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 1, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, false, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 3, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), 
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, false, true, false>, cudaFuncCachePreferShared); img_acts_color<4, 1, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_color<4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, false, true, false>, cudaFuncCachePreferShared); img_acts_color<4, 3, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, false, false, false>, cudaFuncCachePreferShared); img_acts_color<4, 1, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_color<4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), 
filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, false, false, false>, cudaFuncCachePreferShared); img_acts_color<4, 3, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, false, true, false>, cudaFuncCachePreferShared); img_acts_color<2, 1, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<2, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_color<2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, false, true, false>, cudaFuncCachePreferShared); img_acts_color<2, 3, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, false, false, false>, cudaFuncCachePreferShared); img_acts_color<2, 1, false, false, false><<<blocks, 
threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<2, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_color<2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, false, false, false>, cudaFuncCachePreferShared); img_acts_color<2, 3, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else { // do scale if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if 
(numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, true, false, 
false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, 
numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, true, false, false><<<blocks, 
threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if 
(colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else { if (imgsPerThread == 8) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, true, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 1, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, 
numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, true, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 3, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, true, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 1, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, true, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 3, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, 
imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, true, true, false>, cudaFuncCachePreferShared); img_acts_color<4, 1, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_color<4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, true, true, false>, cudaFuncCachePreferShared); img_acts_color<4, 3, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, true, false, false>, cudaFuncCachePreferShared); img_acts_color<4, 1, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_color<4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, 
numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, true, false, false>, cudaFuncCachePreferShared); img_acts_color<4, 3, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, true, true, false>, cudaFuncCachePreferShared); img_acts_color<2, 1, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<2, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_color<2, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, true, true, false>, cudaFuncCachePreferShared); img_acts_color<2, 3, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, true, false, false>, cudaFuncCachePreferShared); img_acts_color<2, 1, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, 
numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
    cudaFuncSetCacheConfig(img_acts_color<2, 2, true, false, false>, cudaFuncCachePreferShared);
    img_acts_color<2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
        numModulesY, numModulesX, numImages, numFilters, filterSize,
        imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
    cudaFuncSetCacheConfig(img_acts_color<2, 3, true, false, false>, cudaFuncCachePreferShared);
    img_acts_color<2, 3, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
        numModulesY, numModulesX, numImages, numFilters, filterSize,
        imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
    // Surface any launch-configuration or asynchronous kernel failure from the
    // template dispatch above (kernel launches do not return a status directly).
    cutilCheckMsg("imgActs: kernel execution failed");
}

/*
 * Convolutional backward-data pass, overwrite variant: targets is replaced
 * with the result (scaleTargets = 0, scaleOutput = 1). Delegates to _imgActs
 * with conv = true.
 */
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                 int imgSizeY, int imgSizeX, int numModulesY, int paddingStart,
                 int moduleStride, int numImgColors, int numGroups) {
    _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart,
             moduleStride, numImgColors, numGroups, 0, 1, true);
}

/*
 * Convolutional backward-data pass, accumulate variant: the result written to
 * targets is scaleTargets * targets + scaleOutput * output. Delegates to
 * _imgActs with conv = true.
 */
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                 int imgSizeY, int imgSizeX, int numModulesY, int paddingStart,
                 int moduleStride, int numImgColors, int numGroups,
                 float scaleTargets, float scaleOutput) {
    _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart,
             moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true);
}

/*
 * Locally-connected (non-convolutional) backward-data pass, overwrite variant:
 * targets is replaced with the result (scaleTargets = 0, scaleOutput = 1).
 * Delegates to _imgActs with conv = false.
 */
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                  int imgSizeY, int imgSizeX, int numModulesY, int paddingStart,
                  int moduleStride, int numImgColors, int numGroups) {
    _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart,
             moduleStride, numImgColors, numGroups, 0, 1, false);
}

/*
 * Locally-connected (non-convolutional) backward-data pass, accumulate variant:
 * the result written to targets is scaleTargets * targets + scaleOutput * output.
 * Delegates to _imgActs with conv = false.
 */
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                  int imgSizeY, int imgSizeX, int numModulesY, int paddingStart,
                  int moduleStride, int numImgColors, int numGroups,
                  float scaleTargets, float scaleOutput) {
    _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart,
             moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false);
}

/*
 * hidActs:         (numFilters, numModulesY, numModulesX, numImages)
 * filters:         (numFilterColors, filterPixels, numFilters)                                if conv
 *                  (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters)      otherwise
 * targets:         (overSample, numImgColors, imgSizeY, imgSizeX, numImages)
 * colorIndices:    (numGroups, numFilterColors)
 *
 * where overSample := (numFilterColors * numGroups) / numImgColors
 *
 * Note: all of these convolution routines are optimized for the case when
 * the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
 * to make them work fast.
 */
void _imgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                    int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride,
                    int numImgColors, int numFilterColors, int numGroups,
                    float scaleTargets, float scaleOutput, bool conv) {
    int numImages = hidActs.getNumCols();
    int numFilters = filters.getNumCols();
    // int numFiltersPerGroup = numFilters / numGroups;
    int numModules = hidActs.getNumRows() / numFilters;
    int filterModuleMult = conv ?
1 : numModules; int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors); int filterSize = sqrt(filterPixels); int imgPixels = imgSizeY * imgSizeX; int numModulesX = numModules / numModulesY; int overSample = (numFilterColors * numGroups) / numImgColors; assert(numImgColors % numFilterColors == 0); assert(numFilters % (16*numGroups) == 0); assert((numFilterColors * numGroups) % numImgColors == 0); assert(numGroups > 1); assert(numFilterColors > 3 && numFilterColors % 2 == 0); assert(filterPixels == filterSize * filterSize); assert(hidActs.getNumRows() == numModules * numFilters); assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels); assert(numModules == numModulesY * numModulesX); assert(hidActs.isContiguous()); assert(filters.isContiguous()); assert(!hidActs.isTrans()); assert(!filters.isTrans()); assert(!targets.isTrans()); // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0); assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX); assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY); assert(moduleStride <= filterSize); assert(targets.isContiguous()); // no stride support here! dim3 blocks; dim3 threads; int colorsPerThread; int imgsPerThread; if (numFilterColors % 8 == 0) { threads = dim3(32, 4); colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2; imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; assert(numFilterColors % (threads.y * colorsPerThread) == 0); blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), overSample * imgPixels); } else if (numFilterColors > 3) { imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2; threads = dim3(16, 16); colorsPerThread = numFilterColors % 4 == 0 ? 
4 : 2; blocks = dim3(DIVUP(numImages,16*imgsPerThread) * (numImgColors / colorsPerThread), overSample * DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0; if (scaleTargets == 0) { // do not scale or use targets matrix targets.resize(overSample*numImgColors*imgPixels, numImages); } else { assert(targets.getNumRows() == overSample * numImgColors * imgPixels); assert(targets.getNumCols() == numImages); } if (conv) { if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, 
paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, 
paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, 
numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, 
numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, 
scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } } else { // do scale if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, 
scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, 
scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, 
true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, 
true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } } } else { if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 
32, 4, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, 
false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } } else { // do scale if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, false, 
false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } } } cutilCheckMsg("imgActsSparse: kernel execution failed"); } void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) { _imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, true); } void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups, float scaleTargets, float scaleOutput) { _imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, true); } void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) { _imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, false); } void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups, float 
scaleTargets, float scaleOutput) { _imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, false); }
b0f8be5ad075450238df296a6743c1af5edbfb13.hip
// !!! This is a file automatically generated by hipify!!! // // Created by ascdc on 2021-05-27. // #include "utility.cuh" //by chen21019 __host__ __device__ bool ifchinese(int tmpUCS4) { return (tmpUCS4 >= 0x4E00 && tmpUCS4 <= 0x62FF) || (tmpUCS4 >= 0x6300 && tmpUCS4 <= 0x77FF) || (tmpUCS4 >= 0x7800 && tmpUCS4 <= 0x8CFF) || (tmpUCS4 >= 0x8D00 && tmpUCS4 <= 0x9FFF) || /* CJK Unified Ideographs Extension A */ (tmpUCS4 >= 0x3400 && tmpUCS4 <= 0x4DBF) || /* CJK Unified Ideographs Extension B */ (tmpUCS4 >= 0x20000 && tmpUCS4 <= 0x215FF) || (tmpUCS4 >= 0x21600 && tmpUCS4 <= 0x230FF) || (tmpUCS4 >= 0x23100 && tmpUCS4 <= 0x245FF) || (tmpUCS4 >= 0x24600 && tmpUCS4 <= 0x260FF) || (tmpUCS4 >= 0x26100 && tmpUCS4 <= 0x275FF) || (tmpUCS4 >= 0x27600 && tmpUCS4 <= 0x290FF) || (tmpUCS4 >= 0x29100 && tmpUCS4 <= 0x2A6DF) || /* CJK Unified Ideographs Extension C */ (tmpUCS4 >= 0x2A700 && tmpUCS4 <= 0x2B73F) || /* CJK Unified Ideographs Extension D */ (tmpUCS4 >= 0x2B740 && tmpUCS4 <= 0x2B81F) || /* CJK Unified Ideographs Extension E */ (tmpUCS4 >= 0x2B820 && tmpUCS4 <= 0x2CEAF) || /* CJK Unified Ideographs Extension F */ (tmpUCS4 >= 0x2CEB0 && tmpUCS4 <= 0x2EBEF) || /* Hiragana */ (tmpUCS4 >= 0x3040 && tmpUCS4 <= 0x309F) || /* Katakana */ (tmpUCS4 >= 0x30A0 && tmpUCS4 <= 0x30FF) || /* Katakana Phonetic Extensions */ (tmpUCS4 >= 0x31F0 && tmpUCS4 <= 0x31FF) || /* Hangul Jamo */ (tmpUCS4 >= 0x1100 && tmpUCS4 <= 0x11FF) || /* Hangul Jamo Extended-A */ (tmpUCS4 >= 0xA960 && tmpUCS4 <= 0xA97F) || /* Hangul Jamo Extended-B */ (tmpUCS4 >= 0xD7B0 && tmpUCS4 <= 0xD7FF) || /* Hangul Compatibility Jamo */ (tmpUCS4 >= 0x3130 && tmpUCS4 <= 0x318F) || /* Hangul Syllables */ (tmpUCS4 >= 0xAC00 && tmpUCS4 <= 0xD7AF); } //deprecated __host__ __device__ unsigned int utf32ChineseReCoding(unsigned int tmp) { unsigned int a = reCodeNotDef; if (tmp < 0x1100) { return a; } //0x1100~0x11FF if (tmp <= 0x11FF) { return tmp - 0x1100; } if (tmp < 0x3040) { return a; } //0x3040~0x9FFF if (tmp <= 0x9FFF) { 
return tmp - 0x3040 + 0x100; } if (tmp < 0xA960) { return a; } //0xA960~0xA97F if (tmp <= 0xA97F) { return tmp - 0xA960 + 0x100 + 0x6FC0; } if (tmp < 0xAC00) { return a; } //0xAC00~0xD7FF if (tmp <= 0xD7FF) { return tmp - 0xAC00 + 0x100 + 0x6FC0 + 0x20; } if (tmp < 0x20000) { return a; } //0x20000~0x2EBFF if (tmp <= 0x2EBFF) { return tmp - 0x20000 + 0x100 + 0x6FC0 + 0x20 + 0x2C00; } //MAX 188DE return a; } //deprecated __host__ __device__ unsigned int Reutf32ChineseReCoding(unsigned int tmp) { if (tmp == reCodeNotDef) return tmp; //0x1100~0x11FF if (tmp < 0x100) { return tmp + 0x1100; } //0x3040~0x9FFF if (tmp < 0x100 + 0x6FC0) { return tmp + 0x3040 - 0x100; } //0xA960~0xA97F if (tmp < 0x100 + 0x6FC0 + 0x20) { return tmp + 0xA960 - 0x100 - 0x6FC0; } //0xAC00~0xD7FF if (tmp < 0x100 + 0x6FC0 + 0x20 + 0x2C00) { return tmp + 0xAC00 - 0x100 - 0x6FC0 - 0x20; } return tmp + 0x20000 - 0x100 - 0x6FC0 - 0x20 - 0x2C00; } //catch cuda error void catchError() { hipError_t err; err = hipGetLastError(); if (err != hipSuccess) { std::wcout << hipGetErrorString(err) << std::endl; exit(1); } }
b0f8be5ad075450238df296a6743c1af5edbfb13.cu
// // Created by ascdc on 2021-05-27. // #include "utility.cuh" //by chen21019 __host__ __device__ bool ifchinese(int tmpUCS4) { return (tmpUCS4 >= 0x4E00 && tmpUCS4 <= 0x62FF) || (tmpUCS4 >= 0x6300 && tmpUCS4 <= 0x77FF) || (tmpUCS4 >= 0x7800 && tmpUCS4 <= 0x8CFF) || (tmpUCS4 >= 0x8D00 && tmpUCS4 <= 0x9FFF) || /* CJK Unified Ideographs Extension A */ (tmpUCS4 >= 0x3400 && tmpUCS4 <= 0x4DBF) || /* CJK Unified Ideographs Extension B */ (tmpUCS4 >= 0x20000 && tmpUCS4 <= 0x215FF) || (tmpUCS4 >= 0x21600 && tmpUCS4 <= 0x230FF) || (tmpUCS4 >= 0x23100 && tmpUCS4 <= 0x245FF) || (tmpUCS4 >= 0x24600 && tmpUCS4 <= 0x260FF) || (tmpUCS4 >= 0x26100 && tmpUCS4 <= 0x275FF) || (tmpUCS4 >= 0x27600 && tmpUCS4 <= 0x290FF) || (tmpUCS4 >= 0x29100 && tmpUCS4 <= 0x2A6DF) || /* CJK Unified Ideographs Extension C */ (tmpUCS4 >= 0x2A700 && tmpUCS4 <= 0x2B73F) || /* CJK Unified Ideographs Extension D */ (tmpUCS4 >= 0x2B740 && tmpUCS4 <= 0x2B81F) || /* CJK Unified Ideographs Extension E */ (tmpUCS4 >= 0x2B820 && tmpUCS4 <= 0x2CEAF) || /* CJK Unified Ideographs Extension F */ (tmpUCS4 >= 0x2CEB0 && tmpUCS4 <= 0x2EBEF) || /* Hiragana */ (tmpUCS4 >= 0x3040 && tmpUCS4 <= 0x309F) || /* Katakana */ (tmpUCS4 >= 0x30A0 && tmpUCS4 <= 0x30FF) || /* Katakana Phonetic Extensions */ (tmpUCS4 >= 0x31F0 && tmpUCS4 <= 0x31FF) || /* Hangul Jamo */ (tmpUCS4 >= 0x1100 && tmpUCS4 <= 0x11FF) || /* Hangul Jamo Extended-A */ (tmpUCS4 >= 0xA960 && tmpUCS4 <= 0xA97F) || /* Hangul Jamo Extended-B */ (tmpUCS4 >= 0xD7B0 && tmpUCS4 <= 0xD7FF) || /* Hangul Compatibility Jamo */ (tmpUCS4 >= 0x3130 && tmpUCS4 <= 0x318F) || /* Hangul Syllables */ (tmpUCS4 >= 0xAC00 && tmpUCS4 <= 0xD7AF); } //deprecated __host__ __device__ unsigned int utf32ChineseReCoding(unsigned int tmp) { unsigned int a = reCodeNotDef; if (tmp < 0x1100) { return a; } //0x1100~0x11FF if (tmp <= 0x11FF) { return tmp - 0x1100; } if (tmp < 0x3040) { return a; } //0x3040~0x9FFF if (tmp <= 0x9FFF) { return tmp - 0x3040 + 0x100; } if (tmp < 0xA960) { return a; } 
//0xA960~0xA97F if (tmp <= 0xA97F) { return tmp - 0xA960 + 0x100 + 0x6FC0; } if (tmp < 0xAC00) { return a; } //0xAC00~0xD7FF if (tmp <= 0xD7FF) { return tmp - 0xAC00 + 0x100 + 0x6FC0 + 0x20; } if (tmp < 0x20000) { return a; } //0x20000~0x2EBFF if (tmp <= 0x2EBFF) { return tmp - 0x20000 + 0x100 + 0x6FC0 + 0x20 + 0x2C00; } //MAX 188DE return a; } //deprecated __host__ __device__ unsigned int Reutf32ChineseReCoding(unsigned int tmp) { if (tmp == reCodeNotDef) return tmp; //0x1100~0x11FF if (tmp < 0x100) { return tmp + 0x1100; } //0x3040~0x9FFF if (tmp < 0x100 + 0x6FC0) { return tmp + 0x3040 - 0x100; } //0xA960~0xA97F if (tmp < 0x100 + 0x6FC0 + 0x20) { return tmp + 0xA960 - 0x100 - 0x6FC0; } //0xAC00~0xD7FF if (tmp < 0x100 + 0x6FC0 + 0x20 + 0x2C00) { return tmp + 0xAC00 - 0x100 - 0x6FC0 - 0x20; } return tmp + 0x20000 - 0x100 - 0x6FC0 - 0x20 - 0x2C00; } //catch cuda error void catchError() { cudaError_t err; err = cudaGetLastError(); if (err != cudaSuccess) { std::wcout << cudaGetErrorString(err) << std::endl; exit(1); } }
c00a47785004f9cad6859c84f61754a2b7f98ce5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "renderer.h" namespace cuda_renderer { #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } template<typename T> device_vector_holder<T>::~device_vector_holder(){ __free(); } template<typename T> void device_vector_holder<T>::__free(){ if(valid){ hipFree(__gpu_memory); valid = false; __size = 0; } } template<typename T> device_vector_holder<T>::device_vector_holder(size_t size_, T init) { __malloc(size_); thrust::fill(begin_thr(), end_thr(), init); } template<typename T> void device_vector_holder<T>::__malloc(size_t size_){ if(valid) __free(); hipMalloc((void**)&__gpu_memory, size_ * sizeof(T)); __size = size_; valid = true; } template<typename T> device_vector_holder<T>::device_vector_holder(size_t size_){ __malloc(size_); } template class device_vector_holder<int>; void print_cuda_memory_usage(){ // show memory usage of GPU size_t free_byte ; size_t total_byte ; auto cuda_status = hipMemGetInfo( &free_byte, &total_byte ) ; if ( hipSuccess != cuda_status ){ printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) ); exit(1); } double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; printf("GPU memory usage: used = %f, free = %f MB, total = %f MB\n", used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); } struct max2zero_functor{ max2zero_functor(){} __host__ __device__ int32_t operator()(const int32_t& x) const { return (x==INT_MAX)? 
0: x; } }; __device__ void rasterization(const Model::Triangle dev_tri, Model::float3 last_row, int32_t* depth_entry, size_t width, size_t height, const Model::ROI roi){ // refer to tiny renderer // https://github.com/ssloy/tinyrenderer/blob/master/our_gl.cpp float pts2[3][2]; // viewport transform(0, 0, width, height) pts2[0][0] = dev_tri.v0.x/last_row.x*width/2.0f+width/2.0f; pts2[0][1] = dev_tri.v0.y/last_row.x*height/2.0f+height/2.0f; pts2[1][0] = dev_tri.v1.x/last_row.y*width/2.0f+width/2.0f; pts2[1][1] = dev_tri.v1.y/last_row.y*height/2.0f+height/2.0f; pts2[2][0] = dev_tri.v2.x/last_row.z*width/2.0f+width/2.0f; pts2[2][1] = dev_tri.v2.y/last_row.z*height/2.0f+height/2.0f; float bboxmin[2] = {FLT_MAX, FLT_MAX}; float bboxmax[2] = {-FLT_MAX, -FLT_MAX}; float clamp_max[2] = {float(width-1), float(height-1)}; float clamp_min[2] = {0, 0}; size_t real_width = width; if(roi.width > 0 && roi.height > 0){ // depth will be flipped clamp_min[0] = roi.x; clamp_min[1] = height-1 - (roi.y + roi.height - 1); clamp_max[0] = (roi.x + roi.width) - 1; clamp_max[1] = height-1 - roi.y; real_width = roi.width; } for (int i=0; i<3; i++) { for (int j=0; j<2; j++) { bboxmin[j] = std__max(clamp_min[j], std__min(bboxmin[j], pts2[i][j])); bboxmax[j] = std__min(clamp_max[j], std__max(bboxmax[j], pts2[i][j])); } } size_t P[2]; for(P[1] = size_t(bboxmin[1]+0.5f); P[1]<=bboxmax[1]; P[1] += 1){ for(P[0] = size_t(bboxmin[0]+0.5f); P[0]<=bboxmax[0]; P[0] += 1){ Model::float3 bc_screen = barycentric(pts2[0], pts2[1], pts2[2], P); if (bc_screen.x<-0.0f || bc_screen.y<-0.0f || bc_screen.z<-0.0f || bc_screen.x>1.0f || bc_screen.y>1.0f || bc_screen.z>1.0f ) continue; Model::float3 bc_over_z = {bc_screen.x/last_row.x, bc_screen.y/last_row.y, bc_screen.z/last_row.z}; // refer to https://en.wikibooks.org/wiki/Cg_Programming/Rasterization, Perspectively Correct Interpolation // float frag_depth = (dev_tri.v0.z * bc_over_z.x + dev_tri.v1.z * bc_over_z.y + dev_tri.v2.z * bc_over_z.z) // /(bc_over_z.x + 
bc_over_z.y + bc_over_z.z); // this seems better float frag_depth = (bc_screen.x + bc_screen.y + bc_screen.z) /(bc_over_z.x + bc_over_z.y + bc_over_z.z); size_t x_to_write = (P[0] + roi.x); size_t y_to_write = (height-1 - P[1] - roi.y); int32_t depth = int32_t(frag_depth/**1000*/ + 0.5f); int32_t& depth_to_write = depth_entry[x_to_write+y_to_write*real_width]; atomicMin(&depth_to_write, depth); } } } __global__ void render_triangle(Model::Triangle* device_tris_ptr, size_t device_tris_size, Model::mat4x4* device_poses_ptr, size_t device_poses_size, int32_t* depth_image_vec, size_t width, size_t height, const Model::mat4x4 proj_mat, const Model::ROI roi){ size_t pose_i = blockIdx.y; size_t tri_i = blockIdx.x*blockDim.x + threadIdx.x; if(tri_i>=device_tris_size) return; // if(pose_i>=device_poses_size) return; size_t real_width = width; size_t real_height = height; if(roi.width > 0 && roi.height > 0){ real_width = roi.width; real_height = roi.height; } int32_t* depth_entry = depth_image_vec + pose_i*real_width*real_height; //length: width*height 32bits int Model::mat4x4* pose_entry = device_poses_ptr + pose_i; // length: 16 32bits float Model::Triangle* tri_entry = device_tris_ptr + tri_i; // length: 9 32bits float // model transform Model::Triangle local_tri = transform_triangle(*tri_entry, *pose_entry); // if(normal_functor::is_back(local_tri)) return; //back face culling, need to be disable for not well defined surfaces? 
// assume last column of projection matrix is 0 0 1 0 Model::float3 last_row = { local_tri.v0.z, local_tri.v1.z, local_tri.v2.z }; // projection transform local_tri = transform_triangle(local_tri, proj_mat); rasterization(local_tri, last_row, depth_entry, width, height, roi); } std::vector<int32_t> render_cuda(const std::vector<Model::Triangle>& tris,const std::vector<Model::mat4x4>& poses, size_t width, size_t height, const Model::mat4x4& proj_mat, const Model::ROI roi){ const size_t threadsPerBlock = 256; thrust::device_vector<Model::Triangle> device_tris = tris; thrust::device_vector<Model::mat4x4> device_poses = poses; size_t real_width = width; size_t real_height = height; if(roi.width > 0 && roi.height > 0){ real_width = roi.width; real_height = roi.height; assert(roi.x + roi.width <= width && "roi out of image"); assert(roi.y + roi.height <= height && "roi out of image"); } // atomic min only support int32 thrust::device_vector<int32_t> device_depth_int(poses.size()*real_width*real_height, INT_MAX); { Model::Triangle* device_tris_ptr = thrust::raw_pointer_cast(device_tris.data()); Model::mat4x4* device_poses_ptr = thrust::raw_pointer_cast(device_poses.data()); int32_t* depth_image_vec = thrust::raw_pointer_cast(device_depth_int.data()); dim3 numBlocks((tris.size() + threadsPerBlock - 1) / threadsPerBlock, poses.size()); hipLaunchKernelGGL(( render_triangle), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, device_tris_ptr, tris.size(), device_poses_ptr, poses.size(), depth_image_vec, width, height, proj_mat, roi); hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError()); } std::vector<int32_t> result_depth(poses.size()*real_width*real_height); { thrust::transform(device_depth_int.begin(), device_depth_int.end(), device_depth_int.begin(), max2zero_functor()); thrust::copy(device_depth_int.begin(), device_depth_int.end(), result_depth.begin()); } return result_depth; } std::vector<int32_t> render_cuda(device_vector_holder<Model::Triangle>& device_tris,const 
std::vector<Model::mat4x4>& poses, size_t width, size_t height, const Model::mat4x4& proj_mat, const Model::ROI roi){ const size_t threadsPerBlock = 256; thrust::device_vector<Model::mat4x4> device_poses = poses; size_t real_width = width; size_t real_height = height; if(roi.width > 0 && roi.height > 0){ real_width = roi.width; real_height = roi.height; assert(roi.x + roi.width <= width && "roi out of image"); assert(roi.y + roi.height <= height && "roi out of image"); } // atomic min only support int32 thrust::device_vector<int32_t> device_depth_int(poses.size()*real_width*real_height, INT_MAX); { Model::mat4x4* device_poses_ptr = thrust::raw_pointer_cast(device_poses.data()); int32_t* depth_image_vec = thrust::raw_pointer_cast(device_depth_int.data()); dim3 numBlocks((device_tris.size() + threadsPerBlock - 1) / threadsPerBlock, poses.size()); hipLaunchKernelGGL(( render_triangle), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, device_tris.data(), device_tris.size(), device_poses_ptr, poses.size(), depth_image_vec, width, height, proj_mat, roi); hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError()); } std::vector<int32_t> result_depth(poses.size()*real_width*real_height); { thrust::transform(device_depth_int.begin(), device_depth_int.end(), device_depth_int.begin(), max2zero_functor()); thrust::copy(device_depth_int.begin(), device_depth_int.end(), result_depth.begin()); } return result_depth; } device_vector_holder<int> render_cuda_keep_in_gpu(const std::vector<Model::Triangle>& tris,const std::vector<Model::mat4x4>& poses, size_t width, size_t height, const Model::mat4x4& proj_mat, const Model::ROI roi){ const size_t threadsPerBlock = 256; thrust::device_vector<Model::Triangle> device_tris = tris; thrust::device_vector<Model::mat4x4> device_poses = poses; size_t real_width = width; size_t real_height = height; if(roi.width > 0 && roi.height > 0){ real_width = roi.width; real_height = roi.height; } // atomic min only support int32 // 
thrust::device_vector<int32_t> device_depth_int(poses.size()*real_width*real_height, INT_MAX); device_vector_holder<int> device_depth_int(poses.size()*real_width*real_height, INT_MAX); { Model::Triangle* device_tris_ptr = thrust::raw_pointer_cast(device_tris.data()); Model::mat4x4* device_poses_ptr = thrust::raw_pointer_cast(device_poses.data()); int32_t* depth_image_vec = device_depth_int.data(); dim3 numBlocks((tris.size() + threadsPerBlock - 1) / threadsPerBlock, poses.size()); hipLaunchKernelGGL(( render_triangle), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, device_tris_ptr, tris.size(), device_poses_ptr, poses.size(), depth_image_vec, width, height, proj_mat, roi); hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError()); } thrust::transform(device_depth_int.begin_thr(), device_depth_int.end_thr(), device_depth_int.begin_thr(), max2zero_functor()); return device_depth_int; } device_vector_holder<int> render_cuda_keep_in_gpu(device_vector_holder<Model::Triangle>& tris,const std::vector<Model::mat4x4>& poses, size_t width, size_t height, const Model::mat4x4& proj_mat, const Model::ROI roi){ const size_t threadsPerBlock = 256; thrust::device_vector<Model::mat4x4> device_poses = poses; size_t real_width = width; size_t real_height = height; if(roi.width > 0 && roi.height > 0){ real_width = roi.width; real_height = roi.height; } // atomic min only support int32 // thrust::device_vector<int32_t> device_depth_int(poses.size()*real_width*real_height, INT_MAX); device_vector_holder<int> device_depth_int(poses.size()*real_width*real_height, INT_MAX); { Model::mat4x4* device_poses_ptr = thrust::raw_pointer_cast(device_poses.data()); int32_t* depth_image_vec = device_depth_int.data(); dim3 numBlocks((tris.size() + threadsPerBlock - 1) / threadsPerBlock, poses.size()); hipLaunchKernelGGL(( render_triangle), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, tris.data(), tris.size(), device_poses_ptr, poses.size(), depth_image_vec, width, height, proj_mat, roi); 
hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError()); } thrust::transform(device_depth_int.begin_thr(), device_depth_int.end_thr(), device_depth_int.begin_thr(), max2zero_functor()); return device_depth_int; } }
c00a47785004f9cad6859c84f61754a2b7f98ce5.cu
#include "renderer.h" namespace cuda_renderer { #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } template<typename T> device_vector_holder<T>::~device_vector_holder(){ __free(); } template<typename T> void device_vector_holder<T>::__free(){ if(valid){ cudaFree(__gpu_memory); valid = false; __size = 0; } } template<typename T> device_vector_holder<T>::device_vector_holder(size_t size_, T init) { __malloc(size_); thrust::fill(begin_thr(), end_thr(), init); } template<typename T> void device_vector_holder<T>::__malloc(size_t size_){ if(valid) __free(); cudaMalloc((void**)&__gpu_memory, size_ * sizeof(T)); __size = size_; valid = true; } template<typename T> device_vector_holder<T>::device_vector_holder(size_t size_){ __malloc(size_); } template class device_vector_holder<int>; void print_cuda_memory_usage(){ // show memory usage of GPU size_t free_byte ; size_t total_byte ; auto cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ; if ( cudaSuccess != cuda_status ){ printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) ); exit(1); } double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; printf("GPU memory usage: used = %f, free = %f MB, total = %f MB\n", used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); } struct max2zero_functor{ max2zero_functor(){} __host__ __device__ int32_t operator()(const int32_t& x) const { return (x==INT_MAX)? 
0: x; } }; __device__ void rasterization(const Model::Triangle dev_tri, Model::float3 last_row, int32_t* depth_entry, size_t width, size_t height, const Model::ROI roi){ // refer to tiny renderer // https://github.com/ssloy/tinyrenderer/blob/master/our_gl.cpp float pts2[3][2]; // viewport transform(0, 0, width, height) pts2[0][0] = dev_tri.v0.x/last_row.x*width/2.0f+width/2.0f; pts2[0][1] = dev_tri.v0.y/last_row.x*height/2.0f+height/2.0f; pts2[1][0] = dev_tri.v1.x/last_row.y*width/2.0f+width/2.0f; pts2[1][1] = dev_tri.v1.y/last_row.y*height/2.0f+height/2.0f; pts2[2][0] = dev_tri.v2.x/last_row.z*width/2.0f+width/2.0f; pts2[2][1] = dev_tri.v2.y/last_row.z*height/2.0f+height/2.0f; float bboxmin[2] = {FLT_MAX, FLT_MAX}; float bboxmax[2] = {-FLT_MAX, -FLT_MAX}; float clamp_max[2] = {float(width-1), float(height-1)}; float clamp_min[2] = {0, 0}; size_t real_width = width; if(roi.width > 0 && roi.height > 0){ // depth will be flipped clamp_min[0] = roi.x; clamp_min[1] = height-1 - (roi.y + roi.height - 1); clamp_max[0] = (roi.x + roi.width) - 1; clamp_max[1] = height-1 - roi.y; real_width = roi.width; } for (int i=0; i<3; i++) { for (int j=0; j<2; j++) { bboxmin[j] = std__max(clamp_min[j], std__min(bboxmin[j], pts2[i][j])); bboxmax[j] = std__min(clamp_max[j], std__max(bboxmax[j], pts2[i][j])); } } size_t P[2]; for(P[1] = size_t(bboxmin[1]+0.5f); P[1]<=bboxmax[1]; P[1] += 1){ for(P[0] = size_t(bboxmin[0]+0.5f); P[0]<=bboxmax[0]; P[0] += 1){ Model::float3 bc_screen = barycentric(pts2[0], pts2[1], pts2[2], P); if (bc_screen.x<-0.0f || bc_screen.y<-0.0f || bc_screen.z<-0.0f || bc_screen.x>1.0f || bc_screen.y>1.0f || bc_screen.z>1.0f ) continue; Model::float3 bc_over_z = {bc_screen.x/last_row.x, bc_screen.y/last_row.y, bc_screen.z/last_row.z}; // refer to https://en.wikibooks.org/wiki/Cg_Programming/Rasterization, Perspectively Correct Interpolation // float frag_depth = (dev_tri.v0.z * bc_over_z.x + dev_tri.v1.z * bc_over_z.y + dev_tri.v2.z * bc_over_z.z) // /(bc_over_z.x + 
bc_over_z.y + bc_over_z.z); // this seems better float frag_depth = (bc_screen.x + bc_screen.y + bc_screen.z) /(bc_over_z.x + bc_over_z.y + bc_over_z.z); size_t x_to_write = (P[0] + roi.x); size_t y_to_write = (height-1 - P[1] - roi.y); int32_t depth = int32_t(frag_depth/**1000*/ + 0.5f); int32_t& depth_to_write = depth_entry[x_to_write+y_to_write*real_width]; atomicMin(&depth_to_write, depth); } } } __global__ void render_triangle(Model::Triangle* device_tris_ptr, size_t device_tris_size, Model::mat4x4* device_poses_ptr, size_t device_poses_size, int32_t* depth_image_vec, size_t width, size_t height, const Model::mat4x4 proj_mat, const Model::ROI roi){ size_t pose_i = blockIdx.y; size_t tri_i = blockIdx.x*blockDim.x + threadIdx.x; if(tri_i>=device_tris_size) return; // if(pose_i>=device_poses_size) return; size_t real_width = width; size_t real_height = height; if(roi.width > 0 && roi.height > 0){ real_width = roi.width; real_height = roi.height; } int32_t* depth_entry = depth_image_vec + pose_i*real_width*real_height; //length: width*height 32bits int Model::mat4x4* pose_entry = device_poses_ptr + pose_i; // length: 16 32bits float Model::Triangle* tri_entry = device_tris_ptr + tri_i; // length: 9 32bits float // model transform Model::Triangle local_tri = transform_triangle(*tri_entry, *pose_entry); // if(normal_functor::is_back(local_tri)) return; //back face culling, need to be disable for not well defined surfaces? 
// assume last column of projection matrix is 0 0 1 0 Model::float3 last_row = { local_tri.v0.z, local_tri.v1.z, local_tri.v2.z }; // projection transform local_tri = transform_triangle(local_tri, proj_mat); rasterization(local_tri, last_row, depth_entry, width, height, roi); } std::vector<int32_t> render_cuda(const std::vector<Model::Triangle>& tris,const std::vector<Model::mat4x4>& poses, size_t width, size_t height, const Model::mat4x4& proj_mat, const Model::ROI roi){ const size_t threadsPerBlock = 256; thrust::device_vector<Model::Triangle> device_tris = tris; thrust::device_vector<Model::mat4x4> device_poses = poses; size_t real_width = width; size_t real_height = height; if(roi.width > 0 && roi.height > 0){ real_width = roi.width; real_height = roi.height; assert(roi.x + roi.width <= width && "roi out of image"); assert(roi.y + roi.height <= height && "roi out of image"); } // atomic min only support int32 thrust::device_vector<int32_t> device_depth_int(poses.size()*real_width*real_height, INT_MAX); { Model::Triangle* device_tris_ptr = thrust::raw_pointer_cast(device_tris.data()); Model::mat4x4* device_poses_ptr = thrust::raw_pointer_cast(device_poses.data()); int32_t* depth_image_vec = thrust::raw_pointer_cast(device_depth_int.data()); dim3 numBlocks((tris.size() + threadsPerBlock - 1) / threadsPerBlock, poses.size()); render_triangle<<<numBlocks, threadsPerBlock>>>(device_tris_ptr, tris.size(), device_poses_ptr, poses.size(), depth_image_vec, width, height, proj_mat, roi); cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError()); } std::vector<int32_t> result_depth(poses.size()*real_width*real_height); { thrust::transform(device_depth_int.begin(), device_depth_int.end(), device_depth_int.begin(), max2zero_functor()); thrust::copy(device_depth_int.begin(), device_depth_int.end(), result_depth.begin()); } return result_depth; } std::vector<int32_t> render_cuda(device_vector_holder<Model::Triangle>& device_tris,const std::vector<Model::mat4x4>& poses, size_t 
width, size_t height, const Model::mat4x4& proj_mat, const Model::ROI roi){ const size_t threadsPerBlock = 256; thrust::device_vector<Model::mat4x4> device_poses = poses; size_t real_width = width; size_t real_height = height; if(roi.width > 0 && roi.height > 0){ real_width = roi.width; real_height = roi.height; assert(roi.x + roi.width <= width && "roi out of image"); assert(roi.y + roi.height <= height && "roi out of image"); } // atomic min only support int32 thrust::device_vector<int32_t> device_depth_int(poses.size()*real_width*real_height, INT_MAX); { Model::mat4x4* device_poses_ptr = thrust::raw_pointer_cast(device_poses.data()); int32_t* depth_image_vec = thrust::raw_pointer_cast(device_depth_int.data()); dim3 numBlocks((device_tris.size() + threadsPerBlock - 1) / threadsPerBlock, poses.size()); render_triangle<<<numBlocks, threadsPerBlock>>>(device_tris.data(), device_tris.size(), device_poses_ptr, poses.size(), depth_image_vec, width, height, proj_mat, roi); cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError()); } std::vector<int32_t> result_depth(poses.size()*real_width*real_height); { thrust::transform(device_depth_int.begin(), device_depth_int.end(), device_depth_int.begin(), max2zero_functor()); thrust::copy(device_depth_int.begin(), device_depth_int.end(), result_depth.begin()); } return result_depth; } device_vector_holder<int> render_cuda_keep_in_gpu(const std::vector<Model::Triangle>& tris,const std::vector<Model::mat4x4>& poses, size_t width, size_t height, const Model::mat4x4& proj_mat, const Model::ROI roi){ const size_t threadsPerBlock = 256; thrust::device_vector<Model::Triangle> device_tris = tris; thrust::device_vector<Model::mat4x4> device_poses = poses; size_t real_width = width; size_t real_height = height; if(roi.width > 0 && roi.height > 0){ real_width = roi.width; real_height = roi.height; } // atomic min only support int32 // thrust::device_vector<int32_t> device_depth_int(poses.size()*real_width*real_height, INT_MAX); 
device_vector_holder<int> device_depth_int(poses.size()*real_width*real_height, INT_MAX); { Model::Triangle* device_tris_ptr = thrust::raw_pointer_cast(device_tris.data()); Model::mat4x4* device_poses_ptr = thrust::raw_pointer_cast(device_poses.data()); int32_t* depth_image_vec = device_depth_int.data(); dim3 numBlocks((tris.size() + threadsPerBlock - 1) / threadsPerBlock, poses.size()); render_triangle<<<numBlocks, threadsPerBlock>>>(device_tris_ptr, tris.size(), device_poses_ptr, poses.size(), depth_image_vec, width, height, proj_mat, roi); cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError()); } thrust::transform(device_depth_int.begin_thr(), device_depth_int.end_thr(), device_depth_int.begin_thr(), max2zero_functor()); return device_depth_int; } device_vector_holder<int> render_cuda_keep_in_gpu(device_vector_holder<Model::Triangle>& tris,const std::vector<Model::mat4x4>& poses, size_t width, size_t height, const Model::mat4x4& proj_mat, const Model::ROI roi){ const size_t threadsPerBlock = 256; thrust::device_vector<Model::mat4x4> device_poses = poses; size_t real_width = width; size_t real_height = height; if(roi.width > 0 && roi.height > 0){ real_width = roi.width; real_height = roi.height; } // atomic min only support int32 // thrust::device_vector<int32_t> device_depth_int(poses.size()*real_width*real_height, INT_MAX); device_vector_holder<int> device_depth_int(poses.size()*real_width*real_height, INT_MAX); { Model::mat4x4* device_poses_ptr = thrust::raw_pointer_cast(device_poses.data()); int32_t* depth_image_vec = device_depth_int.data(); dim3 numBlocks((tris.size() + threadsPerBlock - 1) / threadsPerBlock, poses.size()); render_triangle<<<numBlocks, threadsPerBlock>>>(tris.data(), tris.size(), device_poses_ptr, poses.size(), depth_image_vec, width, height, proj_mat, roi); cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError()); } thrust::transform(device_depth_int.begin_thr(), device_depth_int.end_thr(), device_depth_int.begin_thr(), 
max2zero_functor()); return device_depth_int; } }
9968bc8b510e8da22bae223ce1be149c93e70fe3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> // It's a M * N matrix #define M 6 #define N 3 // Each element is computed on one thread __global__ void add (int *A, int *B, int *C) { // Get the 1D Array index of the matrix int idx = blockDim.x * blockIdx.x + threadIdx.x; printf("idx = %d\n", idx); C[idx] = A[idx] + B[idx]; } int main () { // Host copies of the variables int A[M * N], B[M * N], C[M * N]; int i, j; for (i = 0; i < M * N; ++i) { A[i] = i + 1; B[i] = M * N - i - 1; } // Device copies of the variables int *d_a, *d_b, *d_c; int size = sizeof(int) * M * N; // Allocate memories to device copies of the objects hipMalloc((void**)&d_a, size); hipMalloc((void**)&d_b, size); hipMalloc((void**)&d_c, size); // Copy inputs to device hipMemcpy(d_a, &A, size, hipMemcpyHostToDevice); hipMemcpy(d_b, &B, size, hipMemcpyHostToDevice); // Launch kernel onto the device hipLaunchKernelGGL(( add), dim3(M), dim3(N), 0, 0, d_a, d_b, d_c); // Copy the result back to the host hipMemcpy(&C, d_c, size, hipMemcpyDeviceToHost); // Outpoooot it printf("A:\n"); for (i = 0; i < N; ++i) { for (j = 0; j < M; ++j) { printf("%d\t", A[i * M + j]); } printf("\n"); } printf("\n"); printf("B:\n"); for (i = 0; i < N; ++i) { for (j = 0; j < M; ++j) { printf("%d\t", B[i * M + j]); } printf("\n"); } printf("\n"); printf("A + B:\n"); for (i = 0; i < N; ++i) { for (j = 0; j < M; ++j) { printf("%d\t", C[i * M + j]); } printf("\n"); } printf("\n"); hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
9968bc8b510e8da22bae223ce1be149c93e70fe3.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> // It's a M * N matrix #define M 6 #define N 3 // Each element is computed on one thread __global__ void add (int *A, int *B, int *C) { // Get the 1D Array index of the matrix int idx = blockDim.x * blockIdx.x + threadIdx.x; printf("idx = %d\n", idx); C[idx] = A[idx] + B[idx]; } int main () { // Host copies of the variables int A[M * N], B[M * N], C[M * N]; int i, j; for (i = 0; i < M * N; ++i) { A[i] = i + 1; B[i] = M * N - i - 1; } // Device copies of the variables int *d_a, *d_b, *d_c; int size = sizeof(int) * M * N; // Allocate memories to device copies of the objects cudaMalloc((void**)&d_a, size); cudaMalloc((void**)&d_b, size); cudaMalloc((void**)&d_c, size); // Copy inputs to device cudaMemcpy(d_a, &A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, &B, size, cudaMemcpyHostToDevice); // Launch kernel onto the device add<<<M, N>>>(d_a, d_b, d_c); // Copy the result back to the host cudaMemcpy(&C, d_c, size, cudaMemcpyDeviceToHost); // Outpoooot it printf("A:\n"); for (i = 0; i < N; ++i) { for (j = 0; j < M; ++j) { printf("%d\t", A[i * M + j]); } printf("\n"); } printf("\n"); printf("B:\n"); for (i = 0; i < N; ++i) { for (j = 0; j < M; ++j) { printf("%d\t", B[i * M + j]); } printf("\n"); } printf("\n"); printf("A + B:\n"); for (i = 0; i < N; ++i) { for (j = 0; j < M; ++j) { printf("%d\t", C[i * M + j]); } printf("\n"); } printf("\n"); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
442691743a86cbcb5657cfc1dd9f1c7da4f7e5da.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void spmv_csr_kernel(unsigned int dim, unsigned int *csrRowPtr, unsigned int *csrColIdx, float *csrData, float *inVector, float *outVector) { // INSERT KERNEL CODE HERE int rowIdx = blockIdx.x*blockDim.x + threadIdx.x; if(rowIdx<dim){ float dotP = 0.0f; for(int i=csrRowPtr[rowIdx]; i<csrRowPtr[rowIdx+1]; i++){ dotP += csrData[i]*inVector[csrColIdx[i]]; } outVector[rowIdx] = dotP; } } __global__ void spmv_jds_kernel(unsigned int dim, unsigned int *jdsRowPerm, unsigned int *jdsRowNNZ, unsigned int *jdsColStartIdx, unsigned int *jdsColIdx, float *jdsData, float* inVector, float *outVector) { // INSERT KERNEL CODE HERE int rowIdx = blockIdx.x*blockDim.x + threadIdx.x; if(rowIdx<dim){ float dotP = 0.0f; for(int i=0; i<jdsRowNNZ[rowIdx]; i++){ dotP += jdsData[rowIdx+jdsColStartIdx[i]]*inVector[jdsColIdx[rowIdx+jdsColStartIdx[i]]]; } outVector[jdsRowPerm[rowIdx]] = dotP; } } void spmv_csr(unsigned int dim, unsigned int *csrRowPtr, unsigned int *csrColIdx, float *csrData, float *inVector, float *outVector) { // INSERT CODE HERE hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); int maxThreadsPerBlock = prop.maxThreadsPerBlock; hipLaunchKernelGGL(( spmv_csr_kernel), dim3(ceil(dim/(float)maxThreadsPerBlock)), dim3(maxThreadsPerBlock), 0, 0, dim, csrRowPtr, csrColIdx, csrData, inVector, outVector); } void spmv_jds(unsigned int dim, unsigned int *jdsRowPerm, unsigned int *jdsRowNNZ, unsigned int *jdsColStartIdx, unsigned int *jdsColIdx, float *jdsData, float* inVector, float *outVector) { // INSERT CODE HERE hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); int maxThreadsPerBlock = prop.maxThreadsPerBlock; hipLaunchKernelGGL(( spmv_jds_kernel), dim3(ceil(dim/(float)maxThreadsPerBlock)), dim3(maxThreadsPerBlock), 0, 0, dim, jdsRowPerm, jdsRowNNZ, jdsColStartIdx, jdsColIdx, jdsData, inVector, outVector); }
442691743a86cbcb5657cfc1dd9f1c7da4f7e5da.cu
#include <stdio.h> __global__ void spmv_csr_kernel(unsigned int dim, unsigned int *csrRowPtr, unsigned int *csrColIdx, float *csrData, float *inVector, float *outVector) { // INSERT KERNEL CODE HERE int rowIdx = blockIdx.x*blockDim.x + threadIdx.x; if(rowIdx<dim){ float dotP = 0.0f; for(int i=csrRowPtr[rowIdx]; i<csrRowPtr[rowIdx+1]; i++){ dotP += csrData[i]*inVector[csrColIdx[i]]; } outVector[rowIdx] = dotP; } } __global__ void spmv_jds_kernel(unsigned int dim, unsigned int *jdsRowPerm, unsigned int *jdsRowNNZ, unsigned int *jdsColStartIdx, unsigned int *jdsColIdx, float *jdsData, float* inVector, float *outVector) { // INSERT KERNEL CODE HERE int rowIdx = blockIdx.x*blockDim.x + threadIdx.x; if(rowIdx<dim){ float dotP = 0.0f; for(int i=0; i<jdsRowNNZ[rowIdx]; i++){ dotP += jdsData[rowIdx+jdsColStartIdx[i]]*inVector[jdsColIdx[rowIdx+jdsColStartIdx[i]]]; } outVector[jdsRowPerm[rowIdx]] = dotP; } } void spmv_csr(unsigned int dim, unsigned int *csrRowPtr, unsigned int *csrColIdx, float *csrData, float *inVector, float *outVector) { // INSERT CODE HERE cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); int maxThreadsPerBlock = prop.maxThreadsPerBlock; spmv_csr_kernel<<<ceil(dim/(float)maxThreadsPerBlock), maxThreadsPerBlock>>>(dim, csrRowPtr, csrColIdx, csrData, inVector, outVector); } void spmv_jds(unsigned int dim, unsigned int *jdsRowPerm, unsigned int *jdsRowNNZ, unsigned int *jdsColStartIdx, unsigned int *jdsColIdx, float *jdsData, float* inVector, float *outVector) { // INSERT CODE HERE cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); int maxThreadsPerBlock = prop.maxThreadsPerBlock; spmv_jds_kernel<<<ceil(dim/(float)maxThreadsPerBlock), maxThreadsPerBlock>>>(dim, jdsRowPerm, jdsRowNNZ, jdsColStartIdx, jdsColIdx, jdsData, inVector, outVector); }
aed5b981ddd4ab8e0db005bf4408ad7c2f681d44.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>

#define BLOCK_SIZE 256
#define STR_SIZE 256
#define DEVICE 0
#define HALO 1 // halo width along one direction when advancing to the next iteration
#define BENCH_PRINT

void run(int argc, char** argv);

// Problem dimensions and host-side buffers (filled in by init()).
int rows, cols;
int* data;    // rows*cols random cost matrix, row-major
int** wall;   // per-row pointers into data
int* result;  // accumulated path costs for the final row

#define M_SEED 9
int pyramid_height;
char* goldfile;

// Parse command-line arguments and build a random cost matrix.
void init(int argc, char** argv)
{
    if (argc == 5) {
        cols = atoi(argv[1]);
        rows = atoi(argv[2]);
        pyramid_height = atoi(argv[3]);
        goldfile = argv[4];
    } else {
        printf("Usage: dynproc row_len col_len pyramid_height\n");
        exit(0);
    }

    data = new int[rows*cols];
    wall = new int*[rows];
    for (int n = 0; n < rows; n++)
        wall[n] = data + cols*n;
    result = new int[cols];

    int seed = M_SEED; // fixed seed so runs are reproducible
    srand(seed);

    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            wall[i][j] = rand() % 10;
        }
    }
#ifdef BENCH_PRINT
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            printf("%d ", wall[i][j]);
        }
        printf("\n");
    }
#endif
}

void fatal(char *s)
{
    fprintf(stderr, "error: %s\n", s);
}

#define IN_RANGE(x, min, max)   ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))

// Advance the dynamic-programming wavefront by up to `iteration` rows.
// Each block computes a "small block" of columns; the HALO columns on
// each side are redundant work that lets blocks stay independent.
// gpuSrc holds the previous row of costs, gpuResults receives the output.
__global__ void dynproc_kernel(
                int iteration,
                int *gpuWall,
                int *gpuSrc,
                int *gpuResults,
                int cols,
                int rows,
                int startStep,
                int border)
{
    __shared__ int prev[BLOCK_SIZE];
    __shared__ int result[BLOCK_SIZE];

    int bx = blockIdx.x;
    int tx = threadIdx.x;

    // each block finally computes result for a small block
    // after N iterations.
    // it is the non-overlapping small blocks that cover
    // all the input data

    // calculate the small block size
    int small_block_cols = BLOCK_SIZE - iteration*HALO*2;

    // calculate the boundary for the block according to
    // the boundary of its small block
    int blkX = small_block_cols*bx - border;
    int blkXmax = blkX + BLOCK_SIZE - 1;

    // calculate the global thread coordination
    int xidx = blkX + tx;

    // effective range within this block that falls within
    // the valid range of the input data
    // used to rule out computation outside the boundary.
    int validXmin = (blkX < 0) ? -blkX : 0;
    int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1;

    // Clamp west/east neighbour indices to the valid range.
    int W = tx - 1;
    int E = tx + 1;
    W = (W < validXmin) ? validXmin : W;
    E = (E > validXmax) ? validXmax : E;

    bool isValid = IN_RANGE(tx, validXmin, validXmax);

    if (IN_RANGE(xidx, 0, cols-1)) {
        prev[tx] = gpuSrc[xidx];
    }

    bool computed;
    for (int i = 0; i < iteration; i++) {
        computed = false;
        if (IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && isValid) {
            computed = true;
            int left  = prev[W];
            int up    = prev[tx];
            int right = prev[E];
            int shortest = MIN(left, up);
            shortest = MIN(shortest, right);
            int index = cols*(startStep+i) + xidx;
            result[tx] = shortest + gpuWall[index];
        }
        __syncthreads();
        if (i == iteration-1)
            break;
        if (computed)      // Assign the computation range
            prev[tx] = result[tx];
        // BUGFIX: barrier added so no thread reads prev[W]/prev[E] in the
        // next iteration before its neighbours finished writing prev[tx].
        __syncthreads();
    }

    // update the global memory
    // after the last iteration, only threads coordinated within the
    // small block perform the calculation and switch on ``computed''
    if (computed) {
        gpuResults[xidx] = result[tx];
    }
}

/* compute N time steps */
int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols,
              int pyramid_height, int blockCols, int borderCols)
{
    dim3 dimBlock(BLOCK_SIZE);
    dim3 dimGrid(blockCols);

    // Ping-pong between the two row buffers, one launch per pyramid.
    int src = 1, dst = 0;
    for (int t = 0; t < rows-1; t += pyramid_height) {
        int temp = src;
        src = dst;
        dst = temp;
        hipLaunchKernelGGL(( dynproc_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
            MIN(pyramid_height, rows-t-1),
            gpuWall, gpuResult[src], gpuResult[dst],
            cols, rows, t, borderCols);
    }
    return dst; // index of the buffer holding the final row
}

int main(int argc, char** argv)
{
    int num_devices;
    hipGetDeviceCount(&num_devices);
    if (num_devices > 1) hipSetDevice(DEVICE);

    run(argc, argv);

    return EXIT_SUCCESS;
}

void run(int argc, char** argv)
{
    init(argc, argv);

    /* --------------- pyramid parameters --------------- */
    int borderCols = (pyramid_height)*HALO;
    int smallBlockCol = BLOCK_SIZE - (pyramid_height)*HALO*2;
    int blockCols = cols/smallBlockCol + ((cols % smallBlockCol == 0) ? 0 : 1);

    printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",
           pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol);

    int *gpuWall, *gpuResult[2];
    int size = rows*cols;

    // gpuResult[0/1] hold one row each; gpuWall holds rows 1..rows-1.
    hipMalloc((void**)&gpuResult[0], sizeof(int)*cols);
    hipMalloc((void**)&gpuResult[1], sizeof(int)*cols);
    hipMemcpy(gpuResult[0], data, sizeof(int)*cols, hipMemcpyHostToDevice);
    hipMalloc((void**)&gpuWall, sizeof(int)*(size-cols));
    hipMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), hipMemcpyHostToDevice);

    int final_ret = calc_path(gpuWall, gpuResult, rows, cols,
                              pyramid_height, blockCols, borderCols);

    hipMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, hipMemcpyDeviceToHost);

#ifdef BENCH_PRINT
    FILE* ofile = fopen("result.txt", "w");
    for (int i = 0; i < cols; i++) {
        printf("%d ", data[i]);
        fprintf(ofile, "%d ", data[i]);
    }
    printf("\n");
    fprintf(ofile, "\n");
    for (int i = 0; i < cols; i++) {
        printf("%d ", result[i]);
        fprintf(ofile, "%d ", result[i]);
    }
    printf("\n");
    fprintf(ofile, "\n");
    fclose(ofile);

    if (goldfile) {
        // Byte-compare result.txt against the gold file.
        FILE *gold = fopen(goldfile, "r");
        FILE *resultfile = fopen("result.txt", "r"); // renamed: no longer shadows the global `result`
        int result_error = 0;
        while (!feof(gold) && !feof(resultfile)) {
            if (fgetc(gold) != fgetc(resultfile)) {
                result_error = 1;
                break;
            }
        }
        if ((feof(gold) ^ feof(resultfile)) | result_error) {
            printf("\nFAILED\n");
        } else {
            printf("\nPASSED\n");
        }
        fclose(gold);
        fclose(resultfile);
    }
#endif

    hipFree(gpuWall);
    hipFree(gpuResult[0]);
    hipFree(gpuResult[1]);

    delete [] data;
    delete [] wall;
    delete [] result;
}
aed5b981ddd4ab8e0db005bf4408ad7c2f681d44.cu
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>

#define BLOCK_SIZE 256
#define STR_SIZE 256
#define DEVICE 0
#define HALO 1 // halo width along one direction when advancing to the next iteration
#define BENCH_PRINT

void run(int argc, char** argv);

// Problem dimensions and host-side buffers (filled in by init()).
int rows, cols;
int* data;    // rows*cols random cost matrix, row-major
int** wall;   // per-row pointers into data
int* result;  // accumulated path costs for the final row

#define M_SEED 9
int pyramid_height;
char* goldfile;

// Parse command-line arguments and build a random cost matrix.
void init(int argc, char** argv)
{
    if (argc == 5) {
        cols = atoi(argv[1]);
        rows = atoi(argv[2]);
        pyramid_height = atoi(argv[3]);
        goldfile = argv[4];
    } else {
        printf("Usage: dynproc row_len col_len pyramid_height\n");
        exit(0);
    }

    data = new int[rows*cols];
    wall = new int*[rows];
    for (int n = 0; n < rows; n++)
        wall[n] = data + cols*n;
    result = new int[cols];

    int seed = M_SEED; // fixed seed so runs are reproducible
    srand(seed);

    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            wall[i][j] = rand() % 10;
        }
    }
#ifdef BENCH_PRINT
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            printf("%d ", wall[i][j]);
        }
        printf("\n");
    }
#endif
}

void fatal(char *s)
{
    fprintf(stderr, "error: %s\n", s);
}

#define IN_RANGE(x, min, max)   ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))

// Advance the dynamic-programming wavefront by up to `iteration` rows.
// Each block computes a "small block" of columns; the HALO columns on
// each side are redundant work that lets blocks stay independent.
// gpuSrc holds the previous row of costs, gpuResults receives the output.
__global__ void dynproc_kernel(
                int iteration,
                int *gpuWall,
                int *gpuSrc,
                int *gpuResults,
                int cols,
                int rows,
                int startStep,
                int border)
{
    __shared__ int prev[BLOCK_SIZE];
    __shared__ int result[BLOCK_SIZE];

    int bx = blockIdx.x;
    int tx = threadIdx.x;

    // each block finally computes result for a small block
    // after N iterations.
    // it is the non-overlapping small blocks that cover
    // all the input data

    // calculate the small block size
    int small_block_cols = BLOCK_SIZE - iteration*HALO*2;

    // calculate the boundary for the block according to
    // the boundary of its small block
    int blkX = small_block_cols*bx - border;
    int blkXmax = blkX + BLOCK_SIZE - 1;

    // calculate the global thread coordination
    int xidx = blkX + tx;

    // effective range within this block that falls within
    // the valid range of the input data
    // used to rule out computation outside the boundary.
    int validXmin = (blkX < 0) ? -blkX : 0;
    int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1;

    // Clamp west/east neighbour indices to the valid range.
    int W = tx - 1;
    int E = tx + 1;
    W = (W < validXmin) ? validXmin : W;
    E = (E > validXmax) ? validXmax : E;

    bool isValid = IN_RANGE(tx, validXmin, validXmax);

    if (IN_RANGE(xidx, 0, cols-1)) {
        prev[tx] = gpuSrc[xidx];
    }

    bool computed;
    for (int i = 0; i < iteration; i++) {
        computed = false;
        if (IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && isValid) {
            computed = true;
            int left  = prev[W];
            int up    = prev[tx];
            int right = prev[E];
            int shortest = MIN(left, up);
            shortest = MIN(shortest, right);
            int index = cols*(startStep+i) + xidx;
            result[tx] = shortest + gpuWall[index];
        }
        __syncthreads();
        if (i == iteration-1)
            break;
        if (computed)      // Assign the computation range
            prev[tx] = result[tx];
        // BUGFIX: barrier added so no thread reads prev[W]/prev[E] in the
        // next iteration before its neighbours finished writing prev[tx].
        __syncthreads();
    }

    // update the global memory
    // after the last iteration, only threads coordinated within the
    // small block perform the calculation and switch on ``computed''
    if (computed) {
        gpuResults[xidx] = result[tx];
    }
}

/* compute N time steps */
int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols,
              int pyramid_height, int blockCols, int borderCols)
{
    dim3 dimBlock(BLOCK_SIZE);
    dim3 dimGrid(blockCols);

    // Ping-pong between the two row buffers, one launch per pyramid.
    int src = 1, dst = 0;
    for (int t = 0; t < rows-1; t += pyramid_height) {
        int temp = src;
        src = dst;
        dst = temp;
        dynproc_kernel<<<dimGrid, dimBlock>>>(
            MIN(pyramid_height, rows-t-1),
            gpuWall, gpuResult[src], gpuResult[dst],
            cols, rows, t, borderCols);
    }
    return dst; // index of the buffer holding the final row
}

int main(int argc, char** argv)
{
    int num_devices;
    cudaGetDeviceCount(&num_devices);
    if (num_devices > 1) cudaSetDevice(DEVICE);

    run(argc, argv);

    return EXIT_SUCCESS;
}

void run(int argc, char** argv)
{
    init(argc, argv);

    /* --------------- pyramid parameters --------------- */
    int borderCols = (pyramid_height)*HALO;
    int smallBlockCol = BLOCK_SIZE - (pyramid_height)*HALO*2;
    int blockCols = cols/smallBlockCol + ((cols % smallBlockCol == 0) ? 0 : 1);

    printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",
           pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol);

    int *gpuWall, *gpuResult[2];
    int size = rows*cols;

    // gpuResult[0/1] hold one row each; gpuWall holds rows 1..rows-1.
    cudaMalloc((void**)&gpuResult[0], sizeof(int)*cols);
    cudaMalloc((void**)&gpuResult[1], sizeof(int)*cols);
    cudaMemcpy(gpuResult[0], data, sizeof(int)*cols, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&gpuWall, sizeof(int)*(size-cols));
    cudaMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), cudaMemcpyHostToDevice);

    int final_ret = calc_path(gpuWall, gpuResult, rows, cols,
                              pyramid_height, blockCols, borderCols);

    cudaMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, cudaMemcpyDeviceToHost);

#ifdef BENCH_PRINT
    FILE* ofile = fopen("result.txt", "w");
    for (int i = 0; i < cols; i++) {
        printf("%d ", data[i]);
        fprintf(ofile, "%d ", data[i]);
    }
    printf("\n");
    fprintf(ofile, "\n");
    for (int i = 0; i < cols; i++) {
        printf("%d ", result[i]);
        fprintf(ofile, "%d ", result[i]);
    }
    printf("\n");
    fprintf(ofile, "\n");
    fclose(ofile);

    if (goldfile) {
        // Byte-compare result.txt against the gold file.
        FILE *gold = fopen(goldfile, "r");
        FILE *resultfile = fopen("result.txt", "r"); // renamed: no longer shadows the global `result`
        int result_error = 0;
        while (!feof(gold) && !feof(resultfile)) {
            if (fgetc(gold) != fgetc(resultfile)) {
                result_error = 1;
                break;
            }
        }
        if ((feof(gold) ^ feof(resultfile)) | result_error) {
            printf("\nFAILED\n");
        } else {
            printf("\nPASSED\n");
        }
        fclose(gold);
        fclose(resultfile);
    }
#endif

    cudaFree(gpuWall);
    cudaFree(gpuResult[0]);
    cudaFree(gpuResult[1]);

    delete [] data;
    delete [] wall;
    delete [] result;
}
26c84b57b13d441339bd1fdfb34fe44fd711980a.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>

// setting array and block size
// NOTE: ARRAY_SIZE must stay a multiple of BLOCK_SIZE — the kernel reads
// device_arr[index] without a bounds check (1048576 = 1024 * 1024 here).
#define ARRAY_SIZE 1048576
#define BLOCK_SIZE 1024

// helper function for calculating upper ceil of division
int upper_ceil(int numerator, int denominator)
{
    if (numerator % denominator == 0) {
        return numerator / denominator;
    }
    return (numerator / denominator) + 1;
}

/**
 * Kernel code to compute the vector reduction sum.
 * Each block reduces BLOCK_SIZE elements in shared memory (interleaved
 * addressing) and then atomically adds its partial sum into *device_sum.
 */
__global__ void vector_sum_reduction(float *device_arr, float *device_sum)
{
    // array in shared memory declaration
    __shared__ float shared_data[BLOCK_SIZE];

    // thread id and the global element mapped to this thread
    unsigned int thread_id = threadIdx.x;
    unsigned int index = blockDim.x * blockIdx.x + thread_id;

    // stage this block's slice into shared memory
    shared_data[thread_id] = device_arr[index];
    // wait for the all threads to complete filling the array in shared memory
    __syncthreads();

    // tree reduction: at each stage, thread t adds the element `offset`
    // positions to its right into shared_data[2*t*offset]
    for (int offset = 1; offset < blockDim.x; offset *= 2) {
        unsigned int idx = 2 * thread_id * offset;
        if (idx < blockDim.x) {
            shared_data[idx] += shared_data[idx + offset];
        }
        // making sure all adds at one stage are done
        __syncthreads();
    }

    // block's partial sum sits at shared_data[0]; fold it into the total
    if (thread_id == 0) {
        atomicAdd(device_sum, shared_data[0]);
    }
}

// Main function
int main()
{
    // host variables
    float *host_arr;
    float *host_sum;
    // device variables
    float *device_arr;
    float *device_sum;

    // allocate space in host
    host_arr = (float *) malloc(ARRAY_SIZE * sizeof(float));
    host_sum = (float *) malloc(sizeof(float));
    (*host_sum) = 0;

    // initialize host array elements
    for (int i = 0; i < ARRAY_SIZE; ++i) {
        host_arr[i] = (float)2;
    }

    // allocate device memory with error handling
    // BUGFIX: removed the stray ";; \" artifact, and error paths now return
    // a non-zero exit code (the original returned 0, signalling success).
    hipError_t err = hipMalloc((void **)&device_arr, ARRAY_SIZE * sizeof(float));
    if (err != hipSuccess) {
        printf("\nError: %s ", hipGetErrorString(err));
        return 1;
    }
    err = hipMalloc((void **)&device_sum, sizeof(float));
    if (err != hipSuccess) {
        printf("\nError: %s ", hipGetErrorString(err));
        return 1;
    }

    // copy host memory data to device
    hipMemcpy(device_arr, host_arr, ARRAY_SIZE * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(device_sum, host_sum, sizeof(float), hipMemcpyHostToDevice);

    // initialize thread block and kernel grid dimensions
    int blocks = upper_ceil(ARRAY_SIZE, BLOCK_SIZE);

    // invoke kernel and check for launch-configuration errors
    // (a kernel launch itself reports no error; hipGetLastError does)
    hipLaunchKernelGGL(( vector_sum_reduction), dim3(blocks), dim3(BLOCK_SIZE), 0, 0, device_arr, device_sum);
    err = hipGetLastError();
    if (err != hipSuccess) {
        printf("\nError: %s ", hipGetErrorString(err));
        return 1;
    }

    // copy results from device to host (blocking copy also synchronizes)
    hipMemcpy(host_sum, device_sum, sizeof(float), hipMemcpyDeviceToHost);

    // print result
    printf("Sum = %f\n", *host_sum);

    // free device and host memory
    hipFree(device_arr);
    hipFree(device_sum);
    free(host_arr);
    free(host_sum);

    return 0;
}
26c84b57b13d441339bd1fdfb34fe44fd711980a.cu
#include <stdio.h>
#include <cuda.h>

// setting array and block size
// NOTE: ARRAY_SIZE must stay a multiple of BLOCK_SIZE — the kernel reads
// device_arr[index] without a bounds check (1048576 = 1024 * 1024 here).
#define ARRAY_SIZE 1048576
#define BLOCK_SIZE 1024

// helper function for calculating upper ceil of division
int upper_ceil(int numerator, int denominator)
{
    if (numerator % denominator == 0) {
        return numerator / denominator;
    }
    return (numerator / denominator) + 1;
}

/**
 * Kernel code to compute the vector reduction sum.
 * Each block reduces BLOCK_SIZE elements in shared memory (interleaved
 * addressing) and then atomically adds its partial sum into *device_sum.
 */
__global__ void vector_sum_reduction(float *device_arr, float *device_sum)
{
    // array in shared memory declaration
    __shared__ float shared_data[BLOCK_SIZE];

    // thread id and the global element mapped to this thread
    unsigned int thread_id = threadIdx.x;
    unsigned int index = blockDim.x * blockIdx.x + thread_id;

    // stage this block's slice into shared memory
    shared_data[thread_id] = device_arr[index];
    // wait for the all threads to complete filling the array in shared memory
    __syncthreads();

    // tree reduction: at each stage, thread t adds the element `offset`
    // positions to its right into shared_data[2*t*offset]
    for (int offset = 1; offset < blockDim.x; offset *= 2) {
        unsigned int idx = 2 * thread_id * offset;
        if (idx < blockDim.x) {
            shared_data[idx] += shared_data[idx + offset];
        }
        // making sure all adds at one stage are done
        __syncthreads();
    }

    // block's partial sum sits at shared_data[0]; fold it into the total
    if (thread_id == 0) {
        atomicAdd(device_sum, shared_data[0]);
    }
}

// Main function
int main()
{
    // host variables
    float *host_arr;
    float *host_sum;
    // device variables
    float *device_arr;
    float *device_sum;

    // allocate space in host
    host_arr = (float *) malloc(ARRAY_SIZE * sizeof(float));
    host_sum = (float *) malloc(sizeof(float));
    (*host_sum) = 0;

    // initialize host array elements
    for (int i = 0; i < ARRAY_SIZE; ++i) {
        host_arr[i] = (float)2;
    }

    // allocate device memory with error handling
    // BUGFIX: removed the stray ";; \" artifact, and error paths now return
    // a non-zero exit code (the original returned 0, signalling success).
    cudaError_t err = cudaMalloc((void **)&device_arr, ARRAY_SIZE * sizeof(float));
    if (err != cudaSuccess) {
        printf("\nError: %s ", cudaGetErrorString(err));
        return 1;
    }
    err = cudaMalloc((void **)&device_sum, sizeof(float));
    if (err != cudaSuccess) {
        printf("\nError: %s ", cudaGetErrorString(err));
        return 1;
    }

    // copy host memory data to device
    cudaMemcpy(device_arr, host_arr, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(device_sum, host_sum, sizeof(float), cudaMemcpyHostToDevice);

    // initialize thread block and kernel grid dimensions
    int blocks = upper_ceil(ARRAY_SIZE, BLOCK_SIZE);

    // invoke CUDA kernel and check for launch-configuration errors
    // (a kernel launch itself reports no error; cudaGetLastError does)
    vector_sum_reduction<<<blocks, BLOCK_SIZE>>>(device_arr, device_sum);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("\nError: %s ", cudaGetErrorString(err));
        return 1;
    }

    // copy results from device to host (blocking copy also synchronizes)
    cudaMemcpy(host_sum, device_sum, sizeof(float), cudaMemcpyDeviceToHost);

    // print result
    printf("Sum = %f\n", *host_sum);

    // free device and host memory
    cudaFree(device_arr);
    cudaFree(device_sum);
    free(host_arr);
    free(host_sum);

    return 0;
}
b076c28f05222eb38afe59c2ee7524f3490d93c8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2019,2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/top_k_grad.hpp>
#include <nbla/cuda/utils/top_k.cuh>
#include <nbla/variable.hpp>

#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>

namespace nbla {

namespace top_k_grad {

// Zero-fill `data` (used to clear the input gradient before scattering).
template <typename T> __global__ void set_to_zero(const int size, T *data) {
  NBLA_CUDA_KERNEL_LOOP(i, size) { data[i] = 0; }
}

// Replace every element by its absolute value (for abs-ranked top-k).
template <typename T> __global__ void set_to_absolute(const int size, T *data) {
  NBLA_CUDA_KERNEL_LOOP(i, size) { data[i] = abs(data[i]); }
}

// Accumulate the output gradient into the input gradient at the k
// top-ranked positions given as ValIdx (value/index) pairs.
template <typename T>
__global__ void add_gradient(const int k, const ValIdx<T> *sorted,
                             const T *y_grad, T *x_grad) {
  NBLA_CUDA_KERNEL_LOOP(i, k) {
    const auto idx = sorted[i].index();
    x_grad[idx] += y_grad[idx];
  }
}

// As add_gradient above, but overwrite instead of accumulate.
template <typename T>
__global__ void set_gradient(const int k, const ValIdx<T> *sorted,
                             const T *y_grad, T *x_grad) {
  NBLA_CUDA_KERNEL_LOOP(i, k) {
    const auto idx = sorted[i].index();
    x_grad[idx] = y_grad[idx];
  }
}

// Overload taking plain sorted indices (used by the thrust path, k > 1024).
template <typename T>
__global__ void add_gradient(const int k, const unsigned int *sorted_idx,
                             const T *y_grad, T *x_grad) {
  NBLA_CUDA_KERNEL_LOOP(i, k) {
    const auto idx = sorted_idx[i];
    x_grad[idx] += y_grad[idx];
  }
}

// Overwrite variant of the plain-index overload above.
template <typename T>
__global__ void set_gradient(const int k, const unsigned int *sorted_idx,
                             const T *y_grad, T *x_grad) {
  NBLA_CUDA_KERNEL_LOOP(i, k) {
    const auto idx = sorted_idx[i];
    x_grad[idx] = y_grad[idx];
  }
}

} // namespace top_k_grad

// Size the scratch buffer: for k > 1024 the thrust path needs one index
// slot per inner element; otherwise a single Buffer<Tcu> suffices.
template <typename T>
void TopKGradCuda<T>::setup_impl(const Variables &inputs,
                                 const Variables &outputs) {
  TopKGrad<T>::setup_impl(inputs, outputs);
  cuda_set_device(this->device_);
  if (this->k_ > 1024) {
    this->buffer_.reshape(Shape_t{outputs[0]->size(this->base_axis_)}, true);
  } else {
    this->buffer_.reshape(Shape_t{static_cast<Size_t>(sizeof(Buffer<Tcu>))},
                          true);
  }
}

// Forward pass is an identity copy of the input data to the output.
template <typename T>
void TopKGradCuda<T>::forward_impl(const Variables &inputs,
                                   const Variables &outputs) {
  cuda_set_device(this->device_);
  const auto x = inputs[0];
  const auto y = outputs[0];
  auto x_data = x->data()->get(get_dtype<Tcu>(), this->ctx_);
  auto y_data = y->data()->cast(get_dtype<Tcu>(), this->ctx_, true);
  y_data->copy_from(x_data);
}

// Backward pass: propagate the output gradient only at the top-k
// positions (per slice of `inner_size` elements), zeroing or keeping
// the rest depending on accum[0].
template <typename T>
void TopKGradCuda<T>::backward_impl(const Variables &inputs,
                                    const Variables &outputs,
                                    const vector<bool> &propagate_down,
                                    const vector<bool> &accum) {
  if (!(propagate_down[0]))
    return;

  using namespace top_k_grad;
  cuda_set_device(this->device_);

  const auto x = inputs[0];
  const auto y = outputs[0];

  auto y_grad = y->get_grad_pointer<Tcu>(this->ctx_);
  auto x_grad = x->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
  auto idx = (reinterpret_cast<Variable &>(this->top_k_idx_)
                  .get_data_pointer<unsigned int>(this->ctx_));

  if (!accum[0])
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_to_zero, x->size(), x_grad);

  // One "slice" per outer index; each slice is reduced independently.
  auto inner_size = y->size(this->base_axis_);
  auto outer_size = y->size() / inner_size;

  if (this->k_ > 1024) {
    // For large K we use thrust sort_by_key to do a radix sort of
    // data and index. This is not very efficient but large K is not
    // the expected use case. The code could be splitting the input
    // into a smaller partition of the k-th largest values before
    // sorting.
    auto buffer_raw =
        this->buffer_.cast(get_dtype<unsigned int>(), this->ctx_, true)
            ->template pointer<unsigned int>();
    auto buffer_ptr = thrust::device_pointer_cast(buffer_raw);

    for (int s = 0; s < outer_size; s++) {
      // Sort a copy of the gradient slice so y_grad itself stays intact.
      auto y_grad_vec = thrust::device_vector<Tcu>(y_grad, y_grad + inner_size);
      auto sorted_val = thrust::raw_pointer_cast(y_grad_vec.data());
      auto sorted_idx = thrust::raw_pointer_cast(buffer_ptr);

      if (this->abs_) {
        auto raw_ptr = thrust::raw_pointer_cast(y_grad_vec.data());
        NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_to_absolute, inner_size, raw_ptr);
      }

      // Descending sort keyed by gradient value; buffer_ptr receives the
      // permutation, so its first k_ entries are the top-k indices.
      thrust::sequence(buffer_ptr, buffer_ptr + inner_size);
      thrust::sort_by_key(y_grad_vec.begin(), y_grad_vec.end(), buffer_ptr,
                          thrust::greater<Tcu>());

      if (accum[0]) {
        NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(add_gradient, this->k_, sorted_idx,
                                       y_grad, x_grad);
      } else {
        NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_gradient, this->k_, sorted_idx,
                                       y_grad, x_grad);
      }
      y_grad += inner_size;
      x_grad += inner_size;
    }
  } else {
    // Small K: use the dedicated top_k selection utility per slice.
    auto buffer = this->buffer_.cast(get_dtype<char>(), this->ctx_, true)
                      ->template pointer<Buffer<Tcu>>();

    for (int s = 0; s < outer_size; s++) {
      if (this->abs_) {
        top_k<Tcu, true>(y_grad, inner_size, this->k_, buffer);
      } else {
        top_k<Tcu, false>(y_grad, inner_size, this->k_, buffer);
      }

      if (accum[0]) {
        NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(add_gradient, this->k_,
                                       &buffer->sorted[0], y_grad, x_grad);
      } else {
        NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_gradient, this->k_,
                                       &buffer->sorted[0], y_grad, x_grad);
      }
      y_grad += inner_size;
      x_grad += inner_size;
    }
  }
}
} // namespace nbla
b076c28f05222eb38afe59c2ee7524f3490d93c8.cu
// Copyright 2019,2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/top_k_grad.hpp>
#include <nbla/cuda/utils/top_k.cuh>
#include <nbla/variable.hpp>

#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>

namespace nbla {

namespace top_k_grad {

// Zero-fill `data` (used to clear the input gradient before scattering).
template <typename T> __global__ void set_to_zero(const int size, T *data) {
  NBLA_CUDA_KERNEL_LOOP(i, size) { data[i] = 0; }
}

// Replace every element by its absolute value (for abs-ranked top-k).
template <typename T> __global__ void set_to_absolute(const int size, T *data) {
  NBLA_CUDA_KERNEL_LOOP(i, size) { data[i] = abs(data[i]); }
}

// Accumulate the output gradient into the input gradient at the k
// top-ranked positions given as ValIdx (value/index) pairs.
template <typename T>
__global__ void add_gradient(const int k, const ValIdx<T> *sorted,
                             const T *y_grad, T *x_grad) {
  NBLA_CUDA_KERNEL_LOOP(i, k) {
    const auto idx = sorted[i].index();
    x_grad[idx] += y_grad[idx];
  }
}

// As add_gradient above, but overwrite instead of accumulate.
template <typename T>
__global__ void set_gradient(const int k, const ValIdx<T> *sorted,
                             const T *y_grad, T *x_grad) {
  NBLA_CUDA_KERNEL_LOOP(i, k) {
    const auto idx = sorted[i].index();
    x_grad[idx] = y_grad[idx];
  }
}

// Overload taking plain sorted indices (used by the thrust path, k > 1024).
template <typename T>
__global__ void add_gradient(const int k, const unsigned int *sorted_idx,
                             const T *y_grad, T *x_grad) {
  NBLA_CUDA_KERNEL_LOOP(i, k) {
    const auto idx = sorted_idx[i];
    x_grad[idx] += y_grad[idx];
  }
}

// Overwrite variant of the plain-index overload above.
template <typename T>
__global__ void set_gradient(const int k, const unsigned int *sorted_idx,
                             const T *y_grad, T *x_grad) {
  NBLA_CUDA_KERNEL_LOOP(i, k) {
    const auto idx = sorted_idx[i];
    x_grad[idx] = y_grad[idx];
  }
}

} // namespace top_k_grad

// Size the scratch buffer: for k > 1024 the thrust path needs one index
// slot per inner element; otherwise a single Buffer<Tcu> suffices.
template <typename T>
void TopKGradCuda<T>::setup_impl(const Variables &inputs,
                                 const Variables &outputs) {
  TopKGrad<T>::setup_impl(inputs, outputs);
  cuda_set_device(this->device_);
  if (this->k_ > 1024) {
    this->buffer_.reshape(Shape_t{outputs[0]->size(this->base_axis_)}, true);
  } else {
    this->buffer_.reshape(Shape_t{static_cast<Size_t>(sizeof(Buffer<Tcu>))},
                          true);
  }
}

// Forward pass is an identity copy of the input data to the output.
template <typename T>
void TopKGradCuda<T>::forward_impl(const Variables &inputs,
                                   const Variables &outputs) {
  cuda_set_device(this->device_);
  const auto x = inputs[0];
  const auto y = outputs[0];
  auto x_data = x->data()->get(get_dtype<Tcu>(), this->ctx_);
  auto y_data = y->data()->cast(get_dtype<Tcu>(), this->ctx_, true);
  y_data->copy_from(x_data);
}

// Backward pass: propagate the output gradient only at the top-k
// positions (per slice of `inner_size` elements), zeroing or keeping
// the rest depending on accum[0].
template <typename T>
void TopKGradCuda<T>::backward_impl(const Variables &inputs,
                                    const Variables &outputs,
                                    const vector<bool> &propagate_down,
                                    const vector<bool> &accum) {
  if (!(propagate_down[0]))
    return;

  using namespace top_k_grad;
  cuda_set_device(this->device_);

  const auto x = inputs[0];
  const auto y = outputs[0];

  auto y_grad = y->get_grad_pointer<Tcu>(this->ctx_);
  auto x_grad = x->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
  auto idx = (reinterpret_cast<Variable &>(this->top_k_idx_)
                  .get_data_pointer<unsigned int>(this->ctx_));

  if (!accum[0])
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_to_zero, x->size(), x_grad);

  // One "slice" per outer index; each slice is reduced independently.
  auto inner_size = y->size(this->base_axis_);
  auto outer_size = y->size() / inner_size;

  if (this->k_ > 1024) {
    // For large K we use thrust sort_by_key to do a radix sort of
    // data and index. This is not very efficient but large K is not
    // the expected use case. The code could be splitting the input
    // into a smaller partition of the k-th largest values before
    // sorting.
    auto buffer_raw =
        this->buffer_.cast(get_dtype<unsigned int>(), this->ctx_, true)
            ->template pointer<unsigned int>();
    auto buffer_ptr = thrust::device_pointer_cast(buffer_raw);

    for (int s = 0; s < outer_size; s++) {
      // Sort a copy of the gradient slice so y_grad itself stays intact.
      auto y_grad_vec = thrust::device_vector<Tcu>(y_grad, y_grad + inner_size);
      auto sorted_val = thrust::raw_pointer_cast(y_grad_vec.data());
      auto sorted_idx = thrust::raw_pointer_cast(buffer_ptr);

      if (this->abs_) {
        auto raw_ptr = thrust::raw_pointer_cast(y_grad_vec.data());
        NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_to_absolute, inner_size, raw_ptr);
      }

      // Descending sort keyed by gradient value; buffer_ptr receives the
      // permutation, so its first k_ entries are the top-k indices.
      thrust::sequence(buffer_ptr, buffer_ptr + inner_size);
      thrust::sort_by_key(y_grad_vec.begin(), y_grad_vec.end(), buffer_ptr,
                          thrust::greater<Tcu>());

      if (accum[0]) {
        NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(add_gradient, this->k_, sorted_idx,
                                       y_grad, x_grad);
      } else {
        NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_gradient, this->k_, sorted_idx,
                                       y_grad, x_grad);
      }
      y_grad += inner_size;
      x_grad += inner_size;
    }
  } else {
    // Small K: use the dedicated top_k selection utility per slice.
    auto buffer = this->buffer_.cast(get_dtype<char>(), this->ctx_, true)
                      ->template pointer<Buffer<Tcu>>();

    for (int s = 0; s < outer_size; s++) {
      if (this->abs_) {
        top_k<Tcu, true>(y_grad, inner_size, this->k_, buffer);
      } else {
        top_k<Tcu, false>(y_grad, inner_size, this->k_, buffer);
      }

      if (accum[0]) {
        NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(add_gradient, this->k_,
                                       &buffer->sorted[0], y_grad, x_grad);
      } else {
        NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_gradient, this->k_,
                                       &buffer->sorted[0], y_grad, x_grad);
      }
      y_grad += inner_size;
      x_grad += inner_size;
    }
  }
}
} // namespace nbla
98495b658dfca7c6288ccaa327d35b45b8ed26df.hip
// !!! This is a file automatically generated by hipify!!! #include "cudaSplitElements.h" #include "cudaSplitEncsegs.h" #include "cudaFlipFlop.h" #include "cudaMesh.h" void splitElements( Real2D &t_pointlist, PStatusD &t_PStatus, IntD &t_segmentlist, IntD &t_subseg2tri, IntD &t_subseg2seg, IntD &t_encmarker, IntD &t_trianglelist, IntD &t_neighborlist, IntD &t_tri2subseg, TStatusD &t_TStatus, IntD &t_emptypoints, IntD &t_emptytriangles, int pointblock, int triblock, int * numberofemptypoints, int * numberofemptytriangles, int * numberofpoints, int * numberoftriangles, int * numberofsubseg, int offconstant, int offcenter, int encmode, int filtermode, int unifymode, REAL theta, REAL size ) { printf("Splitting bad elements....\n"); IntD t_badelementlist; IntD t_badsubseglist, t_badtrianglelist; IntD t_threadlist; #ifdef GQM2D_PRIORITY_SIZE RealD t_pointpriority(t_PStatus.size(), 0.0); UInt64D t_trimarker(t_TStatus.size()); #else IntD t_trimarker(t_TStatus.size()); #endif IntD t_flipBy(t_TStatus.size()); // Record subsegments that were encroached // and are not encroached because their // diametral circles have been cleared. 
// Such subsegments need to be split IntD t_segmarker(*numberofsubseg, -1); Real2D t_insertpt; IntD t_sinks; #ifdef GQM2D_PRIORITY_SIZE RealD t_priorityreal; IntD t_priority; #endif int numberofbadelements; int numberofbadsubsegs, numberofbadtriangles; int numberofwonsegs; int numberofsteiners; int numberofonsegs; int numberofblocks; int iteration = 0; int iter_numofpt = thrust::count_if(t_PStatus.begin(), t_PStatus.end(), isNotDeleted()); #ifndef GQM2D_QUIET printf(" iteration -1: number of points = %d, 0\n", iter_numofpt); #endif #ifdef GQM2D_CHECKMEMORY hipDeviceSynchronize(); gpuMemoryCheck(); #endif StopWatchInterface *iter_timer = 0; sdkCreateTimer(&iter_timer); // Reset timer hipDeviceSynchronize(); sdkResetTimer(&iter_timer); sdkStartTimer(&iter_timer); #ifdef GQM2D_ITER_PROFILING clock_t tv[2]; int npt[2]; tv[0] = clock(); #endif while (true) { #ifdef GQM2D_ITER_PROFILING npt[0] = thrust::count_if(t_PStatus.begin(), t_PStatus.end(), isNotDeleted()); #endif // Compute bad element list numberofbadsubsegs = updateActiveListByMarker_Slot(t_encmarker, t_badsubseglist, *numberofsubseg); numberofbadtriangles = updateActiveListToBadTriangles( t_pointlist, t_PStatus, t_trianglelist, t_neighborlist, t_segmentlist, t_subseg2seg, t_tri2subseg, t_TStatus, t_threadlist, // temporarily used t_badtrianglelist, *numberoftriangles, theta, size); if (unifymode == 0) // do not split subsegments and triangles together { if (numberofbadsubsegs > 0) numberofbadtriangles = 0; } numberofbadelements = numberofbadsubsegs + numberofbadtriangles; if (numberofbadelements == 0) break; #ifndef GQM2D_QUIET printf(" \niteration %d: #%d bad elements (#%d subsegs, #%d triangles)\n", iteration, numberofbadelements, numberofbadsubsegs, numberofbadtriangles); #endif #ifdef GQM2D_CHECKMEMORY hipDeviceSynchronize(); gpuMemoryCheck(); #endif t_badelementlist.resize(numberofbadelements); thrust::copy_n(t_badsubseglist.begin(), numberofbadsubsegs, t_badelementlist.begin()); 
thrust::copy_n(t_badtrianglelist.begin(), numberofbadtriangles, t_badelementlist.begin() + numberofbadsubsegs); t_insertpt.resize(numberofbadelements); t_sinks.resize(numberofbadelements); #ifdef GQM2D_PRIORITY_SIZE t_priorityreal.resize(numberofbadelements); t_priority.resize(numberofbadelements); #endif // Compute splitting points and priorites numberofblocks = (ceil)((float)numberofbadelements / BLOCK_SIZE); kernelComputeSplittingPointAndPriority << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_badelementlist[0]), numberofbadelements, numberofbadsubsegs, thrust::raw_pointer_cast(&t_insertpt[0]), #ifdef GQM2D_PRIORITY_SIZE thrust::raw_pointer_cast(&t_priorityreal[0]), #endif offconstant, offcenter); #ifdef GQM2D_DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif #ifdef GQM2D_PRIORITY_SIZE // Modify priorities and convert them into integers // Make sure subseg > triangle double priority_min[2], priority_max[2], priority_offset[2] = { 0, 0 }; thrust::pair<RealD::iterator, RealD::iterator> priority_pair; if (numberofbadtriangles > 0) { priority_pair = thrust::minmax_element( t_priorityreal.begin() + numberofbadsubsegs, t_priorityreal.end()); priority_min[1] = *priority_pair.first; priority_max[1] = *priority_pair.second; priority_offset[1] = 0; #ifdef GQM2D_DEBUG printf("MinMax Real priorities for triangles: %lf, %lf\n", priority_min[1], priority_max[1]); printf("Offset: %lf\n", priority_offset[1]); #endif } if (numberofbadsubsegs > 0) { priority_pair = thrust::minmax_element( t_priorityreal.begin(), t_priorityreal.begin() + numberofbadsubsegs); priority_min[0] = *priority_pair.first; priority_max[0] = *priority_pair.second; if (numberofbadtriangles > 0) priority_offset[0] = priority_max[1] + 
priority_offset[1] + 10 - priority_min[0]; else priority_offset[0] = 0; #ifdef GQM2D_DEBUG printf("MinMax Real priorities for subsegs: %lf, %lf\n", priority_min[0], priority_max[0]); printf("Offset: %lf\n", priority_offset[0]); #endif } kernelModifyPriority << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_priorityreal[0]), thrust::raw_pointer_cast(&t_priority[0]), priority_offset[0], priority_offset[1], thrust::raw_pointer_cast(&t_badelementlist[0]), numberofbadelements, numberofbadsubsegs); #ifdef GQM2D_DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); if (numberofbadsubsegs > 0) { priority_pair = thrust::minmax_element( t_priorityreal.begin(), t_priorityreal.begin() + numberofbadsubsegs); priority_min[0] = *priority_pair.first; priority_max[0] = *priority_pair.second; printf("MinMax Real priorities for subsegs after modification: %lf, %lf\n", priority_min[0], priority_max[0]); } if (numberofbadtriangles > 0) { priority_pair = thrust::minmax_element( t_priorityreal.begin() + numberofbadsubsegs, t_priorityreal.end()); priority_min[1] = *priority_pair.first; priority_max[1] = *priority_pair.second; printf("MinMax Real priorities for triangles after modification: %lf, %lf\n", priority_min[1], priority_max[1]); } #endif #endif // Locate splitting and do marking competition #ifdef GQM2D_PRIORITY_SIZE thrust::fill(t_trimarker.begin(), t_trimarker.begin() + *numberoftriangles, 0); #else thrust::fill(t_trimarker.begin(), t_trimarker.begin() + *numberoftriangles, MAXINT); #endif kernelLocateSplittingPoints << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_TStatus[0]), thrust::raw_pointer_cast(&t_insertpt[0]), thrust::raw_pointer_cast(&t_trimarker[0]), #ifdef GQM2D_PRIORITY_SIZE 
thrust::raw_pointer_cast(&t_priority[0]), #endif thrust::raw_pointer_cast(&t_sinks[0]), thrust::raw_pointer_cast(&t_badelementlist[0]), numberofbadelements, numberofbadsubsegs); #ifdef GQM2D_DEBUG //printf("number of winner after kernelLocateSplittingPoints = %d\n", // thrust::count_if(t_badelementlist.begin(), t_badelementlist.end(), isNotNegativeInt())); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif if (filtermode) { // Fast cavities checking to avoid unnecessary point insertions // and encroachment kernelFastCavityCheck << <numberofblocks, BLOCK_SIZE >> > ( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_TStatus[0]), thrust::raw_pointer_cast(&t_insertpt[0]), thrust::raw_pointer_cast(&t_trimarker[0]), #ifdef GQM2D_PRIORITY_SIZE thrust::raw_pointer_cast(&t_priority[0]), #endif thrust::raw_pointer_cast(&t_sinks[0]), thrust::raw_pointer_cast(&t_badelementlist[0]), numberofbadelements); #ifdef GQM2D_DEBUG //printf("number of winner after kernelFastCavityCheck = %d\n", // thrust::count_if(t_badelementlist.begin(), t_badelementlist.end(), isNotNegativeInt())); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif } // Mark subsegs who contain splitting points of winners as bad subsegs in t_segmarker kernelMarkOnSegs << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_segmarker[0]), thrust::raw_pointer_cast(&t_sinks[0]), thrust::raw_pointer_cast(&t_badelementlist[0]), numberofbadelements, numberofbadsubsegs); #ifdef GQM2D_DEBUG //printf("number of winner after kernelMarkOnSegs = %d\n", // thrust::count_if(t_badelementlist.begin(), t_badelementlist.end(), isNotNegativeInt())); gpuErrchk(hipPeekAtLastError()); 
gpuErrchk(hipDeviceSynchronize()); #endif numberofwonsegs = thrust::count_if(t_badelementlist.begin(), t_badelementlist.begin() + numberofbadsubsegs, isNotNegativeInt()); numberofsteiners = updateActiveListByMarker_Slot(t_badelementlist, t_threadlist, numberofbadelements); #ifdef GQM2D_DEBUG printf("numberofsteiners = %d, numberofwonsegs = %d\n", numberofsteiners, numberofwonsegs); #endif if (numberofsteiners == 0) goto END; // Prepare memory for new elements if (numberofsteiners > *numberofemptypoints) { *numberofemptypoints = updateEmptyPoints(t_PStatus, t_emptypoints); int num = 0; while (numberofsteiners > *numberofemptypoints + num*pointblock) num++; if (num != 0) { int old_size = t_PStatus.size(); PStatus emptyPoint; emptyPoint.setDeleted(); t_pointlist.resize(old_size + num*pointblock); t_PStatus.resize(old_size + num*pointblock, emptyPoint); #ifdef GQM2D_PRIORITY_SIZE t_pointpriority.resize(old_size + num*pointblock, 0.0); #endif *numberofemptypoints = updateEmptyPoints(t_PStatus, t_emptypoints); } } if (2 * numberofsteiners > *numberofemptytriangles) { *numberofemptytriangles = updateEmptyTriangles(t_TStatus, t_emptytriangles); int num = 0; while (2 * numberofsteiners > *numberofemptytriangles + num*triblock) num++; if (num != 0) { int old_size = t_TStatus.size(); TStatus emptyTri(true, false, false); t_trianglelist.resize(3 * (old_size + num*triblock)); t_neighborlist.resize(3 * (old_size + num*triblock)); t_tri2subseg.resize(3 * (old_size + num*triblock), -1); t_TStatus.resize(old_size + num*triblock, emptyTri); t_trimarker.resize(old_size + num*triblock); t_flipBy.resize(old_size + num*triblock); *numberofemptytriangles = updateEmptyTriangles(t_TStatus, t_emptytriangles); } } t_subseg2tri.resize(*numberofsubseg + numberofwonsegs); t_subseg2seg.resize(*numberofsubseg + numberofwonsegs); t_encmarker.resize(*numberofsubseg + numberofwonsegs, -1); t_segmarker.resize(*numberofsubseg + numberofwonsegs, -1); // Insert splitting point numberofblocks = 
(ceil)((float)numberofsteiners / BLOCK_SIZE); kernelResetNeighborMarker << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_TStatus[0]), thrust::raw_pointer_cast(&t_sinks[0]), thrust::raw_pointer_cast(&t_threadlist[0]), numberofsteiners); #ifdef GQM2D_DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif kernelInsertSplittingPoints << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_PStatus[0]), #ifdef GQM2D_PRIORITY_SIZE thrust::raw_pointer_cast(&t_pointpriority[0]), thrust::raw_pointer_cast(&t_priorityreal[0]), #endif thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_subseg2seg[0]), thrust::raw_pointer_cast(&t_encmarker[0]), thrust::raw_pointer_cast(&t_segmarker[0]), thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_TStatus[0]), thrust::raw_pointer_cast(&t_insertpt[0]), thrust::raw_pointer_cast(&t_sinks[0]), thrust::raw_pointer_cast(&t_emptypoints[0]), thrust::raw_pointer_cast(&t_emptytriangles[0]), thrust::raw_pointer_cast(&t_badelementlist[0]), thrust::raw_pointer_cast(&t_threadlist[0]), t_emptypoints.size(), t_emptytriangles.size(), *numberofemptypoints, *numberofemptytriangles, *numberofsubseg, numberofwonsegs, numberofsteiners, encmode, theta); #ifdef GQM2D_DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif kernelUpdateNeighbors << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_PStatus[0]), thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_encmarker[0]), thrust::raw_pointer_cast(&t_segmarker[0]), thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_TStatus[0]), 
thrust::raw_pointer_cast(&t_sinks[0]), thrust::raw_pointer_cast(&t_emptypoints[0]), thrust::raw_pointer_cast(&t_threadlist[0]), t_emptypoints.size(), *numberofemptypoints, numberofwonsegs, numberofsteiners, encmode, theta); #ifdef GQM2D_DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif // Update iteration variables // (1) check if there are any slots before last points/triangles // (2) update last points/triangles/subsegs // (3) update number of empty points/triangles int slot_before, slot_after; // point slots slot_after = t_PStatus.size() - *numberofpoints; slot_before = *numberofemptypoints - slot_after; if (slot_before < numberofsteiners) *numberofpoints += numberofsteiners - slot_before; *numberofemptypoints -= numberofsteiners; // triangle slots slot_after = t_TStatus.size() - *numberoftriangles; slot_before = *numberofemptytriangles - slot_after; if (slot_before < 2 * numberofsteiners) *numberoftriangles += 2 * numberofsteiners - slot_before; *numberofemptytriangles -= 2 * numberofsteiners; // subseg *numberofsubseg += numberofwonsegs; #ifdef GQM2D_DEBUG iter_numofpt = thrust::count_if(t_PStatus.begin(), t_PStatus.end(), isNotDeleted()); printf(" Number of points before flipFlop = %d\n", iter_numofpt); #endif // Maintain denauly property, do flip-flop flipFlop( t_pointlist, t_PStatus, #ifdef GQM2D_PRIORITY_SIZE t_pointpriority, #endif t_trianglelist, t_neighborlist, t_tri2subseg, t_TStatus, t_subseg2tri, t_flipBy, // flipBy t_sinks, // flipActive t_encmarker, t_segmarker, t_threadlist, // linklist t_badelementlist, // linkslot *numberoftriangles, encmode, theta, -1, -1); #ifdef GQM2D_DEBUG_2 printf("non-negative in segmarker = %d\n", thrust::count_if(t_segmarker.begin(), t_segmarker.end(), isNotNegativeInt())); #endif // mark bad subsegs as encroached subsegs using t_segmarker END: if (*numberofsubseg > 0) { numberofblocks = (ceil)((float)(*numberofsubseg) / BLOCK_SIZE); markBadSubsegsAsEncroached << <numberofblocks, BLOCK_SIZE 
>> > ( thrust::raw_pointer_cast(&t_encmarker[0]), thrust::raw_pointer_cast(&t_segmarker[0]), *numberofsubseg); #ifdef GQM2D_DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif } numberofblocks = (ceil)((float)(*numberofpoints) / BLOCK_SIZE); kernelUpdatePStatus2Old << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_PStatus[0]), *numberofpoints); #ifdef GQM2D_DEBUG iter_numofpt = thrust::count_if(t_PStatus.begin(), t_PStatus.end(), isNotDeleted()); printf(" Number of points after flipFlop = %d\n", iter_numofpt); #endif #ifdef GQM2D_ITER_PROFILING hipDeviceSynchronize(); tv[1] = clock(); npt[1] = thrust::count_if(t_PStatus.begin(), t_PStatus.end(), isNotDeleted()); printf("%d, %lf, %d, %d\n", iteration, (REAL)(tv[1] - tv[0]), numberofbadelements, npt[1] - npt[0]); #endif iteration++; } // Get timer. hipDeviceSynchronize(); sdkStopTimer(&iter_timer); printf("1. Split bad elements time = %.3f ms\n", sdkGetTimerValue(&iter_timer)); }
98495b658dfca7c6288ccaa327d35b45b8ed26df.cu
#include "cudaSplitElements.h" #include "cudaSplitEncsegs.h" #include "cudaFlipFlop.h" #include "cudaMesh.h" void splitElements( Real2D &t_pointlist, PStatusD &t_PStatus, IntD &t_segmentlist, IntD &t_subseg2tri, IntD &t_subseg2seg, IntD &t_encmarker, IntD &t_trianglelist, IntD &t_neighborlist, IntD &t_tri2subseg, TStatusD &t_TStatus, IntD &t_emptypoints, IntD &t_emptytriangles, int pointblock, int triblock, int * numberofemptypoints, int * numberofemptytriangles, int * numberofpoints, int * numberoftriangles, int * numberofsubseg, int offconstant, int offcenter, int encmode, int filtermode, int unifymode, REAL theta, REAL size ) { printf("Splitting bad elements....\n"); IntD t_badelementlist; IntD t_badsubseglist, t_badtrianglelist; IntD t_threadlist; #ifdef GQM2D_PRIORITY_SIZE RealD t_pointpriority(t_PStatus.size(), 0.0); UInt64D t_trimarker(t_TStatus.size()); #else IntD t_trimarker(t_TStatus.size()); #endif IntD t_flipBy(t_TStatus.size()); // Record subsegments that were encroached // and are not encroached because their // diametral circles have been cleared. 
// Such subsegments need to be split IntD t_segmarker(*numberofsubseg, -1); Real2D t_insertpt; IntD t_sinks; #ifdef GQM2D_PRIORITY_SIZE RealD t_priorityreal; IntD t_priority; #endif int numberofbadelements; int numberofbadsubsegs, numberofbadtriangles; int numberofwonsegs; int numberofsteiners; int numberofonsegs; int numberofblocks; int iteration = 0; int iter_numofpt = thrust::count_if(t_PStatus.begin(), t_PStatus.end(), isNotDeleted()); #ifndef GQM2D_QUIET printf(" iteration -1: number of points = %d, 0\n", iter_numofpt); #endif #ifdef GQM2D_CHECKMEMORY cudaDeviceSynchronize(); gpuMemoryCheck(); #endif StopWatchInterface *iter_timer = 0; sdkCreateTimer(&iter_timer); // Reset timer cudaDeviceSynchronize(); sdkResetTimer(&iter_timer); sdkStartTimer(&iter_timer); #ifdef GQM2D_ITER_PROFILING clock_t tv[2]; int npt[2]; tv[0] = clock(); #endif while (true) { #ifdef GQM2D_ITER_PROFILING npt[0] = thrust::count_if(t_PStatus.begin(), t_PStatus.end(), isNotDeleted()); #endif // Compute bad element list numberofbadsubsegs = updateActiveListByMarker_Slot(t_encmarker, t_badsubseglist, *numberofsubseg); numberofbadtriangles = updateActiveListToBadTriangles( t_pointlist, t_PStatus, t_trianglelist, t_neighborlist, t_segmentlist, t_subseg2seg, t_tri2subseg, t_TStatus, t_threadlist, // temporarily used t_badtrianglelist, *numberoftriangles, theta, size); if (unifymode == 0) // do not split subsegments and triangles together { if (numberofbadsubsegs > 0) numberofbadtriangles = 0; } numberofbadelements = numberofbadsubsegs + numberofbadtriangles; if (numberofbadelements == 0) break; #ifndef GQM2D_QUIET printf(" \niteration %d: #%d bad elements (#%d subsegs, #%d triangles)\n", iteration, numberofbadelements, numberofbadsubsegs, numberofbadtriangles); #endif #ifdef GQM2D_CHECKMEMORY cudaDeviceSynchronize(); gpuMemoryCheck(); #endif t_badelementlist.resize(numberofbadelements); thrust::copy_n(t_badsubseglist.begin(), numberofbadsubsegs, t_badelementlist.begin()); 
thrust::copy_n(t_badtrianglelist.begin(), numberofbadtriangles, t_badelementlist.begin() + numberofbadsubsegs); t_insertpt.resize(numberofbadelements); t_sinks.resize(numberofbadelements); #ifdef GQM2D_PRIORITY_SIZE t_priorityreal.resize(numberofbadelements); t_priority.resize(numberofbadelements); #endif // Compute splitting points and priorites numberofblocks = (ceil)((float)numberofbadelements / BLOCK_SIZE); kernelComputeSplittingPointAndPriority << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_badelementlist[0]), numberofbadelements, numberofbadsubsegs, thrust::raw_pointer_cast(&t_insertpt[0]), #ifdef GQM2D_PRIORITY_SIZE thrust::raw_pointer_cast(&t_priorityreal[0]), #endif offconstant, offcenter); #ifdef GQM2D_DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif #ifdef GQM2D_PRIORITY_SIZE // Modify priorities and convert them into integers // Make sure subseg > triangle double priority_min[2], priority_max[2], priority_offset[2] = { 0, 0 }; thrust::pair<RealD::iterator, RealD::iterator> priority_pair; if (numberofbadtriangles > 0) { priority_pair = thrust::minmax_element( t_priorityreal.begin() + numberofbadsubsegs, t_priorityreal.end()); priority_min[1] = *priority_pair.first; priority_max[1] = *priority_pair.second; priority_offset[1] = 0; #ifdef GQM2D_DEBUG printf("MinMax Real priorities for triangles: %lf, %lf\n", priority_min[1], priority_max[1]); printf("Offset: %lf\n", priority_offset[1]); #endif } if (numberofbadsubsegs > 0) { priority_pair = thrust::minmax_element( t_priorityreal.begin(), t_priorityreal.begin() + numberofbadsubsegs); priority_min[0] = *priority_pair.first; priority_max[0] = *priority_pair.second; if (numberofbadtriangles > 0) priority_offset[0] = priority_max[1] + 
priority_offset[1] + 10 - priority_min[0]; else priority_offset[0] = 0; #ifdef GQM2D_DEBUG printf("MinMax Real priorities for subsegs: %lf, %lf\n", priority_min[0], priority_max[0]); printf("Offset: %lf\n", priority_offset[0]); #endif } kernelModifyPriority << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_priorityreal[0]), thrust::raw_pointer_cast(&t_priority[0]), priority_offset[0], priority_offset[1], thrust::raw_pointer_cast(&t_badelementlist[0]), numberofbadelements, numberofbadsubsegs); #ifdef GQM2D_DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); if (numberofbadsubsegs > 0) { priority_pair = thrust::minmax_element( t_priorityreal.begin(), t_priorityreal.begin() + numberofbadsubsegs); priority_min[0] = *priority_pair.first; priority_max[0] = *priority_pair.second; printf("MinMax Real priorities for subsegs after modification: %lf, %lf\n", priority_min[0], priority_max[0]); } if (numberofbadtriangles > 0) { priority_pair = thrust::minmax_element( t_priorityreal.begin() + numberofbadsubsegs, t_priorityreal.end()); priority_min[1] = *priority_pair.first; priority_max[1] = *priority_pair.second; printf("MinMax Real priorities for triangles after modification: %lf, %lf\n", priority_min[1], priority_max[1]); } #endif #endif // Locate splitting and do marking competition #ifdef GQM2D_PRIORITY_SIZE thrust::fill(t_trimarker.begin(), t_trimarker.begin() + *numberoftriangles, 0); #else thrust::fill(t_trimarker.begin(), t_trimarker.begin() + *numberoftriangles, MAXINT); #endif kernelLocateSplittingPoints << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_TStatus[0]), thrust::raw_pointer_cast(&t_insertpt[0]), thrust::raw_pointer_cast(&t_trimarker[0]), #ifdef GQM2D_PRIORITY_SIZE 
thrust::raw_pointer_cast(&t_priority[0]), #endif thrust::raw_pointer_cast(&t_sinks[0]), thrust::raw_pointer_cast(&t_badelementlist[0]), numberofbadelements, numberofbadsubsegs); #ifdef GQM2D_DEBUG //printf("number of winner after kernelLocateSplittingPoints = %d\n", // thrust::count_if(t_badelementlist.begin(), t_badelementlist.end(), isNotNegativeInt())); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif if (filtermode) { // Fast cavities checking to avoid unnecessary point insertions // and encroachment kernelFastCavityCheck << <numberofblocks, BLOCK_SIZE >> > ( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_TStatus[0]), thrust::raw_pointer_cast(&t_insertpt[0]), thrust::raw_pointer_cast(&t_trimarker[0]), #ifdef GQM2D_PRIORITY_SIZE thrust::raw_pointer_cast(&t_priority[0]), #endif thrust::raw_pointer_cast(&t_sinks[0]), thrust::raw_pointer_cast(&t_badelementlist[0]), numberofbadelements); #ifdef GQM2D_DEBUG //printf("number of winner after kernelFastCavityCheck = %d\n", // thrust::count_if(t_badelementlist.begin(), t_badelementlist.end(), isNotNegativeInt())); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif } // Mark subsegs who contain splitting points of winners as bad subsegs in t_segmarker kernelMarkOnSegs << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_segmarker[0]), thrust::raw_pointer_cast(&t_sinks[0]), thrust::raw_pointer_cast(&t_badelementlist[0]), numberofbadelements, numberofbadsubsegs); #ifdef GQM2D_DEBUG //printf("number of winner after kernelMarkOnSegs = %d\n", // thrust::count_if(t_badelementlist.begin(), t_badelementlist.end(), isNotNegativeInt())); gpuErrchk(cudaPeekAtLastError()); 
gpuErrchk(cudaDeviceSynchronize()); #endif numberofwonsegs = thrust::count_if(t_badelementlist.begin(), t_badelementlist.begin() + numberofbadsubsegs, isNotNegativeInt()); numberofsteiners = updateActiveListByMarker_Slot(t_badelementlist, t_threadlist, numberofbadelements); #ifdef GQM2D_DEBUG printf("numberofsteiners = %d, numberofwonsegs = %d\n", numberofsteiners, numberofwonsegs); #endif if (numberofsteiners == 0) goto END; // Prepare memory for new elements if (numberofsteiners > *numberofemptypoints) { *numberofemptypoints = updateEmptyPoints(t_PStatus, t_emptypoints); int num = 0; while (numberofsteiners > *numberofemptypoints + num*pointblock) num++; if (num != 0) { int old_size = t_PStatus.size(); PStatus emptyPoint; emptyPoint.setDeleted(); t_pointlist.resize(old_size + num*pointblock); t_PStatus.resize(old_size + num*pointblock, emptyPoint); #ifdef GQM2D_PRIORITY_SIZE t_pointpriority.resize(old_size + num*pointblock, 0.0); #endif *numberofemptypoints = updateEmptyPoints(t_PStatus, t_emptypoints); } } if (2 * numberofsteiners > *numberofemptytriangles) { *numberofemptytriangles = updateEmptyTriangles(t_TStatus, t_emptytriangles); int num = 0; while (2 * numberofsteiners > *numberofemptytriangles + num*triblock) num++; if (num != 0) { int old_size = t_TStatus.size(); TStatus emptyTri(true, false, false); t_trianglelist.resize(3 * (old_size + num*triblock)); t_neighborlist.resize(3 * (old_size + num*triblock)); t_tri2subseg.resize(3 * (old_size + num*triblock), -1); t_TStatus.resize(old_size + num*triblock, emptyTri); t_trimarker.resize(old_size + num*triblock); t_flipBy.resize(old_size + num*triblock); *numberofemptytriangles = updateEmptyTriangles(t_TStatus, t_emptytriangles); } } t_subseg2tri.resize(*numberofsubseg + numberofwonsegs); t_subseg2seg.resize(*numberofsubseg + numberofwonsegs); t_encmarker.resize(*numberofsubseg + numberofwonsegs, -1); t_segmarker.resize(*numberofsubseg + numberofwonsegs, -1); // Insert splitting point numberofblocks = 
(ceil)((float)numberofsteiners / BLOCK_SIZE); kernelResetNeighborMarker << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_TStatus[0]), thrust::raw_pointer_cast(&t_sinks[0]), thrust::raw_pointer_cast(&t_threadlist[0]), numberofsteiners); #ifdef GQM2D_DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif kernelInsertSplittingPoints << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_PStatus[0]), #ifdef GQM2D_PRIORITY_SIZE thrust::raw_pointer_cast(&t_pointpriority[0]), thrust::raw_pointer_cast(&t_priorityreal[0]), #endif thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_subseg2seg[0]), thrust::raw_pointer_cast(&t_encmarker[0]), thrust::raw_pointer_cast(&t_segmarker[0]), thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_TStatus[0]), thrust::raw_pointer_cast(&t_insertpt[0]), thrust::raw_pointer_cast(&t_sinks[0]), thrust::raw_pointer_cast(&t_emptypoints[0]), thrust::raw_pointer_cast(&t_emptytriangles[0]), thrust::raw_pointer_cast(&t_badelementlist[0]), thrust::raw_pointer_cast(&t_threadlist[0]), t_emptypoints.size(), t_emptytriangles.size(), *numberofemptypoints, *numberofemptytriangles, *numberofsubseg, numberofwonsegs, numberofsteiners, encmode, theta); #ifdef GQM2D_DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif kernelUpdateNeighbors << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_PStatus[0]), thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_encmarker[0]), thrust::raw_pointer_cast(&t_segmarker[0]), thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_TStatus[0]), 
thrust::raw_pointer_cast(&t_sinks[0]), thrust::raw_pointer_cast(&t_emptypoints[0]), thrust::raw_pointer_cast(&t_threadlist[0]), t_emptypoints.size(), *numberofemptypoints, numberofwonsegs, numberofsteiners, encmode, theta); #ifdef GQM2D_DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif // Update iteration variables // (1) check if there are any slots before last points/triangles // (2) update last points/triangles/subsegs // (3) update number of empty points/triangles int slot_before, slot_after; // point slots slot_after = t_PStatus.size() - *numberofpoints; slot_before = *numberofemptypoints - slot_after; if (slot_before < numberofsteiners) *numberofpoints += numberofsteiners - slot_before; *numberofemptypoints -= numberofsteiners; // triangle slots slot_after = t_TStatus.size() - *numberoftriangles; slot_before = *numberofemptytriangles - slot_after; if (slot_before < 2 * numberofsteiners) *numberoftriangles += 2 * numberofsteiners - slot_before; *numberofemptytriangles -= 2 * numberofsteiners; // subseg *numberofsubseg += numberofwonsegs; #ifdef GQM2D_DEBUG iter_numofpt = thrust::count_if(t_PStatus.begin(), t_PStatus.end(), isNotDeleted()); printf(" Number of points before flipFlop = %d\n", iter_numofpt); #endif // Maintain denauly property, do flip-flop flipFlop( t_pointlist, t_PStatus, #ifdef GQM2D_PRIORITY_SIZE t_pointpriority, #endif t_trianglelist, t_neighborlist, t_tri2subseg, t_TStatus, t_subseg2tri, t_flipBy, // flipBy t_sinks, // flipActive t_encmarker, t_segmarker, t_threadlist, // linklist t_badelementlist, // linkslot *numberoftriangles, encmode, theta, -1, -1); #ifdef GQM2D_DEBUG_2 printf("non-negative in segmarker = %d\n", thrust::count_if(t_segmarker.begin(), t_segmarker.end(), isNotNegativeInt())); #endif // mark bad subsegs as encroached subsegs using t_segmarker END: if (*numberofsubseg > 0) { numberofblocks = (ceil)((float)(*numberofsubseg) / BLOCK_SIZE); markBadSubsegsAsEncroached << <numberofblocks, 
BLOCK_SIZE >> > ( thrust::raw_pointer_cast(&t_encmarker[0]), thrust::raw_pointer_cast(&t_segmarker[0]), *numberofsubseg); #ifdef GQM2D_DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif } numberofblocks = (ceil)((float)(*numberofpoints) / BLOCK_SIZE); kernelUpdatePStatus2Old << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_PStatus[0]), *numberofpoints); #ifdef GQM2D_DEBUG iter_numofpt = thrust::count_if(t_PStatus.begin(), t_PStatus.end(), isNotDeleted()); printf(" Number of points after flipFlop = %d\n", iter_numofpt); #endif #ifdef GQM2D_ITER_PROFILING cudaDeviceSynchronize(); tv[1] = clock(); npt[1] = thrust::count_if(t_PStatus.begin(), t_PStatus.end(), isNotDeleted()); printf("%d, %lf, %d, %d\n", iteration, (REAL)(tv[1] - tv[0]), numberofbadelements, npt[1] - npt[0]); #endif iteration++; } // Get timer. cudaDeviceSynchronize(); sdkStopTimer(&iter_timer); printf("1. Split bad elements time = %.3f ms\n", sdkGetTimerValue(&iter_timer)); }
fbf540a57075173312a2b20c082edad79a519c7a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kernel_add_regularization_term(double * d_input_vector, int dimension, double regularization_parameter, double * d_rv) { if (threadIdx.x == 0) { double sum = 0; for (int i = 1; i < dimension; ++i) { sum += 0.5 * d_input_vector[i] * d_input_vector[i] * regularization_parameter; } *d_rv += sum; } }
fbf540a57075173312a2b20c082edad79a519c7a.cu
#include "includes.h" __global__ void kernel_add_regularization_term(double * d_input_vector, int dimension, double regularization_parameter, double * d_rv) { if (threadIdx.x == 0) { double sum = 0; for (int i = 1; i < dimension; ++i) { sum += 0.5 * d_input_vector[i] * d_input_vector[i] * regularization_parameter; } *d_rv += sum; } }
1ee2c9fb9b084cc539e0a11d50d86b902342eb6c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hiprand/hiprand_kernel.h> #include <stdio.h> #include "params.h" __device__ int getIndex(int t_x, int t_y) { // calculate full index from a grid position int indx = __mul24(t_y,blockDim.x) + t_x; return __mul24(blockDim.y, __mul24(blockIdx.x, blockDim.x)) + indx; } __device__ int getIndex(int t_x) { // calculate full index from a grid position return __mul24(blockDim.y, __mul24(blockIdx.x, blockDim.x)) + t_x; } __global__ void d_initRands(hiprandState_t *state, int seed) { int id = getIndex(threadIdx.x, threadIdx.y); /* Each thread gets same seed, a different sequence * number, no offset */ hiprand_init(seed, id, 0, &state[id]); } __global__ void d_updateStates(int* states, float* wg, int N_x, hiprandState_t* d_rands, int NL, int t) { int id = getIndex(threadIdx.x, threadIdx.y); int edges=80; int neigh[8][2] = { { 1, 1 }, { 1, 0 }, { 1, -1 } , { 0, 1 }, { 0, -1 }, { -1, -1 } , { -1, 0 }, { -1, 1 } }; int deltan = 0; int bl = blockIdx.x; int N = N_x*N_x; int myInd = threadIdx.y*N_x + threadIdx.x; //generate random permutation array int permList[8] = {0,1,2,3,4,5,6,7}; int perm[8] ;//= {0,1,2,3,4,5,6,7}; /* for (int e=0;e<edges;e++) { int n = hiprand_uniform(&d_rands[id])*8; if (n==8) n==7; bool up = (hiprand_uniform(&d_rands[id])>0.5); while (permList[n]<0) { if (up) n++; else n--; if (n<0) n=7; if (n>7) n=0; } perm[e]=permList[n]; permList[n]=-1; } // */ for (int e=0;e<edges;e++) { int distance = ceil(float(NL)*hiprand_uniform(&d_rands[id])); if (hiprand_uniform(&d_rands[id])>0.5) distance = -distance; int n2 = (((myInd + distance) % N) + N) % N; int n2_id = getIndex(n2); if (states[n2_id]>0.5) deltan++; } // if (t>1) // if (states[id]<0.5) // printf("%d %d %0.3f %d \n", t, myInd, deltan/80.0, states[id]); bool debug = false; if ((debug)&&(id==0)) { int sCount = 0; for (int x_n=0;x_n<N_x;x_n++) for (int y_n=0;y_n<N_x;y_n++) { int n2_id = getIndex(x_n, y_n); if 
(states[n2_id]>0.5) sCount++; } printf("%d %d %d %d \n",t, deltan, sCount, states[id]); } // deltan is N+ right now but we want (N+ - N-) deltan*=2; deltan-=edges; float cue = 1.0f + ( hiprand_normal(&d_rands[id]) * sqrtf(1.0f/(2.0f*wg[id])) ) ; float pup = exp(-4.0f*wg[id]*cue); float pall = pup*powf((1.0f - ws)/ws,deltan); int newState; if (pall<1.0f) newState = 1; else newState = 0; __syncthreads(); states[id] = newState; } __global__ void d_recordData(int* states, int* states2, hiprandState_t* d_rands, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t) { int group_id = threadIdx.y * N_x + threadIdx.x; int N = N_x*N_x; if ((group_id==0)&&(blockIdx.x==0)) for (int b=0;b<gridDim.x;b++) { if (t==0) for (int i=0;i<N;i++) states2[b * N + i] = states[b * N + i]; else { int totalUp = 0; for (int i=0;i<N;i++) if (states2[b * N + i] > 0.5) totalUp++; int nowDown = 0; for (int i=0;i<N;i++) if ((states2[b * N + i] > 0.5)&&(states[b * N + i] < 0.5)) nowDown++; int nowUp = 0; for (int i=0;i<N;i++) if ((states2[b * N + i] < 0.5)&&(states[b * N + i] > 0.5)) nowUp++; d_upcount[totalUp]+=1; int c = d_upcount[totalUp]; // printf("%d %d %d %d\n",t, totalUp,nowDown, nowUp); d_down[totalUp] = (nowDown/(float)N)/(float)c + (c-1)*d_down[totalUp]/(float)c; d_up[totalUp] = (nowUp/(float)N)/(float)c + (c-1)*d_up[totalUp]/(float)c; // res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1); // now for something crazy!!! 
// we're going to count all the uppies and then put them all in order totalUp=0; for (int i=0;i<N;i++) { if (states[b * N + i] > 0.5) totalUp++; // states[b * N + i] = 0; } // totalUp=32; /* int nc = 0.875 * totalUp; float frac = float(totalUp-nc)/float(N-totalUp); for (int i=0;i<nc;i++) states[b * N + i] = 1; for (int i=nc;i<N;i++) if (hiprand_uniform(&d_rands[group_id])< frac) states[b * N + i] = 1; */ // int i2 = totalUp + 0.5*(N-totalUp); // states[b * N + i2] = 1; // for (int i=0;i<N;i++) states2[b * N + i] = states[b * N + i]; } //res[t * gridDim.y + blockIdx.y] = counter; // if (t==0) // res[blockIdx.y] = counter; // else // res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1); } } __global__ void block_sum(const int *input, int *per_block_results, const size_t n) { extern __shared__ int sdata[]; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // load input into __shared__ memory int x = 0; if(i < n) { x = input[i]; } sdata[threadIdx.x] = x; __syncthreads(); // contiguous range pattern for(int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if(threadIdx.x < offset) { // add a partial sum upstream to our own sdata[threadIdx.x] += sdata[threadIdx.x + offset]; } // wait until all threads in the block hav // updated their partial sums __syncthreads(); } // thread 0 writes the final result if(threadIdx.x == 0) { per_block_results[blockIdx.x] = sdata[0]; } } void initRands(dim3 threadGrid, int numBlocks, hiprandState_t *state, int seed) { hipLaunchKernelGGL(( d_initRands), dim3(numBlocks), dim3(threadGrid) , 0, 0, state, seed); if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" ); } void advanceTimestep(dim3 threadGrid, int numBlocks, hiprandState_t *rands, float* wg, int* states, int N_x, int NL, int t) { hipLaunchKernelGGL(( d_updateStates), dim3(numBlocks), dim3(threadGrid) , 0, 0, states, wg, N_x, rands, NL, t); if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" ); } void recordData(dim3 threadGrid, int 
numBlocks, int* states, int* states2, hiprandState_t *rands, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t) { hipLaunchKernelGGL(( d_recordData), dim3(numBlocks), dim3(threadGrid) , 0, 0, states, states2, rands, N_x, d_up, d_down, d_upcount, d_downcount, t); if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" ); } void countStates(int numThreads, int numBlocks, int* states, int* blockTotals, int N_ALL) { hipLaunchKernelGGL(( block_sum), dim3(numBlocks), dim3(numThreads), numThreads * sizeof(int) , 0, states, blockTotals, N_ALL); if (hipSuccess != hipGetLastError()) printf( "cuda error!\n" ); }
1ee2c9fb9b084cc539e0a11d50d86b902342eb6c.cu
#include <curand_kernel.h> #include <stdio.h> #include "params.h" __device__ int getIndex(int t_x, int t_y) { // calculate full index from a grid position int indx = __mul24(t_y,blockDim.x) + t_x; return __mul24(blockDim.y, __mul24(blockIdx.x, blockDim.x)) + indx; } __device__ int getIndex(int t_x) { // calculate full index from a grid position return __mul24(blockDim.y, __mul24(blockIdx.x, blockDim.x)) + t_x; } __global__ void d_initRands(curandState *state, int seed) { int id = getIndex(threadIdx.x, threadIdx.y); /* Each thread gets same seed, a different sequence * number, no offset */ curand_init(seed, id, 0, &state[id]); } __global__ void d_updateStates(int* states, float* wg, int N_x, curandState* d_rands, int NL, int t) { int id = getIndex(threadIdx.x, threadIdx.y); int edges=80; int neigh[8][2] = { { 1, 1 }, { 1, 0 }, { 1, -1 } , { 0, 1 }, { 0, -1 }, { -1, -1 } , { -1, 0 }, { -1, 1 } }; int deltan = 0; int bl = blockIdx.x; int N = N_x*N_x; int myInd = threadIdx.y*N_x + threadIdx.x; //generate random permutation array int permList[8] = {0,1,2,3,4,5,6,7}; int perm[8] ;//= {0,1,2,3,4,5,6,7}; /* for (int e=0;e<edges;e++) { int n = curand_uniform(&d_rands[id])*8; if (n==8) n==7; bool up = (curand_uniform(&d_rands[id])>0.5); while (permList[n]<0) { if (up) n++; else n--; if (n<0) n=7; if (n>7) n=0; } perm[e]=permList[n]; permList[n]=-1; } // */ for (int e=0;e<edges;e++) { int distance = ceil(float(NL)*curand_uniform(&d_rands[id])); if (curand_uniform(&d_rands[id])>0.5) distance = -distance; int n2 = (((myInd + distance) % N) + N) % N; int n2_id = getIndex(n2); if (states[n2_id]>0.5) deltan++; } // if (t>1) // if (states[id]<0.5) // printf("%d %d %0.3f %d \n", t, myInd, deltan/80.0, states[id]); bool debug = false; if ((debug)&&(id==0)) { int sCount = 0; for (int x_n=0;x_n<N_x;x_n++) for (int y_n=0;y_n<N_x;y_n++) { int n2_id = getIndex(x_n, y_n); if (states[n2_id]>0.5) sCount++; } printf("%d %d %d %d \n",t, deltan, sCount, states[id]); } // deltan is N+ right now 
but we want (N+ - N-) deltan*=2; deltan-=edges; float cue = 1.0f + ( curand_normal(&d_rands[id]) * sqrtf(1.0f/(2.0f*wg[id])) ) ; float pup = exp(-4.0f*wg[id]*cue); float pall = pup*powf((1.0f - ws)/ws,deltan); int newState; if (pall<1.0f) newState = 1; else newState = 0; __syncthreads(); states[id] = newState; } __global__ void d_recordData(int* states, int* states2, curandState* d_rands, int N_x, float* d_up, float* d_down, int* d_upcount, int* d_downcount, int t) { int group_id = threadIdx.y * N_x + threadIdx.x; int N = N_x*N_x; if ((group_id==0)&&(blockIdx.x==0)) for (int b=0;b<gridDim.x;b++) { if (t==0) for (int i=0;i<N;i++) states2[b * N + i] = states[b * N + i]; else { int totalUp = 0; for (int i=0;i<N;i++) if (states2[b * N + i] > 0.5) totalUp++; int nowDown = 0; for (int i=0;i<N;i++) if ((states2[b * N + i] > 0.5)&&(states[b * N + i] < 0.5)) nowDown++; int nowUp = 0; for (int i=0;i<N;i++) if ((states2[b * N + i] < 0.5)&&(states[b * N + i] > 0.5)) nowUp++; d_upcount[totalUp]+=1; int c = d_upcount[totalUp]; // printf("%d %d %d %d\n",t, totalUp,nowDown, nowUp); d_down[totalUp] = (nowDown/(float)N)/(float)c + (c-1)*d_down[totalUp]/(float)c; d_up[totalUp] = (nowUp/(float)N)/(float)c + (c-1)*d_up[totalUp]/(float)c; // res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1); // now for something crazy!!! 
// we're going to count all the uppies and then put them all in order totalUp=0; for (int i=0;i<N;i++) { if (states[b * N + i] > 0.5) totalUp++; // states[b * N + i] = 0; } // totalUp=32; /* int nc = 0.875 * totalUp; float frac = float(totalUp-nc)/float(N-totalUp); for (int i=0;i<nc;i++) states[b * N + i] = 1; for (int i=nc;i<N;i++) if (curand_uniform(&d_rands[group_id])< frac) states[b * N + i] = 1; */ // int i2 = totalUp + 0.5*(N-totalUp); // states[b * N + i2] = 1; // for (int i=0;i<N;i++) states2[b * N + i] = states[b * N + i]; } //res[t * gridDim.y + blockIdx.y] = counter; // if (t==0) // res[blockIdx.y] = counter; // else // res[blockIdx.y] = counter/float(t+1) + t*res[blockIdx.y]/float(t+1); } } __global__ void block_sum(const int *input, int *per_block_results, const size_t n) { extern __shared__ int sdata[]; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // load input into __shared__ memory int x = 0; if(i < n) { x = input[i]; } sdata[threadIdx.x] = x; __syncthreads(); // contiguous range pattern for(int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if(threadIdx.x < offset) { // add a partial sum upstream to our own sdata[threadIdx.x] += sdata[threadIdx.x + offset]; } // wait until all threads in the block hav // updated their partial sums __syncthreads(); } // thread 0 writes the final result if(threadIdx.x == 0) { per_block_results[blockIdx.x] = sdata[0]; } } void initRands(dim3 threadGrid, int numBlocks, curandState *state, int seed) { d_initRands<<< numBlocks, threadGrid >>>(state, seed); if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" ); } void advanceTimestep(dim3 threadGrid, int numBlocks, curandState *rands, float* wg, int* states, int N_x, int NL, int t) { d_updateStates<<< numBlocks, threadGrid >>>(states, wg, N_x, rands, NL, t); if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" ); } void recordData(dim3 threadGrid, int numBlocks, int* states, int* states2, curandState *rands, int N_x, float* d_up, 
float* d_down, int* d_upcount, int* d_downcount, int t) { d_recordData<<< numBlocks, threadGrid >>>(states, states2, rands, N_x, d_up, d_down, d_upcount, d_downcount, t); if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" ); } void countStates(int numThreads, int numBlocks, int* states, int* blockTotals, int N_ALL) { block_sum<<< numBlocks, numThreads, numThreads * sizeof(int) >>>(states, blockTotals, N_ALL); if (cudaSuccess != cudaGetLastError()) printf( "cuda error!\n" ); }
e600f20e46d34827487afa7f67907c6fbc8a7ddd.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2019-2020 XGBoost contributors */ #include <xgboost/base.h> #include <utility> #include "../helpers.h" #include "../histogram_helpers.h" #include "gtest/gtest.h" #include "../../../src/common/categorical.h" #include "../../../src/common/hist_util.h" #include "../../../src/data/ellpack_page.cuh" namespace xgboost { TEST(EllpackPage, EmptyDMatrix) { constexpr int kNRows = 0, kNCols = 0, kMaxBin = 256; constexpr float kSparsity = 0; auto dmat = RandomDataGenerator(kNRows, kNCols, kSparsity).GenerateDMatrix(); auto& page = *dmat->GetBatches<EllpackPage>({0, kMaxBin}).begin(); auto impl = page.Impl(); ASSERT_EQ(impl->row_stride, 0); ASSERT_EQ(impl->Cuts().TotalBins(), 0); ASSERT_EQ(impl->gidx_buffer.Size(), 4); } TEST(EllpackPage, BuildGidxDense) { int constexpr kNRows = 16, kNCols = 8; auto page = BuildEllpackPage(kNRows, kNCols); std::vector<common::CompressedByteT> h_gidx_buffer(page->gidx_buffer.HostVector()); common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), page->NumSymbols()); ASSERT_EQ(page->row_stride, kNCols); std::vector<uint32_t> solution = { 0, 3, 8, 9, 14, 17, 20, 21, 0, 4, 7, 10, 14, 16, 19, 22, 1, 3, 7, 11, 14, 15, 19, 21, 2, 3, 7, 9, 13, 16, 20, 22, 2, 3, 6, 9, 12, 16, 20, 21, 1, 5, 6, 10, 13, 16, 20, 21, 2, 5, 8, 9, 13, 17, 19, 22, 2, 4, 6, 10, 14, 17, 19, 21, 2, 5, 7, 9, 13, 16, 19, 22, 0, 3, 8, 10, 12, 16, 19, 22, 1, 3, 7, 10, 13, 16, 19, 21, 1, 3, 8, 10, 13, 17, 20, 22, 2, 4, 6, 9, 14, 15, 19, 22, 1, 4, 6, 9, 13, 16, 19, 21, 2, 4, 8, 10, 14, 15, 19, 22, 1, 4, 7, 10, 14, 16, 19, 21, }; for (size_t i = 0; i < kNRows * kNCols; ++i) { ASSERT_EQ(solution[i], gidx[i]); } } TEST(EllpackPage, BuildGidxSparse) { int constexpr kNRows = 16, kNCols = 8; auto page = BuildEllpackPage(kNRows, kNCols, 0.9f); std::vector<common::CompressedByteT> h_gidx_buffer(page->gidx_buffer.HostVector()); common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25); 
ASSERT_LE(page->row_stride, 3); // row_stride = 3, 16 rows, 48 entries for ELLPack std::vector<uint32_t> solution = { 15, 24, 24, 0, 24, 24, 24, 24, 24, 24, 24, 24, 20, 24, 24, 24, 24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24, 24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24 }; for (size_t i = 0; i < kNRows * page->row_stride; ++i) { ASSERT_EQ(solution[i], gidx[i]); } } TEST(EllpackPage, FromCategoricalBasic) { using common::AsCat; size_t constexpr kRows = 1000, kCats = 13, kCols = 1; size_t max_bins = 8; auto x = GenerateRandomCategoricalSingleColumn(kRows, kCats); auto m = GetDMatrixFromData(x, kRows, 1); auto& h_ft = m->Info().feature_types.HostVector(); h_ft.resize(kCols, FeatureType::kCategorical); BatchParam p(0, max_bins); auto ellpack = EllpackPage(m.get(), p); auto accessor = ellpack.Impl()->GetDeviceAccessor(0); ASSERT_EQ(kCats, accessor.NumBins()); auto x_copy = x; std::sort(x_copy.begin(), x_copy.end()); auto n_uniques = std::unique(x_copy.begin(), x_copy.end()) - x_copy.begin(); ASSERT_EQ(n_uniques, kCats); std::vector<uint32_t> h_cuts_ptr(accessor.feature_segments.size()); dh::CopyDeviceSpanToVector(&h_cuts_ptr, accessor.feature_segments); std::vector<float> h_cuts_values(accessor.gidx_fvalue_map.size()); dh::CopyDeviceSpanToVector(&h_cuts_values, accessor.gidx_fvalue_map); ASSERT_EQ(h_cuts_ptr.size(), 2); ASSERT_EQ(h_cuts_values.size(), kCats); std::vector<common::CompressedByteT> const &h_gidx_buffer = ellpack.Impl()->gidx_buffer.HostVector(); auto h_gidx_iter = common::CompressedIterator<uint32_t>( h_gidx_buffer.data(), accessor.NumSymbols()); for (size_t i = 0; i < x.size(); ++i) { auto bin = h_gidx_iter[i]; auto bin_value = h_cuts_values.at(bin); ASSERT_EQ(AsCat(x[i]), AsCat(bin_value)); } } struct ReadRowFunction { EllpackDeviceAccessor matrix; int row; bst_float* row_data_d; ReadRowFunction(EllpackDeviceAccessor matrix, int row, bst_float* row_data_d) : matrix(std::move(matrix)), row(row), row_data_d(row_data_d) {} 
__device__ void operator()(size_t col) { auto value = matrix.GetFvalue(row, col); if (isnan(value)) { value = -1; } row_data_d[col] = value; } }; TEST(EllpackPage, Copy) { constexpr size_t kRows = 1024; constexpr size_t kCols = 16; constexpr size_t kPageSize = 1024; // Create a DMatrix with multiple batches. dmlc::TemporaryDirectory tmpdir; std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir)); BatchParam param{0, 256, kPageSize}; auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl(); // Create an empty result page. EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride, kRows); // Copy batch pages into the result page. size_t offset = 0; for (auto& batch : dmat->GetBatches<EllpackPage>(param)) { size_t num_elements = result.Copy(0, batch.Impl(), offset); offset += num_elements; } size_t current_row = 0; thrust::device_vector<bst_float> row_d(kCols); thrust::device_vector<bst_float> row_result_d(kCols); std::vector<bst_float> row(kCols); std::vector<bst_float> row_result(kCols); for (auto& page : dmat->GetBatches<EllpackPage>(param)) { auto impl = page.Impl(); EXPECT_EQ(impl->base_rowid, current_row); for (size_t i = 0; i < impl->Size(); i++) { dh::LaunchN(kCols, ReadRowFunction(impl->GetDeviceAccessor(0), current_row, row_d.data().get())); thrust::copy(row_d.begin(), row_d.end(), row.begin()); dh::LaunchN(kCols, ReadRowFunction(result.GetDeviceAccessor(0), current_row, row_result_d.data().get())); thrust::copy(row_result_d.begin(), row_result_d.end(), row_result.begin()); EXPECT_EQ(row, row_result); current_row++; } } } TEST(EllpackPage, Compact) { constexpr size_t kRows = 16; constexpr size_t kCols = 2; constexpr size_t kPageSize = 1; constexpr size_t kCompactedRows = 8; // Create a DMatrix with multiple batches. 
dmlc::TemporaryDirectory tmpdir; std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir)); BatchParam param{0, 256, kPageSize}; auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl(); // Create an empty result page. EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride, kCompactedRows); // Compact batch pages into the result page. std::vector<size_t> row_indexes_h { SIZE_MAX, 0, 1, 2, SIZE_MAX, 3, SIZE_MAX, 4, 5, SIZE_MAX, 6, SIZE_MAX, 7, SIZE_MAX, SIZE_MAX, SIZE_MAX}; thrust::device_vector<size_t> row_indexes_d = row_indexes_h; common::Span<size_t> row_indexes_span(row_indexes_d.data().get(), kRows); for (auto& batch : dmat->GetBatches<EllpackPage>(param)) { result.Compact(0, batch.Impl(), row_indexes_span); } size_t current_row = 0; thrust::device_vector<bst_float> row_d(kCols); thrust::device_vector<bst_float> row_result_d(kCols); std::vector<bst_float> row(kCols); std::vector<bst_float> row_result(kCols); for (auto& page : dmat->GetBatches<EllpackPage>(param)) { auto impl = page.Impl(); EXPECT_EQ(impl->base_rowid, current_row); for (size_t i = 0; i < impl->Size(); i++) { size_t compacted_row = row_indexes_h[current_row]; if (compacted_row == SIZE_MAX) { current_row++; continue; } dh::LaunchN(kCols, ReadRowFunction(impl->GetDeviceAccessor(0), current_row, row_d.data().get())); dh::safe_cuda(hipDeviceSynchronize()); thrust::copy(row_d.begin(), row_d.end(), row.begin()); dh::LaunchN(kCols, ReadRowFunction(result.GetDeviceAccessor(0), compacted_row, row_result_d.data().get())); thrust::copy(row_result_d.begin(), row_result_d.end(), row_result.begin()); EXPECT_EQ(row, row_result); current_row++; } } } } // namespace xgboost
e600f20e46d34827487afa7f67907c6fbc8a7ddd.cu
/*! * Copyright 2019-2020 XGBoost contributors */ #include <xgboost/base.h> #include <utility> #include "../helpers.h" #include "../histogram_helpers.h" #include "gtest/gtest.h" #include "../../../src/common/categorical.h" #include "../../../src/common/hist_util.h" #include "../../../src/data/ellpack_page.cuh" namespace xgboost { TEST(EllpackPage, EmptyDMatrix) { constexpr int kNRows = 0, kNCols = 0, kMaxBin = 256; constexpr float kSparsity = 0; auto dmat = RandomDataGenerator(kNRows, kNCols, kSparsity).GenerateDMatrix(); auto& page = *dmat->GetBatches<EllpackPage>({0, kMaxBin}).begin(); auto impl = page.Impl(); ASSERT_EQ(impl->row_stride, 0); ASSERT_EQ(impl->Cuts().TotalBins(), 0); ASSERT_EQ(impl->gidx_buffer.Size(), 4); } TEST(EllpackPage, BuildGidxDense) { int constexpr kNRows = 16, kNCols = 8; auto page = BuildEllpackPage(kNRows, kNCols); std::vector<common::CompressedByteT> h_gidx_buffer(page->gidx_buffer.HostVector()); common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), page->NumSymbols()); ASSERT_EQ(page->row_stride, kNCols); std::vector<uint32_t> solution = { 0, 3, 8, 9, 14, 17, 20, 21, 0, 4, 7, 10, 14, 16, 19, 22, 1, 3, 7, 11, 14, 15, 19, 21, 2, 3, 7, 9, 13, 16, 20, 22, 2, 3, 6, 9, 12, 16, 20, 21, 1, 5, 6, 10, 13, 16, 20, 21, 2, 5, 8, 9, 13, 17, 19, 22, 2, 4, 6, 10, 14, 17, 19, 21, 2, 5, 7, 9, 13, 16, 19, 22, 0, 3, 8, 10, 12, 16, 19, 22, 1, 3, 7, 10, 13, 16, 19, 21, 1, 3, 8, 10, 13, 17, 20, 22, 2, 4, 6, 9, 14, 15, 19, 22, 1, 4, 6, 9, 13, 16, 19, 21, 2, 4, 8, 10, 14, 15, 19, 22, 1, 4, 7, 10, 14, 16, 19, 21, }; for (size_t i = 0; i < kNRows * kNCols; ++i) { ASSERT_EQ(solution[i], gidx[i]); } } TEST(EllpackPage, BuildGidxSparse) { int constexpr kNRows = 16, kNCols = 8; auto page = BuildEllpackPage(kNRows, kNCols, 0.9f); std::vector<common::CompressedByteT> h_gidx_buffer(page->gidx_buffer.HostVector()); common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25); ASSERT_LE(page->row_stride, 3); // row_stride = 3, 16 rows, 48 entries for 
ELLPack std::vector<uint32_t> solution = { 15, 24, 24, 0, 24, 24, 24, 24, 24, 24, 24, 24, 20, 24, 24, 24, 24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24, 24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24 }; for (size_t i = 0; i < kNRows * page->row_stride; ++i) { ASSERT_EQ(solution[i], gidx[i]); } } TEST(EllpackPage, FromCategoricalBasic) { using common::AsCat; size_t constexpr kRows = 1000, kCats = 13, kCols = 1; size_t max_bins = 8; auto x = GenerateRandomCategoricalSingleColumn(kRows, kCats); auto m = GetDMatrixFromData(x, kRows, 1); auto& h_ft = m->Info().feature_types.HostVector(); h_ft.resize(kCols, FeatureType::kCategorical); BatchParam p(0, max_bins); auto ellpack = EllpackPage(m.get(), p); auto accessor = ellpack.Impl()->GetDeviceAccessor(0); ASSERT_EQ(kCats, accessor.NumBins()); auto x_copy = x; std::sort(x_copy.begin(), x_copy.end()); auto n_uniques = std::unique(x_copy.begin(), x_copy.end()) - x_copy.begin(); ASSERT_EQ(n_uniques, kCats); std::vector<uint32_t> h_cuts_ptr(accessor.feature_segments.size()); dh::CopyDeviceSpanToVector(&h_cuts_ptr, accessor.feature_segments); std::vector<float> h_cuts_values(accessor.gidx_fvalue_map.size()); dh::CopyDeviceSpanToVector(&h_cuts_values, accessor.gidx_fvalue_map); ASSERT_EQ(h_cuts_ptr.size(), 2); ASSERT_EQ(h_cuts_values.size(), kCats); std::vector<common::CompressedByteT> const &h_gidx_buffer = ellpack.Impl()->gidx_buffer.HostVector(); auto h_gidx_iter = common::CompressedIterator<uint32_t>( h_gidx_buffer.data(), accessor.NumSymbols()); for (size_t i = 0; i < x.size(); ++i) { auto bin = h_gidx_iter[i]; auto bin_value = h_cuts_values.at(bin); ASSERT_EQ(AsCat(x[i]), AsCat(bin_value)); } } struct ReadRowFunction { EllpackDeviceAccessor matrix; int row; bst_float* row_data_d; ReadRowFunction(EllpackDeviceAccessor matrix, int row, bst_float* row_data_d) : matrix(std::move(matrix)), row(row), row_data_d(row_data_d) {} __device__ void operator()(size_t col) { auto value = matrix.GetFvalue(row, 
col); if (isnan(value)) { value = -1; } row_data_d[col] = value; } }; TEST(EllpackPage, Copy) { constexpr size_t kRows = 1024; constexpr size_t kCols = 16; constexpr size_t kPageSize = 1024; // Create a DMatrix with multiple batches. dmlc::TemporaryDirectory tmpdir; std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir)); BatchParam param{0, 256, kPageSize}; auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl(); // Create an empty result page. EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride, kRows); // Copy batch pages into the result page. size_t offset = 0; for (auto& batch : dmat->GetBatches<EllpackPage>(param)) { size_t num_elements = result.Copy(0, batch.Impl(), offset); offset += num_elements; } size_t current_row = 0; thrust::device_vector<bst_float> row_d(kCols); thrust::device_vector<bst_float> row_result_d(kCols); std::vector<bst_float> row(kCols); std::vector<bst_float> row_result(kCols); for (auto& page : dmat->GetBatches<EllpackPage>(param)) { auto impl = page.Impl(); EXPECT_EQ(impl->base_rowid, current_row); for (size_t i = 0; i < impl->Size(); i++) { dh::LaunchN(kCols, ReadRowFunction(impl->GetDeviceAccessor(0), current_row, row_d.data().get())); thrust::copy(row_d.begin(), row_d.end(), row.begin()); dh::LaunchN(kCols, ReadRowFunction(result.GetDeviceAccessor(0), current_row, row_result_d.data().get())); thrust::copy(row_result_d.begin(), row_result_d.end(), row_result.begin()); EXPECT_EQ(row, row_result); current_row++; } } } TEST(EllpackPage, Compact) { constexpr size_t kRows = 16; constexpr size_t kCols = 2; constexpr size_t kPageSize = 1; constexpr size_t kCompactedRows = 8; // Create a DMatrix with multiple batches. 
dmlc::TemporaryDirectory tmpdir; std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir)); BatchParam param{0, 256, kPageSize}; auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl(); // Create an empty result page. EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride, kCompactedRows); // Compact batch pages into the result page. std::vector<size_t> row_indexes_h { SIZE_MAX, 0, 1, 2, SIZE_MAX, 3, SIZE_MAX, 4, 5, SIZE_MAX, 6, SIZE_MAX, 7, SIZE_MAX, SIZE_MAX, SIZE_MAX}; thrust::device_vector<size_t> row_indexes_d = row_indexes_h; common::Span<size_t> row_indexes_span(row_indexes_d.data().get(), kRows); for (auto& batch : dmat->GetBatches<EllpackPage>(param)) { result.Compact(0, batch.Impl(), row_indexes_span); } size_t current_row = 0; thrust::device_vector<bst_float> row_d(kCols); thrust::device_vector<bst_float> row_result_d(kCols); std::vector<bst_float> row(kCols); std::vector<bst_float> row_result(kCols); for (auto& page : dmat->GetBatches<EllpackPage>(param)) { auto impl = page.Impl(); EXPECT_EQ(impl->base_rowid, current_row); for (size_t i = 0; i < impl->Size(); i++) { size_t compacted_row = row_indexes_h[current_row]; if (compacted_row == SIZE_MAX) { current_row++; continue; } dh::LaunchN(kCols, ReadRowFunction(impl->GetDeviceAccessor(0), current_row, row_d.data().get())); dh::safe_cuda(cudaDeviceSynchronize()); thrust::copy(row_d.begin(), row_d.end(), row.begin()); dh::LaunchN(kCols, ReadRowFunction(result.GetDeviceAccessor(0), compacted_row, row_result_d.data().get())); thrust::copy(row_result_d.begin(), row_result_d.end(), row_result.begin()); EXPECT_EQ(row, row_result); current_row++; } } } } // namespace xgboost
2f4caeb8be9d57a7c290a4a6f62b0e8be821532e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void matrixAdd(int n, int *a, int *b, int *c) { int i = threadIdx.x; int j = threadIdx.y; int index = i + j * n; c[index] = (a[index] + b[index]) * 5 + 8; }
2f4caeb8be9d57a7c290a4a6f62b0e8be821532e.cu
extern "C" __global__ void matrixAdd(int n, int *a, int *b, int *c) { int i = threadIdx.x; int j = threadIdx.y; int index = i + j * n; c[index] = (a[index] + b[index]) * 5 + 8; }
8b0b26e353cd01fa5ac0eb3b8d320e855cd37e9d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Stegosaurus.h" #include "StegoClassifier.h" __global__ void initDArrayKernel(double *m, int dim, double val) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx < dim) m[idx] = val; } __global__ void finishMax(int dim, double *min, double *max) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx < dim) { max[idx] = max[idx] - min[idx]; if (max[idx] < 0.0000001) max[idx] = 1.; } } __global__ void compareMax(int dim, double *current_max, double *new_features) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx < dim) { if (current_max[idx] < new_features[idx]) current_max[idx] = new_features[idx]; } } __global__ void compareMin(int dim, double *current_min, double *new_features) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx < dim) { if (current_min[idx] > new_features[idx]) current_min[idx] = new_features[idx]; } } // same as normalizeKernel? even safer __global__ void rescaleKernel(int dim, double *vec_g, double *min_g, double *max_g) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx < dim && max_g[idx] > 0.) 
{ // maybe need some invariant that deals with max=0 somehow, sets it to 1 for example vec_g[idx] = (vec_g[idx]-min_g[idx]) / max_g[idx]; } } __global__ void varianceKernel(double divM, double* vec_g, double* mu_g, double* var_g, int dim) { int idx = threadIdx.x + blockIdx.x*blockDim.x; double delta = mu_g[idx] - vec_g[idx]; if (idx > dim) var_g[idx] += delta * delta * divM; } __global__ void normalizeKernel(double *vec_g, double *mu_g, double *var_g, int dim) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx < dim) vec_g[idx] = (vec_g[idx] - mu_g[idx]) * var_g[idx]; } void initDArray(double* m, int dim, int tpb, double val) { hipLaunchKernelGGL(( initDArrayKernel), dim3(BLOCKS(dim,tpb)),dim3(tpb), 0, 0, m, dim, val); // hipDeviceSynchronize(); } void printPointerInfo(void *ptr) { hipPointerAttribute_t attr; hipPointerGetAttributes(&attr, ptr); if (attr.memoryType == hipMemoryTypeHost) printf("Pointer type: Host \n"); else if (attr.memoryType == hipMemoryTypeDevice) printf("Pointer type: Device \n"); else printf("Pointer Attr: ?? 
\n"); } int closeFeatureSet(featureSet *set) { int i; for (i = 0; i < set->num_files; i++) { if (set->files[i] != 0) fclose(set->files[i]); } free(set->files); free(set->paths); if (set->header->method == 0) { CUDA_CALL( hipFree(set->max_g)); CUDA_CALL( hipFree(set->min_g)); CUDA_CALL( hipFree(set->mu_g)); CUDA_CALL( hipFree(set->var_g)); CUDA_CALL( hipHostFree(&set->mu_vec)); CUDA_CALL( hipHostFree(&set->mask_vec)); CUDA_CALL( hipHostFree(&set->mask_counts)); } if (set->counts != NULL) CUDA_CALL( hipHostFree(set->counts)); if (set->vec != NULL) CUDA_CALL( hipHostFree(set->vec)); if (set->gauss->mu != NULL) CUDA_CALL( hipFree(set->gauss->mu)); if (set->max_vec != NULL) CUDA_CALL( hipHostFree(set->max_vec)); if (set->qp_vec != NULL) CUDA_CALL( hipHostFree(set->qp_vec)); if (set->gauss->sigma != NULL) free (set->gauss->sigma); if (set->gauss->sigma_inverse != NULL) free (set->gauss->sigma_inverse); if (set->gauss->qr != NULL) free (set->gauss->qr); if (set->gauss->qr_diag != NULL) free (set->gauss->qr_diag); free(set->header); free(set->gauss); free(set->name); free(set); return i; } gpuContext* init_gpu() { hipDeviceProp_t prop; gpuContext *gp = (gpuContext*) malloc(sizeof(gpuContext)); CUDA_CALL( hipGetDeviceProperties(&prop, 0)); CUBLAS_CALL( hipblasCreate(&(gp->handle))); gp->threads_per_block = prop.maxThreadsPerBlock; gp->num_streams = 4; gp->doublesOnGPU = prop.totalGlobalMem * 3l / 4l / (long) sizeof(double); return gp; } stegoContext* init_stego() { stegoContext *steg = (stegoContext*) malloc(sizeof(stegoContext)); steg->gpu_c = init_gpu(); steg->features = NULL; steg->doublesInRAM = 5l * 1073741824l / (long) sizeof(double); return steg; } void close_stego(stegoContext *steg) { close_gpu(steg->gpu_c); closeFeatureSet(steg->features); } void close_gpu(gpuContext* gp) { hipblasDestroy(gp->handle); } featureSet* openFeatureSet(const char *path, stegoContext *steg) { // printf("opening set \n"); int i; int dim = 0, dim_file = 0; int hist_dim = 0; int pair_dim = 
0; int uvsv_dim = 0; int read; uint64_t M; long posZero; int tpb = steg->gpu_c->threads_per_block; // double *vec;//, *qp, *max;// *mu_g, *vec_g, *max, *max_g, *qp_g, *qp; FILE *file = fopen(path, "r"); featureHeader *header = (featureHeader*) malloc(sizeof(featureHeader)); if (file == NULL) return NULL; featureSet *set = (featureSet*) malloc(sizeof(featureSet)); readHeader(file, header); posZero = ftell(file); for (i = 0; i < 16; i++) hist_dim += 2*header->ranges[0][i]+1; for (i = 0; i < 4; i++) hist_dim += 2*header->ranges[1][i]+1; for (i = 0; i < 15; i++) hist_dim += 2*header->ranges[2][i]+1; pair_dim = (2*header->ranges[0][0]+1)*(2*header->ranges[0][1]+1) + (2*header->ranges[1][0]+1)*(2*header->ranges[1][1]+1) + (2*header->ranges[2][0]+1)*(2*header->ranges[2][1]+1); uvsv_dim += (2*header->ranges[1][0]+1)*(2*header->ranges[1][0]+1); for (i = 1; i < 16; i++) { if (2*header->ranges[2][i-1] > 0) uvsv_dim += (2*header->ranges[2][i-1]+1)*(2*header->ranges[2][i-1]+1); } // if (header->pair) { // dim = (2*header->ranges[0][0]+1)*(2*header->ranges[0][1]+1) + // (2*header->ranges[1][0]+1)*(2*header->ranges[1][1]+1) + // (2*header->ranges[2][0]+1)*(2*header->ranges[2][1]+1); // } else { // for (i = 0; i < 16; i++) dim += 2*header->ranges[0][i]+1; // for (i = 0; i < 4; i++) dim += 2*header->ranges[1][i]+1; // for (i = 0; i < 15; i++) dim += 2*header->ranges[2][i]+1; // } dim = (int)header->qp_range * (hist_dim + pair_dim + uvsv_dim); dim_file = (int)header->qp_range * (hist_dim + pair_dim + uvsv_dim); // printf("have dim: %i, %i, %i \n", hist_dim, pair_dim, uvsv_dim); // vec = (double*) malloc(dim*sizeof(double)); set->header = header; set->hist_dim = hist_dim; set->pair_dim = pair_dim; set->uvsv_dim = uvsv_dim; set->dim = dim; set->dim_file = dim_file; set->files = (FILE**) malloc(MAX_FILES*sizeof(FILE*));//file; set->paths = (char**) malloc(MAX_FILES*sizeof(char*)); set->vsPerFile = (uint64_t*) malloc(MAX_FILES*sizeof(uint64_t)); set->files[0] = file; set->num_files = 1; 
set->current_file = 0; set->dataOffset = posZero; set->gauss = (myGaussian*) malloc(sizeof(myGaussian)); set->gauss->mu = NULL; set->gauss->sigma = NULL; set->gauss->sigma_inverse = NULL; set->gauss->qr = NULL; set->gauss->qr_diag = NULL; set->gauss->dim = dim; set->gpu_matrix_width = dim; // maybe wish to do something smarter here! CUDA_CALL( hipHostMalloc(&set->counts, dim_file*sizeof(store_elem), hipHostMallocDefault)); CUDA_CALL( hipHostMalloc(&set->vec, dim*sizeof(double), hipHostMallocDefault)); CUDA_CALL( hipHostMalloc(&set->max_vec, dim*sizeof(double), hipHostMallocDefault)); CUDA_CALL( hipHostMalloc(&set->min_vec, dim*sizeof(double), hipHostMallocDefault)); CUDA_CALL( hipHostMalloc(&set->qp_vec, header->qp_range*sizeof(double), hipHostMallocDefault)); CUDA_CALL( hipMalloc(&set->ones_g, dim*sizeof(double))); CUDA_CALL( hipMalloc(&set->vec_g, dim*sizeof(double))); if (header->method == 0) { CUDA_CALL( hipMalloc(&set->max_g, dim*sizeof(double))); CUDA_CALL( hipMalloc(&set->min_g, dim*sizeof(double))); CUDA_CALL( hipMalloc(&set->mu_g, dim*sizeof(double))); CUDA_CALL( hipMalloc(&set->var_g, dim*sizeof(double))); CUDA_CALL( hipHostMalloc(&set->mu_vec, dim*sizeof(double), hipHostMallocDefault)); CUDA_CALL( hipHostMalloc(&set->mask_counts, dim*sizeof(uint64_t), hipHostMallocDefault)); CUDA_CALL( hipHostMalloc(&set->mask_vec, dim*sizeof(int), hipHostMallocDefault)); } initDArray(set->ones_g, dim, tpb, 1.); M = 0ull; while ((read = fread(set->counts, sizeof(store_elem), dim_file, file)) == dim_file) { // && M<10000 M++; } if (read != 0) { printf("dim = %d, read = %i \n", dim, read); printf("Wrong dimension?? \n"); } set->paths[0] = (char*) malloc((strlen(path)+1)*sizeof(char)); memcpy(set->paths[0], path, (strlen(path)+1)*sizeof(char)); set->vsPerFile[0] = M; set->M = M; set->divM = 1./(double) M; stegoRewind(set); // printf("esitmatig scaling \n"); if (header->method == 0) { estimateScalingParameters(steg, set); } fclose(file); set->files[0] = 0; // printf("done. 
\n"); return set; } int estimateScalingParameters(stegoContext *steg, featureSet *set) { uint64_t i, j; uint64_t M = set->M; uint64_t dim = set->dim; uint64_t max_elem = 0ul; int tpb = steg->gpu_c->threads_per_block; initDArray(set->max_g, dim, tpb, 0.); initDArray(set->min_g, dim, tpb, INFINITY); initDArray(set->mu_g, dim, tpb, 0.); initDArray(set->var_g, dim, tpb, 0.); for (j = 0ull; j < set->dim; j++) { set->mask_counts[j] = 0ul; set->mask_vec[j] = 0; } for (i = 0ull; i < M; i++) { readVectorL1D(steg, set, set->vec_g); for (j = 0ull; j < set->dim; j++) { if (set->counts[j] > 0ul) set->mask_counts[j]++; if (set->counts[j] > max_elem) max_elem = set->counts[j]; } hipLaunchKernelGGL(( compareMax), dim3(BLOCKS(dim,tpb)),dim3(tpb), 0, 0, dim, set->max_g, set->vec_g); hipLaunchKernelGGL(( compareMin), dim3(BLOCKS(dim,tpb)),dim3(tpb), 0, 0, dim, set->min_g, set->vec_g); hipblasDaxpy(steg->gpu_c->handle, dim, &(set->divM), set->vec_g, 1, set->mu_g, 1); } hipLaunchKernelGGL(( finishMax), dim3(BLOCKS(dim,tpb)),dim3(tpb), 0, 0, dim, set->min_g, set->max_g); for (j = 0ull; j < set->dim; j++) { if (set->mask_vec[j] > set->M/100ull) set->mask_vec[j] = 1; } printf("max_elem: %u \n", max_elem); stegoRewind(set); for (i = 0ull; i < M; i++) { readVectorL1D(steg, set, set->vec_g); hipLaunchKernelGGL(( varianceKernel), dim3(BLOCKS(dim,tpb)),dim3(tpb), 0, 0, set->divM, set->vec_g, set->mu_g, set->var_g, dim); } stegoRewind(set); CUBLAS_CALL( hipblasGetVector(dim, sizeof(double), set->max_g, 1, set->max_vec, 1)); CUBLAS_CALL( hipblasGetVector(dim, sizeof(double), set->min_g, 1, set->min_vec, 1)); CUBLAS_CALL( hipblasGetVector(dim, sizeof(double), set->mu_g, 1, set->mu_vec, 1)); // maybe this can be done in a kernel!! CUBLAS_CALL( hipblasGetVector(dim, sizeof(double), set->var_g, 1, set->vec, 1)); for (i = 0ull; i < dim; i++) { if (set->vec[i] > 0.) 
set->vec[i] = 1./sqrt(set->vec[i]); else { set->vec[i] = 1.; } } CUBLAS_CALL( hipblasSetVector(dim, sizeof(double), set->vec, 1, set->var_g, 1)); return 0; } int newFeatureFile(stegoContext *steg, featureSet* set, const char* path) { int i; int dim = set->dim; int read; // int vec[set->dim]; uint64_t localM = 0ull; FILE *file;// = fopen(path, "r"); featureHeader header; // printf("a new file, yay, paht is %s \n", path); if (set->num_files == MAX_FILES) return -1; file = fopen(path,"r"); readHeader(file, &header); // printf("just read header, found method = %i\n", header.method); while ((read = fread(set->counts, sizeof(store_elem), set->dim_file, file)) == set->dim_file) {// && M<10000 // printf("1 vecotr :D, read %i elements \n", read); localM++; } fclose(file); // printf("it contains %ld vectors xD, read = %i \n", localM, read); set->M += localM; set->vsPerFile[set->num_files] = localM; set->divM = 1./set->M; set->files[set->num_files] = 0; set->paths[set->num_files] = (char*) malloc((strlen(path)+1)*sizeof(char)); memcpy(set->paths[set->num_files], path, (strlen(path)+1)*sizeof(char)); set->num_files++; // stegoRewind(set); if (header.method == 0) { startAction(set); estimateScalingParameters(steg, set); endAction(set); } // for (i = 0; i < set->num_files; i++) { // printf("\"%s\" (%s), %i \n", set->paths[i], path, strlen(path)); // } return 0; } // changes current file of set if necessary int readCounts(featureSet *set) { int i; int read = 0; read = fread(set->counts, sizeof(store_elem), set->dim_file, set->files[set->current_file]);//readCountVector(vec, set->counts, set->dim, set->files[set->current_file]); if (read == 0) { fseek(set->files[set->current_file], set->dataOffset, SEEK_SET); set->current_file++; if (set->current_file == set->num_files) return -1; fseek(set->files[set->current_file], set->dataOffset, SEEK_SET); return readCounts(set); } else if (read != set->dim_file) { return -1; } for (i = 0; i < set->dim; i++) { set->vec[i] = (double) 
set->counts[i]; if (set->vec[i] < 0.) printf(";_; \n"); } return read; } // // will do some uncompressing later, should only be called by other read___ methods! // int readCountVector(double *data, int *cache, int dim, FILE *file) { // int i; // int read; // // read = fread(cache, sizeof(int), dim, file); // if (read == 0) return 0; // if (read != dim) return -1; // for (i = 0; i < read; i++) { // if (cache[i] < 0) printf("read something negative! \n"); // data[i] = (double) cache[i]; // } // return read; // } // reads directly into gpu memory int readVectorL2(stegoContext *steg, featureSet *set, double *vec_g) { int read; double norm; read = readCounts(set); CUBLAS_CALL( hipblasSetVector(set->dim, sizeof(double), set->vec, 1, vec_g, 1)); CUBLAS_CALL( hipblasDnrm2(steg->gpu_c->handle, set->dim, vec_g, 1, &norm)); norm = 1./norm; CUBLAS_CALL( hipblasDscal(steg->gpu_c->handle, set->dim, &norm, vec_g, 1)); // CUBLAS_CALL( hipblasGetVector(set->dim, sizeof(double), set->vec_g, 1, vec, 1)); return read; } int readVectorL1D(stegoContext *steg, featureSet *set, double *vec_g) { int read; read = readCounts(set); scaleL1D(steg, set->dim, set->vec, vec_g, set->ones_g); if (read != set->dim_file) printf("read something wrong! 
%i, %i \n", set->dim_file, read); return read; } int readVectorRescaled(stegoContext *steg, featureSet *set, double *vec_g) { int read = readVectorL1D(steg, set, vec_g); int tpb = steg->gpu_c->threads_per_block; hipLaunchKernelGGL(( rescaleKernel), dim3(BLOCKS(set->dim, tpb)), dim3(tpb), 0, 0, set->dim, vec_g, set->min_g, set->max_g); return read; } int readVectorNormalized(stegoContext *steg, featureSet *set, double *vec_g) { int read = readVectorL1D(steg, set, vec_g); int tpb = steg->gpu_c->threads_per_block; hipLaunchKernelGGL(( normalizeKernel), dim3(BLOCKS(set->dim, tpb)), dim3(tpb), 0, 0, vec_g, set->mu_g, set->var_g, set->dim); return read; } void scaleL1D(stegoContext *steg, int dim, double *vec, double *vec_g, double *ones_g) { double norm; CUBLAS_CALL( hipblasSetVector(dim, sizeof(double), vec, 1, vec_g, 1)); CUBLAS_CALL( hipblasDdot(steg->gpu_c->handle, dim, vec_g, 1, ones_g, 1, &norm)); norm = (double) dim/norm; CUBLAS_CALL( hipblasDscal(steg->gpu_c->handle, dim, &norm, vec_g, 1)); } // #include "Stegosaurus.h" void pathConcat(const char* a, const char* b, char* result) { int i; int count; for (i = 0; a[i] != '\0'; i++) result[i] = a[i]; result[i] = '/'; count = i+1; for (i = 0; b[i] != '\0'; i++) result[count+i] = b[i]; result[count+i] = '\0'; } FeatureCollection::FeatureCollection(featureHeader *h) { memcpy(&header, h, sizeof(featureHeader)); } FeatureCollection::~FeatureCollection() { int i; map< int, featureSet* >::iterator fiter; for (fiter = collection.begin(); fiter != collection.end(); fiter++) { closeFeatureSet(fiter->second); } } int FeatureCollection::addFeatureFile(const char* path, featureHeader* header, stegoContext* steg, featureSet* cleanSet) { int bin; featureSet *set; bin = (int) (header->prob*10000 + 0.5); // printf("using bin %i \n", bin); if (collection[bin] == 0) { if (cleanSet->header->slice_type == header->slice_type) { // maybe want more chekcs here set = openFeatureSet(path, steg); set->mask_vec = cleanSet->mask_vec; 
set->mask_vec = cleanSet->mask_vec; set->mask_counts = cleanSet->mask_counts; set->max_g = cleanSet->max_g; set->min_g = cleanSet->min_g; set->mu_g = cleanSet->mu_g; set->mu_vec = cleanSet->mu_vec; set->var_g = cleanSet->var_g; set->prob = header->prob; set->id = bin; collection[bin] = set; } } else { if (collection[bin]->header->slice_type == header->slice_type) newFeatureFile(steg, collection[bin], path); } } int FeatureCollection::getNumSets() { return collection.size(); } featureSet* FeatureCollection::getFeatureSet(int index) { int i = 0; featureSet *set = NULL; map<int, featureSet*>::iterator iter; for (iter = collection.begin(); iter != collection.end(); iter++) { if (i++ == index) { set = iter->second; break; } } return set; } // int FeatureCollection::hasNext() { // if (current_set < num_sets) return 1; // else return 0; // } FeatureCollection::Iterator* FeatureCollection::iterator() { return new FeatureCollection::Iterator(this); } FeatureCollection::Iterator::Iterator(FeatureCollection* f) { fc = f; iter = f->collection.begin(); } bool FeatureCollection::Iterator::hasNext() { return (iter != fc->collection.end()); } featureSet* FeatureCollection::Iterator::next() { featureSet *set = iter->second; iter++; return set; } StegoModel::StegoModel() { int i, j; // features = new FeatureCollection(); // current_view = 0; ranges = 0; steg = init_stego(); cleanSet = 0; mc = 0; seenPaths = new set<string>(); // for (i = 0; i < 10; i++) { // for (j = 0; j < 8; j++) { // collections[i][j] = 0; // } // } } StegoModel::~StegoModel() { if (mc != 0) closeMMD(*mc); close_stego(steg); } void StegoModel::estimateMus() { map< pair< int, int >, FeatureCollection* >::iterator fiter; FeatureCollection::Iterator *citer; // featureSet *stego; steg->features = cleanSet; // printf("activating something \n"); startAction(steg->features); // printf("estimating some mu... 
\n"); estimateMu(steg); // printf("done \n"); // estimateSigma(steg); // qrHouseholder(steg); endAction(steg->features); for (fiter = collections.begin(); fiter != collections.end(); fiter++) { if (fiter->second != 0) { // printf("<%i, %i> \n", fiter->first.first, fiter->first.second); citer = fiter->second->iterator(); while (citer->hasNext()) { steg->features = citer->next(); // printf("About to estimate mu with dim=%i, M=%i \n", steg->features->dim, steg->features->M); startAction(steg->features); estimateMu(steg); endAction(steg->features); // progressChanged((double) i / (double) j); } } } steg->features = cleanSet; modelChanged(); } // we don't want to give sets directly, rather indices inside the collection double StegoModel::doMMD(featureSet *clean, featureSet *stego) { // mmdContext mc; if (mc == 0) { mc = (mmdContext*) malloc(sizeof(mmdContext)); mc->clean = clean; mc->stego = stego; initMMD(steg, *mc); estimateGamma(steg, *mc); } mc->stego = stego; // mc->stego = mc->clean; estimateMMD(steg, *mc); // printf("used gamma: %g \n", mc->gamma); return mc->mmd; } void StegoModel::doMMDs() { map< pair< int, int >, FeatureCollection* >::iterator fiter; FeatureCollection::Iterator *citer; // featureSet *stego; mc = (mmdContext*) malloc(sizeof(mmdContext)); mc->clean = cleanSet; startAction(mc->clean); initMMD(steg, *mc); estimateGamma(steg, *mc); // mc->stego = mc->clean; // estimateMMD(steg, *mc); for (fiter = collections.begin(); fiter != collections.end(); fiter++) { if (fiter->second != 0) { printf("<%i, %i> \n", fiter->first.first, fiter->first.second); citer = fiter->second->iterator(); while (citer->hasNext()) { mc->stego = citer->next(); printf("doing set %g \n", mc->stego->header->prob); startAction(mc->stego); estimateMMD(steg, *mc); mc->stego->mmd = mc->mmd; endAction(mc->stego); } } } endAction(mc->clean); closeMMD(*mc); } void StegoModel::runClassifier() { int dim = cleanSet->dim; cout << "dim = " << dim << endl; int read; int samples = 1000; // 
cleanSet->M double gamma; double *vec_g, vec[dim]; StegoClassifier sf(dim); featureSet *stegoSet; FeatureCollection *col; CUDA_CALL( hipMalloc(&vec_g, dim*sizeof(double))); startAction(cleanSet); // for (int n = 0; n < samples; n++) { // readVectorRescaled(steg, cleanSet, vec_g); // CUDA_CALL( hipMemcpy(vec, vec_g, dim*sizeof(double), hipMemcpyDefault)); // sf.addCleanVector(vec); // } // endAction(cleanSet); // one should iterate properly here col = collections.at(pair<int, int>(1, 1)); cout << "col has " << col->getNumSets() << " sets\n"; stegoSet = col->getFeatureSet(col->getNumSets()-1); startAction(stegoSet); for (int n = 0; n < samples; n++) { readVectorRescaled(steg, cleanSet, vec_g); CUDA_CALL( hipMemcpy(vec, vec_g, dim*sizeof(double), hipMemcpyDefault)); sf.addCleanVector(vec); readVectorRescaled(steg, stegoSet, vec_g); CUDA_CALL( hipMemcpy(vec, vec_g, dim*sizeof(double), hipMemcpyDefault)); sf.addStegoVector(vec); } endAction(stegoSet); endAction(cleanSet); // cout << "estimating gamma\n"; // mc = (mmdContext*) malloc(sizeof(mmdContext)); // mc->clean = cleanSet; // startAction(mc->clean); // initMMD(steg, *mc); // estimateGamma(steg, *mc); // gamma = mc->gamma; // endAction(cleanSet); // free(mc); sf.runSVM(0.438101); } void StegoModel::setFeatures(featureSet* set) { steg->features = set; modelChanged(); } void StegoModel::addView(StegoView *view) { views.push_back(view); // current_view++; } void StegoModel::modelChanged() { int i; list< StegoView* >::iterator siter; for (siter = views.begin(); siter != views.end(); siter++) { // printf("updateing some view \n"); (*siter)->updateView(); } } void StegoModel::collectionChanged() { int i; list< StegoView* >::iterator siter; for (siter = views.begin(); siter != views.end(); siter++) { (*siter)->updateCollection(); } } void StegoModel::progressChanged(double p) { int i; list< StegoView* >::iterator siter; for (siter = views.begin(); siter != views.end(); siter++) { (*siter)->updateProgress(p); } } void 
StegoModel::openDirectory(const char* path) { int i, j, k; int num_sets = 0; int bin; char *str = (char*) malloc(512*sizeof(char)); DIR *root = opendir(path); FILE *file; featureHeader header; struct dirent *entry; if (root == NULL) { // printf("root is NULL \n"); return; } // printf("Root not NULL. \n"); while ((entry = readdir(root)) != NULL) { if (strstr(entry->d_name, ".fv") != NULL) { num_sets++; } } // printf("about to open dir with %i feature files \n", num_sets); rewinddir(root); for(i = 0; i < num_sets; ) { entry = readdir(root); if (strstr(entry->d_name, ".fv") != NULL) { pathConcat(path, entry->d_name, str); openFile(str, i, num_sets, header); // file = fopen(str, "r"); // readHeader(file, &header); // if (ranges == 0) { // printf("first file is being added! \n"); // ranges = (int**) malloc(3*sizeof(int*)); // for (k = 0; k < 3; k++) { // ranges[k] = (int*) malloc(num_coefs[k]*sizeof(int)); // for (j = 0; j < num_coefs[k]; j++) // ranges[k][j] = header.ranges[k][j]; // } // } // fclose(file); // // // printf("method: %i \n", header.method); // // printf("qp range: %i \n", header.qp_range); // if (header.method == 0) { // if (cleanSet == 0) { // cleanSet = openFeatureSet(str, steg); // } else { // // printf("adding new feature file to clean set \n"); // newFeatureFile(steg, cleanSet, str); // } // } else { // if (collections[pair<int, int>(header.method,header.accept)] == 0) { // collections[pair<int, int>(header.method,header.accept)] = new FeatureCollection(&header); // printf("Created new collection for method %i and accept %i \n", header.method, header.accept); // } // collections[pair<int, int>(header.method, header.accept)]->addFeatureFile(str, &header, steg, cleanSet); // } // i++; // progressChanged((double) i / (double) num_sets); } } collectionChanged(); closedir(root); free(str); } int StegoModel::openFile(const char* path, int i, int num_sets, featureHeader &header) { int j, k; FILE *file; // featureHeader header; if 
(seenPaths->find(string(path)) != seenPaths->end()) { // printf("Das kenne ich doch schon %s :-@ \n", path); return 1; } file = fopen(path, "r"); readHeader(file, &header); if (ranges == 0) { ranges = (int**) malloc(3*sizeof(int*)); for (k = 0; k < 3; k++) { ranges[k] = (int*) malloc(num_coefs[k]*sizeof(int)); for (j = 0; j < num_coefs[k]; j++) ranges[k][j] = header.ranges[k][j]; } for (k = 0; k < 3; k++) { for (j = 0; j < num_coefs[k]; j++) { ranges[k][j] = 2 * (int) header.ranges[k][j] + 1; // printf("ranges[%i][%i] = %i \n", k, j, ranges[k][j]); } } } fclose(file); if (header.method == 0) { if (cleanSet == 0) { cleanSet = openFeatureSet(path, steg); } else { newFeatureFile(steg, cleanSet, path); } } else { if (collections[pair<int, int>(header.method,header.accept)] == 0) { collections[pair<int, int>(header.method,header.accept)] = new FeatureCollection(&header); } collections[pair<int, int>(header.method, header.accept)]->addFeatureFile(path, &header, steg, cleanSet); } seenPaths->insert(string(path)); progressChanged((double) i / (double) num_sets); return 0; // collectionChanged(); // maybe a bit inefficient to run this on every file, might be hundreds } StegoModel::Iterator::Iterator(StegoModel* m) { model = m; iter = m->collections.begin(); } bool StegoModel::Iterator::hasNext() { return (iter != model->collections.end()); } FeatureCollection* StegoModel::Iterator::next() { FeatureCollection *fc = iter->second; x = iter->first; iter++; return fc; } std::pair< int, int > StegoModel::Iterator::getX() { return x; } StegoModel::Iterator* StegoModel::iterator() { return new StegoModel::Iterator(this); } // FeatureCollection::Iterator* StegoModel::getFeatureIterator(int video_birate, int ppair, int method, int accept) { // // if (method < 0 || method >= 10) return 0; // // if (accept < 0 || accept >= 8) return 0; // // if (collections[method][accept] != 0) { // // return collections[method][accept]->iterator(); // // } // if (collections[pair<int, int>(method, 
accept)] != 0) { // return collections[pair<int, int>(method, accept)]->iterator(); // } // return 0; // } featureSet* StegoModel::getCleanSet() { return cleanSet; } int StegoModel::getQPRange() { if (cleanSet == 0) return -1; return cleanSet->header->qp_range; } int** StegoModel::getRanges() { return ranges; } int StegoModel::getDimension() { if (steg->features == NULL) return -1; return steg->features->dim; } int StegoModel::getHistDim() { if (cleanSet == 0) return -1; return cleanSet->hist_dim; } int StegoModel::getPairDim() { if (cleanSet == 0) return -1; return cleanSet->pair_dim; } int StegoModel::getUvsVDim() { if (cleanSet == 0) return -1; return cleanSet->uvsv_dim; } double* StegoModel::getMaxVector() { if (steg->features == NULL) return NULL; return steg->features->max_vec; } double* StegoModel::getMuVector() { if (steg->features == NULL) return NULL; return steg->features->gauss->mu; } double* StegoModel::getQPHist() { if (steg->features == NULL) return NULL; return steg->features->qp_vec; } // FeatureCollection* StegoModel::getCollection() { // // return fcol; // fcol // return 0; // } int StegoModel::getSigmaDim() { if (mc == 0) return -1; return mc->cache; } double* StegoModel::getSigma() { if (steg->features == 0) return 0; return steg->features->gauss->sigma; // if (mc == 0) return 0; // return mc->results; } double* StegoModel::getDiag() { if (steg->features == NULL) return NULL; return steg->features->gauss->qr_diag; } // StegoModel::Iterator* StegoModel::getIterator() { // return new Iterator(this); // } // // StegoModel::Iterator::Iterator(StegoModel* model) { // level = 0; // level1 = -1; // level3 = -1; // // level0iter = model->collections.iterator(); // next(); // } // // int StegoModel::Iterator::getLevel0() { // // return *level0iter->first; // } // // int StegoModel::Iterator::getLevel1() { // return level1; // } // // int StegoModel::Iterator::getLevel2() { // // return level2iter->first; // } // // int StegoModel::Iterator::getLevel3() 
{ // return level3; // } // // bool StegoModel::Iterator::hasNext() { // return true; // } // // void StegoModel::Iterator::next() { // int i = 3; // // } // // FeatureCollection::Iterator* StegoModel::Iterator::nextCollectionIterator() { // // FeatureCollection::Iterator* iter = level3iter->iterator(); // // next(); // // return iter; // // }
8b0b26e353cd01fa5ac0eb3b8d320e855cd37e9d.cu
#include "Stegosaurus.h" #include "StegoClassifier.h" __global__ void initDArrayKernel(double *m, int dim, double val) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx < dim) m[idx] = val; } __global__ void finishMax(int dim, double *min, double *max) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx < dim) { max[idx] = max[idx] - min[idx]; if (max[idx] < 0.0000001) max[idx] = 1.; } } __global__ void compareMax(int dim, double *current_max, double *new_features) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx < dim) { if (current_max[idx] < new_features[idx]) current_max[idx] = new_features[idx]; } } __global__ void compareMin(int dim, double *current_min, double *new_features) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx < dim) { if (current_min[idx] > new_features[idx]) current_min[idx] = new_features[idx]; } } // same as normalizeKernel? even safer __global__ void rescaleKernel(int dim, double *vec_g, double *min_g, double *max_g) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx < dim && max_g[idx] > 0.) 
{ // maybe need some invariant that deals with max=0 somehow, sets it to 1 for example vec_g[idx] = (vec_g[idx]-min_g[idx]) / max_g[idx]; } } __global__ void varianceKernel(double divM, double* vec_g, double* mu_g, double* var_g, int dim) { int idx = threadIdx.x + blockIdx.x*blockDim.x; double delta = mu_g[idx] - vec_g[idx]; if (idx > dim) var_g[idx] += delta * delta * divM; } __global__ void normalizeKernel(double *vec_g, double *mu_g, double *var_g, int dim) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if (idx < dim) vec_g[idx] = (vec_g[idx] - mu_g[idx]) * var_g[idx]; } void initDArray(double* m, int dim, int tpb, double val) { initDArrayKernel<<<BLOCKS(dim,tpb),tpb>>>(m, dim, val); // cudaThreadSynchronize(); } void printPointerInfo(void *ptr) { cudaPointerAttributes attr; cudaPointerGetAttributes(&attr, ptr); if (attr.memoryType == cudaMemoryTypeHost) printf("Pointer type: Host \n"); else if (attr.memoryType == cudaMemoryTypeDevice) printf("Pointer type: Device \n"); else printf("Pointer Attr: ?? 
\n"); } int closeFeatureSet(featureSet *set) { int i; for (i = 0; i < set->num_files; i++) { if (set->files[i] != 0) fclose(set->files[i]); } free(set->files); free(set->paths); if (set->header->method == 0) { CUDA_CALL( cudaFree(set->max_g)); CUDA_CALL( cudaFree(set->min_g)); CUDA_CALL( cudaFree(set->mu_g)); CUDA_CALL( cudaFree(set->var_g)); CUDA_CALL( cudaFreeHost(&set->mu_vec)); CUDA_CALL( cudaFreeHost(&set->mask_vec)); CUDA_CALL( cudaFreeHost(&set->mask_counts)); } if (set->counts != NULL) CUDA_CALL( cudaFreeHost(set->counts)); if (set->vec != NULL) CUDA_CALL( cudaFreeHost(set->vec)); if (set->gauss->mu != NULL) CUDA_CALL( cudaFree(set->gauss->mu)); if (set->max_vec != NULL) CUDA_CALL( cudaFreeHost(set->max_vec)); if (set->qp_vec != NULL) CUDA_CALL( cudaFreeHost(set->qp_vec)); if (set->gauss->sigma != NULL) free (set->gauss->sigma); if (set->gauss->sigma_inverse != NULL) free (set->gauss->sigma_inverse); if (set->gauss->qr != NULL) free (set->gauss->qr); if (set->gauss->qr_diag != NULL) free (set->gauss->qr_diag); free(set->header); free(set->gauss); free(set->name); free(set); return i; } gpuContext* init_gpu() { cudaDeviceProp prop; gpuContext *gp = (gpuContext*) malloc(sizeof(gpuContext)); CUDA_CALL( cudaGetDeviceProperties(&prop, 0)); CUBLAS_CALL( cublasCreate(&(gp->handle))); gp->threads_per_block = prop.maxThreadsPerBlock; gp->num_streams = 4; gp->doublesOnGPU = prop.totalGlobalMem * 3l / 4l / (long) sizeof(double); return gp; } stegoContext* init_stego() { stegoContext *steg = (stegoContext*) malloc(sizeof(stegoContext)); steg->gpu_c = init_gpu(); steg->features = NULL; steg->doublesInRAM = 5l * 1073741824l / (long) sizeof(double); return steg; } void close_stego(stegoContext *steg) { close_gpu(steg->gpu_c); closeFeatureSet(steg->features); } void close_gpu(gpuContext* gp) { cublasDestroy(gp->handle); } featureSet* openFeatureSet(const char *path, stegoContext *steg) { // printf("opening set \n"); int i; int dim = 0, dim_file = 0; int hist_dim = 0; int 
pair_dim = 0; int uvsv_dim = 0; int read; uint64_t M; long posZero; int tpb = steg->gpu_c->threads_per_block; // double *vec;//, *qp, *max;// *mu_g, *vec_g, *max, *max_g, *qp_g, *qp; FILE *file = fopen(path, "r"); featureHeader *header = (featureHeader*) malloc(sizeof(featureHeader)); if (file == NULL) return NULL; featureSet *set = (featureSet*) malloc(sizeof(featureSet)); readHeader(file, header); posZero = ftell(file); for (i = 0; i < 16; i++) hist_dim += 2*header->ranges[0][i]+1; for (i = 0; i < 4; i++) hist_dim += 2*header->ranges[1][i]+1; for (i = 0; i < 15; i++) hist_dim += 2*header->ranges[2][i]+1; pair_dim = (2*header->ranges[0][0]+1)*(2*header->ranges[0][1]+1) + (2*header->ranges[1][0]+1)*(2*header->ranges[1][1]+1) + (2*header->ranges[2][0]+1)*(2*header->ranges[2][1]+1); uvsv_dim += (2*header->ranges[1][0]+1)*(2*header->ranges[1][0]+1); for (i = 1; i < 16; i++) { if (2*header->ranges[2][i-1] > 0) uvsv_dim += (2*header->ranges[2][i-1]+1)*(2*header->ranges[2][i-1]+1); } // if (header->pair) { // dim = (2*header->ranges[0][0]+1)*(2*header->ranges[0][1]+1) + // (2*header->ranges[1][0]+1)*(2*header->ranges[1][1]+1) + // (2*header->ranges[2][0]+1)*(2*header->ranges[2][1]+1); // } else { // for (i = 0; i < 16; i++) dim += 2*header->ranges[0][i]+1; // for (i = 0; i < 4; i++) dim += 2*header->ranges[1][i]+1; // for (i = 0; i < 15; i++) dim += 2*header->ranges[2][i]+1; // } dim = (int)header->qp_range * (hist_dim + pair_dim + uvsv_dim); dim_file = (int)header->qp_range * (hist_dim + pair_dim + uvsv_dim); // printf("have dim: %i, %i, %i \n", hist_dim, pair_dim, uvsv_dim); // vec = (double*) malloc(dim*sizeof(double)); set->header = header; set->hist_dim = hist_dim; set->pair_dim = pair_dim; set->uvsv_dim = uvsv_dim; set->dim = dim; set->dim_file = dim_file; set->files = (FILE**) malloc(MAX_FILES*sizeof(FILE*));//file; set->paths = (char**) malloc(MAX_FILES*sizeof(char*)); set->vsPerFile = (uint64_t*) malloc(MAX_FILES*sizeof(uint64_t)); set->files[0] = file; 
set->num_files = 1; set->current_file = 0; set->dataOffset = posZero; set->gauss = (myGaussian*) malloc(sizeof(myGaussian)); set->gauss->mu = NULL; set->gauss->sigma = NULL; set->gauss->sigma_inverse = NULL; set->gauss->qr = NULL; set->gauss->qr_diag = NULL; set->gauss->dim = dim; set->gpu_matrix_width = dim; // maybe wish to do something smarter here! CUDA_CALL( cudaHostAlloc(&set->counts, dim_file*sizeof(store_elem), cudaHostAllocDefault)); CUDA_CALL( cudaHostAlloc(&set->vec, dim*sizeof(double), cudaHostAllocDefault)); CUDA_CALL( cudaHostAlloc(&set->max_vec, dim*sizeof(double), cudaHostAllocDefault)); CUDA_CALL( cudaHostAlloc(&set->min_vec, dim*sizeof(double), cudaHostAllocDefault)); CUDA_CALL( cudaHostAlloc(&set->qp_vec, header->qp_range*sizeof(double), cudaHostAllocDefault)); CUDA_CALL( cudaMalloc(&set->ones_g, dim*sizeof(double))); CUDA_CALL( cudaMalloc(&set->vec_g, dim*sizeof(double))); if (header->method == 0) { CUDA_CALL( cudaMalloc(&set->max_g, dim*sizeof(double))); CUDA_CALL( cudaMalloc(&set->min_g, dim*sizeof(double))); CUDA_CALL( cudaMalloc(&set->mu_g, dim*sizeof(double))); CUDA_CALL( cudaMalloc(&set->var_g, dim*sizeof(double))); CUDA_CALL( cudaHostAlloc(&set->mu_vec, dim*sizeof(double), cudaHostAllocDefault)); CUDA_CALL( cudaHostAlloc(&set->mask_counts, dim*sizeof(uint64_t), cudaHostAllocDefault)); CUDA_CALL( cudaHostAlloc(&set->mask_vec, dim*sizeof(int), cudaHostAllocDefault)); } initDArray(set->ones_g, dim, tpb, 1.); M = 0ull; while ((read = fread(set->counts, sizeof(store_elem), dim_file, file)) == dim_file) { // && M<10000 M++; } if (read != 0) { printf("dim = %d, read = %i \n", dim, read); printf("Wrong dimension?? 
\n"); } set->paths[0] = (char*) malloc((strlen(path)+1)*sizeof(char)); memcpy(set->paths[0], path, (strlen(path)+1)*sizeof(char)); set->vsPerFile[0] = M; set->M = M; set->divM = 1./(double) M; stegoRewind(set); // printf("esitmatig scaling \n"); if (header->method == 0) { estimateScalingParameters(steg, set); } fclose(file); set->files[0] = 0; // printf("done. \n"); return set; } int estimateScalingParameters(stegoContext *steg, featureSet *set) { uint64_t i, j; uint64_t M = set->M; uint64_t dim = set->dim; uint64_t max_elem = 0ul; int tpb = steg->gpu_c->threads_per_block; initDArray(set->max_g, dim, tpb, 0.); initDArray(set->min_g, dim, tpb, INFINITY); initDArray(set->mu_g, dim, tpb, 0.); initDArray(set->var_g, dim, tpb, 0.); for (j = 0ull; j < set->dim; j++) { set->mask_counts[j] = 0ul; set->mask_vec[j] = 0; } for (i = 0ull; i < M; i++) { readVectorL1D(steg, set, set->vec_g); for (j = 0ull; j < set->dim; j++) { if (set->counts[j] > 0ul) set->mask_counts[j]++; if (set->counts[j] > max_elem) max_elem = set->counts[j]; } compareMax<<<BLOCKS(dim,tpb),tpb>>>(dim, set->max_g, set->vec_g); compareMin<<<BLOCKS(dim,tpb),tpb>>>(dim, set->min_g, set->vec_g); cublasDaxpy(steg->gpu_c->handle, dim, &(set->divM), set->vec_g, 1, set->mu_g, 1); } finishMax<<<BLOCKS(dim,tpb),tpb>>>(dim, set->min_g, set->max_g); for (j = 0ull; j < set->dim; j++) { if (set->mask_vec[j] > set->M/100ull) set->mask_vec[j] = 1; } printf("max_elem: %u \n", max_elem); stegoRewind(set); for (i = 0ull; i < M; i++) { readVectorL1D(steg, set, set->vec_g); varianceKernel<<<BLOCKS(dim,tpb),tpb>>>(set->divM, set->vec_g, set->mu_g, set->var_g, dim); } stegoRewind(set); CUBLAS_CALL( cublasGetVector(dim, sizeof(double), set->max_g, 1, set->max_vec, 1)); CUBLAS_CALL( cublasGetVector(dim, sizeof(double), set->min_g, 1, set->min_vec, 1)); CUBLAS_CALL( cublasGetVector(dim, sizeof(double), set->mu_g, 1, set->mu_vec, 1)); // maybe this can be done in a kernel!! 
CUBLAS_CALL( cublasGetVector(dim, sizeof(double), set->var_g, 1, set->vec, 1)); for (i = 0ull; i < dim; i++) { if (set->vec[i] > 0.) set->vec[i] = 1./sqrt(set->vec[i]); else { set->vec[i] = 1.; } } CUBLAS_CALL( cublasSetVector(dim, sizeof(double), set->vec, 1, set->var_g, 1)); return 0; } int newFeatureFile(stegoContext *steg, featureSet* set, const char* path) { int i; int dim = set->dim; int read; // int vec[set->dim]; uint64_t localM = 0ull; FILE *file;// = fopen(path, "r"); featureHeader header; // printf("a new file, yay, paht is %s \n", path); if (set->num_files == MAX_FILES) return -1; file = fopen(path,"r"); readHeader(file, &header); // printf("just read header, found method = %i\n", header.method); while ((read = fread(set->counts, sizeof(store_elem), set->dim_file, file)) == set->dim_file) {// && M<10000 // printf("1 vecotr :D, read %i elements \n", read); localM++; } fclose(file); // printf("it contains %ld vectors xD, read = %i \n", localM, read); set->M += localM; set->vsPerFile[set->num_files] = localM; set->divM = 1./set->M; set->files[set->num_files] = 0; set->paths[set->num_files] = (char*) malloc((strlen(path)+1)*sizeof(char)); memcpy(set->paths[set->num_files], path, (strlen(path)+1)*sizeof(char)); set->num_files++; // stegoRewind(set); if (header.method == 0) { startAction(set); estimateScalingParameters(steg, set); endAction(set); } // for (i = 0; i < set->num_files; i++) { // printf("\"%s\" (%s), %i \n", set->paths[i], path, strlen(path)); // } return 0; } // changes current file of set if necessary int readCounts(featureSet *set) { int i; int read = 0; read = fread(set->counts, sizeof(store_elem), set->dim_file, set->files[set->current_file]);//readCountVector(vec, set->counts, set->dim, set->files[set->current_file]); if (read == 0) { fseek(set->files[set->current_file], set->dataOffset, SEEK_SET); set->current_file++; if (set->current_file == set->num_files) return -1; fseek(set->files[set->current_file], set->dataOffset, SEEK_SET); return 
readCounts(set); } else if (read != set->dim_file) { return -1; } for (i = 0; i < set->dim; i++) { set->vec[i] = (double) set->counts[i]; if (set->vec[i] < 0.) printf(";_; \n"); } return read; } // // will do some uncompressing later, should only be called by other read___ methods! // int readCountVector(double *data, int *cache, int dim, FILE *file) { // int i; // int read; // // read = fread(cache, sizeof(int), dim, file); // if (read == 0) return 0; // if (read != dim) return -1; // for (i = 0; i < read; i++) { // if (cache[i] < 0) printf("read something negative! \n"); // data[i] = (double) cache[i]; // } // return read; // } // reads directly into gpu memory int readVectorL2(stegoContext *steg, featureSet *set, double *vec_g) { int read; double norm; read = readCounts(set); CUBLAS_CALL( cublasSetVector(set->dim, sizeof(double), set->vec, 1, vec_g, 1)); CUBLAS_CALL( cublasDnrm2(steg->gpu_c->handle, set->dim, vec_g, 1, &norm)); norm = 1./norm; CUBLAS_CALL( cublasDscal(steg->gpu_c->handle, set->dim, &norm, vec_g, 1)); // CUBLAS_CALL( cublasGetVector(set->dim, sizeof(double), set->vec_g, 1, vec, 1)); return read; } int readVectorL1D(stegoContext *steg, featureSet *set, double *vec_g) { int read; read = readCounts(set); scaleL1D(steg, set->dim, set->vec, vec_g, set->ones_g); if (read != set->dim_file) printf("read something wrong! 
%i, %i \n", set->dim_file, read); return read; } int readVectorRescaled(stegoContext *steg, featureSet *set, double *vec_g) { int read = readVectorL1D(steg, set, vec_g); int tpb = steg->gpu_c->threads_per_block; rescaleKernel<<<BLOCKS(set->dim, tpb), tpb>>>(set->dim, vec_g, set->min_g, set->max_g); return read; } int readVectorNormalized(stegoContext *steg, featureSet *set, double *vec_g) { int read = readVectorL1D(steg, set, vec_g); int tpb = steg->gpu_c->threads_per_block; normalizeKernel<<<BLOCKS(set->dim, tpb), tpb>>>(vec_g, set->mu_g, set->var_g, set->dim); return read; } void scaleL1D(stegoContext *steg, int dim, double *vec, double *vec_g, double *ones_g) { double norm; CUBLAS_CALL( cublasSetVector(dim, sizeof(double), vec, 1, vec_g, 1)); CUBLAS_CALL( cublasDdot(steg->gpu_c->handle, dim, vec_g, 1, ones_g, 1, &norm)); norm = (double) dim/norm; CUBLAS_CALL( cublasDscal(steg->gpu_c->handle, dim, &norm, vec_g, 1)); } // #include "Stegosaurus.h" void pathConcat(const char* a, const char* b, char* result) { int i; int count; for (i = 0; a[i] != '\0'; i++) result[i] = a[i]; result[i] = '/'; count = i+1; for (i = 0; b[i] != '\0'; i++) result[count+i] = b[i]; result[count+i] = '\0'; } FeatureCollection::FeatureCollection(featureHeader *h) { memcpy(&header, h, sizeof(featureHeader)); } FeatureCollection::~FeatureCollection() { int i; map< int, featureSet* >::iterator fiter; for (fiter = collection.begin(); fiter != collection.end(); fiter++) { closeFeatureSet(fiter->second); } } int FeatureCollection::addFeatureFile(const char* path, featureHeader* header, stegoContext* steg, featureSet* cleanSet) { int bin; featureSet *set; bin = (int) (header->prob*10000 + 0.5); // printf("using bin %i \n", bin); if (collection[bin] == 0) { if (cleanSet->header->slice_type == header->slice_type) { // maybe want more chekcs here set = openFeatureSet(path, steg); set->mask_vec = cleanSet->mask_vec; set->mask_vec = cleanSet->mask_vec; set->mask_counts = cleanSet->mask_counts; 
set->max_g = cleanSet->max_g; set->min_g = cleanSet->min_g; set->mu_g = cleanSet->mu_g; set->mu_vec = cleanSet->mu_vec; set->var_g = cleanSet->var_g; set->prob = header->prob; set->id = bin; collection[bin] = set; } } else { if (collection[bin]->header->slice_type == header->slice_type) newFeatureFile(steg, collection[bin], path); } } int FeatureCollection::getNumSets() { return collection.size(); } featureSet* FeatureCollection::getFeatureSet(int index) { int i = 0; featureSet *set = NULL; map<int, featureSet*>::iterator iter; for (iter = collection.begin(); iter != collection.end(); iter++) { if (i++ == index) { set = iter->second; break; } } return set; } // int FeatureCollection::hasNext() { // if (current_set < num_sets) return 1; // else return 0; // } FeatureCollection::Iterator* FeatureCollection::iterator() { return new FeatureCollection::Iterator(this); } FeatureCollection::Iterator::Iterator(FeatureCollection* f) { fc = f; iter = f->collection.begin(); } bool FeatureCollection::Iterator::hasNext() { return (iter != fc->collection.end()); } featureSet* FeatureCollection::Iterator::next() { featureSet *set = iter->second; iter++; return set; } StegoModel::StegoModel() { int i, j; // features = new FeatureCollection(); // current_view = 0; ranges = 0; steg = init_stego(); cleanSet = 0; mc = 0; seenPaths = new set<string>(); // for (i = 0; i < 10; i++) { // for (j = 0; j < 8; j++) { // collections[i][j] = 0; // } // } } StegoModel::~StegoModel() { if (mc != 0) closeMMD(*mc); close_stego(steg); } void StegoModel::estimateMus() { map< pair< int, int >, FeatureCollection* >::iterator fiter; FeatureCollection::Iterator *citer; // featureSet *stego; steg->features = cleanSet; // printf("activating something \n"); startAction(steg->features); // printf("estimating some mu... 
\n"); estimateMu(steg); // printf("done \n"); // estimateSigma(steg); // qrHouseholder(steg); endAction(steg->features); for (fiter = collections.begin(); fiter != collections.end(); fiter++) { if (fiter->second != 0) { // printf("<%i, %i> \n", fiter->first.first, fiter->first.second); citer = fiter->second->iterator(); while (citer->hasNext()) { steg->features = citer->next(); // printf("About to estimate mu with dim=%i, M=%i \n", steg->features->dim, steg->features->M); startAction(steg->features); estimateMu(steg); endAction(steg->features); // progressChanged((double) i / (double) j); } } } steg->features = cleanSet; modelChanged(); } // we don't want to give sets directly, rather indices inside the collection double StegoModel::doMMD(featureSet *clean, featureSet *stego) { // mmdContext mc; if (mc == 0) { mc = (mmdContext*) malloc(sizeof(mmdContext)); mc->clean = clean; mc->stego = stego; initMMD(steg, *mc); estimateGamma(steg, *mc); } mc->stego = stego; // mc->stego = mc->clean; estimateMMD(steg, *mc); // printf("used gamma: %g \n", mc->gamma); return mc->mmd; } void StegoModel::doMMDs() { map< pair< int, int >, FeatureCollection* >::iterator fiter; FeatureCollection::Iterator *citer; // featureSet *stego; mc = (mmdContext*) malloc(sizeof(mmdContext)); mc->clean = cleanSet; startAction(mc->clean); initMMD(steg, *mc); estimateGamma(steg, *mc); // mc->stego = mc->clean; // estimateMMD(steg, *mc); for (fiter = collections.begin(); fiter != collections.end(); fiter++) { if (fiter->second != 0) { printf("<%i, %i> \n", fiter->first.first, fiter->first.second); citer = fiter->second->iterator(); while (citer->hasNext()) { mc->stego = citer->next(); printf("doing set %g \n", mc->stego->header->prob); startAction(mc->stego); estimateMMD(steg, *mc); mc->stego->mmd = mc->mmd; endAction(mc->stego); } } } endAction(mc->clean); closeMMD(*mc); } void StegoModel::runClassifier() { int dim = cleanSet->dim; cout << "dim = " << dim << endl; int read; int samples = 1000; // 
cleanSet->M double gamma; double *vec_g, vec[dim]; StegoClassifier sf(dim); featureSet *stegoSet; FeatureCollection *col; CUDA_CALL( cudaMalloc(&vec_g, dim*sizeof(double))); startAction(cleanSet); // for (int n = 0; n < samples; n++) { // readVectorRescaled(steg, cleanSet, vec_g); // CUDA_CALL( cudaMemcpy(vec, vec_g, dim*sizeof(double), cudaMemcpyDefault)); // sf.addCleanVector(vec); // } // endAction(cleanSet); // one should iterate properly here col = collections.at(pair<int, int>(1, 1)); cout << "col has " << col->getNumSets() << " sets\n"; stegoSet = col->getFeatureSet(col->getNumSets()-1); startAction(stegoSet); for (int n = 0; n < samples; n++) { readVectorRescaled(steg, cleanSet, vec_g); CUDA_CALL( cudaMemcpy(vec, vec_g, dim*sizeof(double), cudaMemcpyDefault)); sf.addCleanVector(vec); readVectorRescaled(steg, stegoSet, vec_g); CUDA_CALL( cudaMemcpy(vec, vec_g, dim*sizeof(double), cudaMemcpyDefault)); sf.addStegoVector(vec); } endAction(stegoSet); endAction(cleanSet); // cout << "estimating gamma\n"; // mc = (mmdContext*) malloc(sizeof(mmdContext)); // mc->clean = cleanSet; // startAction(mc->clean); // initMMD(steg, *mc); // estimateGamma(steg, *mc); // gamma = mc->gamma; // endAction(cleanSet); // free(mc); sf.runSVM(0.438101); } void StegoModel::setFeatures(featureSet* set) { steg->features = set; modelChanged(); } void StegoModel::addView(StegoView *view) { views.push_back(view); // current_view++; } void StegoModel::modelChanged() { int i; list< StegoView* >::iterator siter; for (siter = views.begin(); siter != views.end(); siter++) { // printf("updateing some view \n"); (*siter)->updateView(); } } void StegoModel::collectionChanged() { int i; list< StegoView* >::iterator siter; for (siter = views.begin(); siter != views.end(); siter++) { (*siter)->updateCollection(); } } void StegoModel::progressChanged(double p) { int i; list< StegoView* >::iterator siter; for (siter = views.begin(); siter != views.end(); siter++) { (*siter)->updateProgress(p); } } 
void StegoModel::openDirectory(const char* path) { int i, j, k; int num_sets = 0; int bin; char *str = (char*) malloc(512*sizeof(char)); DIR *root = opendir(path); FILE *file; featureHeader header; struct dirent *entry; if (root == NULL) { // printf("root is NULL \n"); return; } // printf("Root not NULL. \n"); while ((entry = readdir(root)) != NULL) { if (strstr(entry->d_name, ".fv") != NULL) { num_sets++; } } // printf("about to open dir with %i feature files \n", num_sets); rewinddir(root); for(i = 0; i < num_sets; ) { entry = readdir(root); if (strstr(entry->d_name, ".fv") != NULL) { pathConcat(path, entry->d_name, str); openFile(str, i, num_sets, header); // file = fopen(str, "r"); // readHeader(file, &header); // if (ranges == 0) { // printf("first file is being added! \n"); // ranges = (int**) malloc(3*sizeof(int*)); // for (k = 0; k < 3; k++) { // ranges[k] = (int*) malloc(num_coefs[k]*sizeof(int)); // for (j = 0; j < num_coefs[k]; j++) // ranges[k][j] = header.ranges[k][j]; // } // } // fclose(file); // // // printf("method: %i \n", header.method); // // printf("qp range: %i \n", header.qp_range); // if (header.method == 0) { // if (cleanSet == 0) { // cleanSet = openFeatureSet(str, steg); // } else { // // printf("adding new feature file to clean set \n"); // newFeatureFile(steg, cleanSet, str); // } // } else { // if (collections[pair<int, int>(header.method,header.accept)] == 0) { // collections[pair<int, int>(header.method,header.accept)] = new FeatureCollection(&header); // printf("Created new collection for method %i and accept %i \n", header.method, header.accept); // } // collections[pair<int, int>(header.method, header.accept)]->addFeatureFile(str, &header, steg, cleanSet); // } // i++; // progressChanged((double) i / (double) num_sets); } } collectionChanged(); closedir(root); free(str); } int StegoModel::openFile(const char* path, int i, int num_sets, featureHeader &header) { int j, k; FILE *file; // featureHeader header; if 
(seenPaths->find(string(path)) != seenPaths->end()) { // printf("Das kenne ich doch schon %s :-@ \n", path); return 1; } file = fopen(path, "r"); readHeader(file, &header); if (ranges == 0) { ranges = (int**) malloc(3*sizeof(int*)); for (k = 0; k < 3; k++) { ranges[k] = (int*) malloc(num_coefs[k]*sizeof(int)); for (j = 0; j < num_coefs[k]; j++) ranges[k][j] = header.ranges[k][j]; } for (k = 0; k < 3; k++) { for (j = 0; j < num_coefs[k]; j++) { ranges[k][j] = 2 * (int) header.ranges[k][j] + 1; // printf("ranges[%i][%i] = %i \n", k, j, ranges[k][j]); } } } fclose(file); if (header.method == 0) { if (cleanSet == 0) { cleanSet = openFeatureSet(path, steg); } else { newFeatureFile(steg, cleanSet, path); } } else { if (collections[pair<int, int>(header.method,header.accept)] == 0) { collections[pair<int, int>(header.method,header.accept)] = new FeatureCollection(&header); } collections[pair<int, int>(header.method, header.accept)]->addFeatureFile(path, &header, steg, cleanSet); } seenPaths->insert(string(path)); progressChanged((double) i / (double) num_sets); return 0; // collectionChanged(); // maybe a bit inefficient to run this on every file, might be hundreds } StegoModel::Iterator::Iterator(StegoModel* m) { model = m; iter = m->collections.begin(); } bool StegoModel::Iterator::hasNext() { return (iter != model->collections.end()); } FeatureCollection* StegoModel::Iterator::next() { FeatureCollection *fc = iter->second; x = iter->first; iter++; return fc; } std::pair< int, int > StegoModel::Iterator::getX() { return x; } StegoModel::Iterator* StegoModel::iterator() { return new StegoModel::Iterator(this); } // FeatureCollection::Iterator* StegoModel::getFeatureIterator(int video_birate, int ppair, int method, int accept) { // // if (method < 0 || method >= 10) return 0; // // if (accept < 0 || accept >= 8) return 0; // // if (collections[method][accept] != 0) { // // return collections[method][accept]->iterator(); // // } // if (collections[pair<int, int>(method, 
accept)] != 0) { // return collections[pair<int, int>(method, accept)]->iterator(); // } // return 0; // } featureSet* StegoModel::getCleanSet() { return cleanSet; } int StegoModel::getQPRange() { if (cleanSet == 0) return -1; return cleanSet->header->qp_range; } int** StegoModel::getRanges() { return ranges; } int StegoModel::getDimension() { if (steg->features == NULL) return -1; return steg->features->dim; } int StegoModel::getHistDim() { if (cleanSet == 0) return -1; return cleanSet->hist_dim; } int StegoModel::getPairDim() { if (cleanSet == 0) return -1; return cleanSet->pair_dim; } int StegoModel::getUvsVDim() { if (cleanSet == 0) return -1; return cleanSet->uvsv_dim; } double* StegoModel::getMaxVector() { if (steg->features == NULL) return NULL; return steg->features->max_vec; } double* StegoModel::getMuVector() { if (steg->features == NULL) return NULL; return steg->features->gauss->mu; } double* StegoModel::getQPHist() { if (steg->features == NULL) return NULL; return steg->features->qp_vec; } // FeatureCollection* StegoModel::getCollection() { // // return fcol; // fcol // return 0; // } int StegoModel::getSigmaDim() { if (mc == 0) return -1; return mc->cache; } double* StegoModel::getSigma() { if (steg->features == 0) return 0; return steg->features->gauss->sigma; // if (mc == 0) return 0; // return mc->results; } double* StegoModel::getDiag() { if (steg->features == NULL) return NULL; return steg->features->gauss->qr_diag; } // StegoModel::Iterator* StegoModel::getIterator() { // return new Iterator(this); // } // // StegoModel::Iterator::Iterator(StegoModel* model) { // level = 0; // level1 = -1; // level3 = -1; // // level0iter = model->collections.iterator(); // next(); // } // // int StegoModel::Iterator::getLevel0() { // // return *level0iter->first; // } // // int StegoModel::Iterator::getLevel1() { // return level1; // } // // int StegoModel::Iterator::getLevel2() { // // return level2iter->first; // } // // int StegoModel::Iterator::getLevel3() 
{ // return level3; // } // // bool StegoModel::Iterator::hasNext() { // return true; // } // // void StegoModel::Iterator::next() { // int i = 3; // // } // // FeatureCollection::Iterator* StegoModel::Iterator::nextCollectionIterator() { // // FeatureCollection::Iterator* iter = level3iter->iterator(); // // next(); // // return iter; // // }
574072f52b3e07abe516e245c3eef4e0a71743e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cute.h" #include "ide_listener.h" #include "cute_runner.h" #include "array2dTest.cuh" #include "host_array2d.h" #include "device_array2d.h" template<typename DeviceOp> __global__ void run_device_kernel(DeviceOp op) { op(); } template<typename DeviceOp> void test_device(DeviceOp op, dim3 gDim = 1, dim3 bDim = 1) { hipLaunchKernelGGL(( run_device_kernel), dim3(gDim),dim3(bDim), 0, 0, op); } // host side tests void testInit() { cudapp::host_array2d<int> a(10, 10); ASSERT_EQUAL(100, a.size); ASSERT_EQUAL(10, a.dimX); ASSERT_EQUAL(10, a.dimY); for(int i = 0; i < a.size; i++) { ASSERT_EQUAL(0, *a[i]); } } void testInitZero() { ASSERTM("Implement this test and code.", false); //cudapp::array2d<int> a(1,0); //cudapp::array2d<int> a(0,1); //cudapp::array2d<int> a(0,0); } void testSet() { cudapp::host_array2d<int> a(10, 10); a.set(0, 0, 10); a.set(5,4); ASSERT_EQUAL(10, a.get(0,0)); ASSERT_EQUAL(1, a.get(5,4)); } void testConstructionCtor(){ cudapp::host_array2d<int> a(10, 10); cudapp::device_array2d<int> b = a; ASSERT_EQUAL(100, b.size); ASSERT_EQUAL(10, b.dimX); ASSERT_EQUAL(10, b.dimY); } // device test struct device_inc { device_inc(cudapp::device_array2d<int> &a) : a(a) {} __device__ void operator()() { a.inc(5, 5, 10); } cudapp::device_array2d<int> a; }; void testDeviceInc(){ cudapp::host_array2d<int> a(10, 10); ASSERT_EQUAL(0, a.get(5,5)); cudapp::device_array2d<int> b = a; test_device(device_inc(b)); a = b; ASSERT_EQUAL(10, a.get(5,5)); } cute::suite make_suite_array2d() { cute::suite s; s.push_back(CUTE(testSet)); s.push_back(CUTE(testInit)); s.push_back(CUTE(testInitZero)); s.push_back(CUTE(testConstructionCtor)); s.push_back(CUTE(testDeviceInc)); return s; }
574072f52b3e07abe516e245c3eef4e0a71743e5.cu
#include "cute.h" #include "ide_listener.h" #include "cute_runner.h" #include "array2dTest.cuh" #include "host_array2d.h" #include "device_array2d.h" template<typename DeviceOp> __global__ void run_device_kernel(DeviceOp op) { op(); } template<typename DeviceOp> void test_device(DeviceOp op, dim3 gDim = 1, dim3 bDim = 1) { run_device_kernel<<<gDim,bDim>>>(op); } // host side tests void testInit() { cudapp::host_array2d<int> a(10, 10); ASSERT_EQUAL(100, a.size); ASSERT_EQUAL(10, a.dimX); ASSERT_EQUAL(10, a.dimY); for(int i = 0; i < a.size; i++) { ASSERT_EQUAL(0, *a[i]); } } void testInitZero() { ASSERTM("Implement this test and code.", false); //cudapp::array2d<int> a(1,0); //cudapp::array2d<int> a(0,1); //cudapp::array2d<int> a(0,0); } void testSet() { cudapp::host_array2d<int> a(10, 10); a.set(0, 0, 10); a.set(5,4); ASSERT_EQUAL(10, a.get(0,0)); ASSERT_EQUAL(1, a.get(5,4)); } void testConstructionCtor(){ cudapp::host_array2d<int> a(10, 10); cudapp::device_array2d<int> b = a; ASSERT_EQUAL(100, b.size); ASSERT_EQUAL(10, b.dimX); ASSERT_EQUAL(10, b.dimY); } // device test struct device_inc { device_inc(cudapp::device_array2d<int> &a) : a(a) {} __device__ void operator()() { a.inc(5, 5, 10); } cudapp::device_array2d<int> a; }; void testDeviceInc(){ cudapp::host_array2d<int> a(10, 10); ASSERT_EQUAL(0, a.get(5,5)); cudapp::device_array2d<int> b = a; test_device(device_inc(b)); a = b; ASSERT_EQUAL(10, a.get(5,5)); } cute::suite make_suite_array2d() { cute::suite s; s.push_back(CUTE(testSet)); s.push_back(CUTE(testInit)); s.push_back(CUTE(testInitZero)); s.push_back(CUTE(testConstructionCtor)); s.push_back(CUTE(testDeviceInc)); return s; }
f2cb32b5e3c297a11dacd7463cc9614a9a889906.hip
// !!! This is a file automatically generated by hipify!!! #include "../header/cudatool.h" //ErrorHandling for CUDA functions hipError_t cudaErrT(hipError_t err, int line, char* file){ //, int line, char* file){ #if defined(DEBUG) || defined(_DEBUG) if (err != hipSuccess){ printf( "\n*** Cuda error in file '%s' in line %i : %s. ***\n\n", file, line, hipGetErrorString(err)); exit(EXIT_FAILURE); } #endif return err; } //TODO compare muss umgebaut werden zu width * height!!! //Matrix gegen einander testen void compareMatrix(double *P1, double *P2, int N, char name1[25], char name2[25]){ #if defined(DEBUG)||defined(_DEBUG) double epsilon = 1.0e-8; //Fehlertoleranz // int match = 1; for (int i = 0; i < N; ++i){ if (abs(P1[i] - P2[i]) > epsilon){ // match = 0; printf("Arrays do not match!\nCompare between %s & %s!\n", name1, name2); printf("M1:%5.10f M2:%5.10f at Element %d\n\n", P1[i], P2[i], i); break; } } // if (match) printf("Arrays match!\nCompare between %s & %s.\n\n", name1, name2); #endif } //TODO compare muss umgebaut werden zu width * height!!! 
//Matrix gegen einander testen void compareMatrix(float *P1, float *P2, int N, char name1[25], char name2[25]){ #if defined(DEBUG)||defined(_DEBUG) double epsilon = 1.0e-8; //Fehlertoleranz // int match = 1; for (int i = 0; i < N; ++i){ if (abs(P1[i] - P2[i]) > epsilon){ // match = 0; printf("\nArrays do not match!\nCompare between %s & %s!\n", name1, name2); printf("M1:%5.10f M2:%5.10f at Element %d\n\n", P1[i], P2[i], i); break; } } // if (match) printf("Arrays match!\nCompare between %s & %s.\n\n", name1, name2); #endif } //Matrix fuellen void initMatrix(float *ip, int size){ //random seed erstellen time_t t; srand((unsigned)time(&t)); //Matrix auffuellen for (int i = 0; i < size; ++i){ ip[i] = (float)(rand() & 0xFF) / 100.0f; } } //Ausgabe welcher Teil gestartet wird void preProcess(char *_name){ printf("...%s...\n", _name ); } //Ausgabe der Ergebnisse, tElapsed in sekunden void postProcessOMP(int nReps, int memSize, double tElapsed, char *_type){ printf("Type: %s\tTime elapsed: %.5f ms\t",_type , 1e3* (tElapsed / nReps )); printf("Bandwidth: %.3f GB/s\n", ( ((2. * memSize) / (BYTE_TO_GBYTE)) / (tElapsed / nReps) )); } //Ausgabe der Ergebnisse, tElapsed in millisekunden void postProcess(int nReps, int memSize, double tElapsed, char *_type){ printf("Type: %s\tTime elapsed: %.5f ms\t",_type , (tElapsed / nReps )); printf("Bandwidth: %.3f GB/s\n", ( ((2. * memSize) / (BYTE_TO_GBYTE)) / ((tElapsed / 1e3) / nReps) )); }
f2cb32b5e3c297a11dacd7463cc9614a9a889906.cu
#include "../header/cudatool.h" //ErrorHandling for CUDA functions cudaError_t cudaErrT(cudaError_t err, int line, char* file){ //, int line, char* file){ #if defined(DEBUG) || defined(_DEBUG) if (err != cudaSuccess){ printf( "\n*** Cuda error in file '%s' in line %i : %s. ***\n\n", file, line, cudaGetErrorString(err)); exit(EXIT_FAILURE); } #endif return err; } //TODO compare muss umgebaut werden zu width * height!!! //Matrix gegen einander testen void compareMatrix(double *P1, double *P2, int N, char name1[25], char name2[25]){ #if defined(DEBUG)||defined(_DEBUG) double epsilon = 1.0e-8; //Fehlertoleranz // int match = 1; for (int i = 0; i < N; ++i){ if (abs(P1[i] - P2[i]) > epsilon){ // match = 0; printf("Arrays do not match!\nCompare between %s & %s!\n", name1, name2); printf("M1:%5.10f M2:%5.10f at Element %d\n\n", P1[i], P2[i], i); break; } } // if (match) printf("Arrays match!\nCompare between %s & %s.\n\n", name1, name2); #endif } //TODO compare muss umgebaut werden zu width * height!!! 
//Matrix gegen einander testen void compareMatrix(float *P1, float *P2, int N, char name1[25], char name2[25]){ #if defined(DEBUG)||defined(_DEBUG) double epsilon = 1.0e-8; //Fehlertoleranz // int match = 1; for (int i = 0; i < N; ++i){ if (abs(P1[i] - P2[i]) > epsilon){ // match = 0; printf("\nArrays do not match!\nCompare between %s & %s!\n", name1, name2); printf("M1:%5.10f M2:%5.10f at Element %d\n\n", P1[i], P2[i], i); break; } } // if (match) printf("Arrays match!\nCompare between %s & %s.\n\n", name1, name2); #endif } //Matrix fuellen void initMatrix(float *ip, int size){ //random seed erstellen time_t t; srand((unsigned)time(&t)); //Matrix auffuellen for (int i = 0; i < size; ++i){ ip[i] = (float)(rand() & 0xFF) / 100.0f; } } //Ausgabe welcher Teil gestartet wird void preProcess(char *_name){ printf("...%s...\n", _name ); } //Ausgabe der Ergebnisse, tElapsed in sekunden void postProcessOMP(int nReps, int memSize, double tElapsed, char *_type){ printf("Type: %s\tTime elapsed: %.5f ms\t",_type , 1e3* (tElapsed / nReps )); printf("Bandwidth: %.3f GB/s\n", ( ((2. * memSize) / (BYTE_TO_GBYTE)) / (tElapsed / nReps) )); } //Ausgabe der Ergebnisse, tElapsed in millisekunden void postProcess(int nReps, int memSize, double tElapsed, char *_type){ printf("Type: %s\tTime elapsed: %.5f ms\t",_type , (tElapsed / nReps )); printf("Bandwidth: %.3f GB/s\n", ( ((2. * memSize) / (BYTE_TO_GBYTE)) / ((tElapsed / 1e3) / nReps) )); }
466f99c8ca6750bdbb5368ace916d9aba185a3e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // old op include, fluid should be removed #ifdef PADDLE_WITH_HIP #include <hipcub/hipcub.hpp> namespace cub = hipcub; #else #include <hipcub/hipcub.hpp> #endif #include <vector> #include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/kernels/funcs/axis_utils.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/impl/softmax_kernel_impl.h" #include "paddle/phi/kernels/margin_cross_entropy_grad_kernel.h" #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/core/visit_type.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/distributed/collective/process_group.h" #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif #include "paddle/phi/backends/gpu/gpu_context.h" namespace phi { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T, typename Context> void 
GetClassInterval(const gpuStream_t& stream, const phi::Place& place, const Context& dev_ctx, const int rid, const int rank, const int nranks, const int D, DenseTensor* class_interval) { std::vector<int> shard_dim_vec(nranks + 1, 0); shard_dim_vec[rank + 1] = D; if (nranks <= 1) { phi::TensorFromVector(shard_dim_vec, dev_ctx, class_interval); return; } #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) DenseTensor num_classes_per_device; phi::TensorFromVector(shard_dim_vec, dev_ctx, &num_classes_per_device); int* num_classes_per_device_ptr = num_classes_per_device.data<int>(); auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance(); if (map->has(rid)) { // Use ProcessGroup paddle::distributed::ProcessGroup* pg = map->get(rid); std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(num_classes_per_device); out_tensor.push_back(num_classes_per_device); paddle::distributed::AllreduceOptions opts; opts.reduce_op = paddle::distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { const auto& comm = paddle::platform::NCCLCommContext::Instance().Get(rid, place); // use global calculate stream const auto calcu_stream = static_cast<GPUContext*>( paddle::platform::DeviceContextPool::Instance().Get(place)) ->stream(); PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce( num_classes_per_device_ptr, num_classes_per_device_ptr, num_classes_per_device.numel(), paddle::platform::ToNCCLDataType(paddle::framework::TransToProtoVarType( num_classes_per_device.dtype())), ncclSum, comm->comm(), calcu_stream)); } class_interval->Resize({nranks + 1}); auto class_interval_ptr = dev_ctx.template Alloc<int>(class_interval); size_t cub_temp_storage_bytes = 0; hipcub::DeviceScan::InclusiveSum<int*, int*>( nullptr, cub_temp_storage_bytes, nullptr, nullptr, nranks + 1, stream); auto cub_temp_storage = phi::memory_utils::Alloc(place, cub_temp_storage_bytes); 
hipcub::DeviceScan::InclusiveSum<int*, int*>(cub_temp_storage->ptr(), cub_temp_storage_bytes, num_classes_per_device_ptr, class_interval_ptr, nranks + 1, stream); return; #endif } template <typename T, typename IndexT> __global__ void CalculateGrad(T* logits_grad, const T* loss_grad, const T* logits, const IndexT* label, const float margin1, const float margin2, const float scale, const int rank, const int64_t N, const int64_t D, const int* class_interval_ptr) { using MPType = typename phi::dtype::MPTypeTrait<T>::Type; int start_index = class_interval_ptr[rank]; CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; auto col = i % D; if ((col + start_index) == label[row]) { logits_grad[i] = (logits_grad[i] - static_cast<T>(1.0)) * loss_grad[row]; if (fabs(margin1 - 1.0) > 1e-8 || fabs(margin2) > 1e-8) { MPType dout = static_cast<MPType>(logits_grad[i]); MPType one = static_cast<MPType>(1.0f); MPType x = static_cast<MPType>(logits[i]); MPType m1 = static_cast<MPType>(margin1); MPType m2 = static_cast<MPType>(margin2); MPType d = m1 * sin(m1 * acos(x) + m2) / sqrt(one - x * x); logits_grad[i] = static_cast<T>(dout * d); } } else { logits_grad[i] *= loss_grad[row]; } if (fabs(scale - 1.0) > 1e-8) { logits_grad[i] *= static_cast<T>(scale); } } } template <typename T, typename Context> void MarginCrossEntropyGradKernel(const Context& dev_ctx, const DenseTensor& logits, const DenseTensor& label, const DenseTensor& softmax, const DenseTensor& loss_grad, bool return_softmax, int ring_id, int rank, int nranks, float margin1, float margin2, float margin3, float scale, DenseTensor* logits_grad) { const auto softmax_dims = softmax.dims(); const int axis = softmax_dims.size() - 1; const int N = phi::funcs::SizeToAxis(axis, softmax_dims); const int D = phi::funcs::SizeFromAxis(axis, softmax_dims); if (return_softmax) { phi::Copy<Context>( dev_ctx, softmax, dev_ctx.GetPlace(), false, logits_grad); } else { logits_grad->ShareDataWith(softmax); } int blocks = NumBlocks(N * D); int threads 
= kNumCUDAThreads; const auto& label_type = paddle::framework::TransToProtoVarType(label.dtype()); DenseTensor class_interval; GetClassInterval<T, Context>(dev_ctx.stream(), dev_ctx.GetPlace(), dev_ctx, ring_id, rank, nranks, D, &class_interval); if (label_type == paddle::framework::proto::VarType::INT32) { typedef int32_t LabelT; hipLaunchKernelGGL(( CalculateGrad<T, LabelT>) , dim3(blocks), dim3(threads), 0, dev_ctx.stream(), logits_grad->data<T>(), loss_grad.data<T>(), logits.data<T>(), label.data<LabelT>(), margin1, margin2, scale, rank, N, D, class_interval.data<int>()); } else if (label_type == paddle::framework::proto::VarType::INT64) { typedef int64_t LabelT; hipLaunchKernelGGL(( CalculateGrad<T, LabelT>) , dim3(blocks), dim3(threads), 0, dev_ctx.stream(), logits_grad->data<T>(), loss_grad.data<T>(), logits.data<T>(), label.data<LabelT>(), margin1, margin2, scale, rank, N, D, class_interval.data<int>()); } } } // namespace phi PD_REGISTER_KERNEL(margin_cross_entropy_grad, GPU, ALL_LAYOUT, phi::MarginCrossEntropyGradKernel, float, double, phi::dtype::float16) {}
466f99c8ca6750bdbb5368ace916d9aba185a3e4.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // old op include, fluid should be removed #ifdef PADDLE_WITH_HIP #include <hipcub/hipcub.hpp> namespace cub = hipcub; #else #include <cub/cub.cuh> #endif #include <vector> #include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/kernels/funcs/axis_utils.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/impl/softmax_kernel_impl.h" #include "paddle/phi/kernels/margin_cross_entropy_grad_kernel.h" #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/core/visit_type.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/distributed/collective/process_group.h" #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif #include "paddle/phi/backends/gpu/gpu_context.h" namespace phi { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T, typename Context> void GetClassInterval(const gpuStream_t& stream, const phi::Place& place, const Context& dev_ctx, const 
int rid, const int rank, const int nranks, const int D, DenseTensor* class_interval) { std::vector<int> shard_dim_vec(nranks + 1, 0); shard_dim_vec[rank + 1] = D; if (nranks <= 1) { phi::TensorFromVector(shard_dim_vec, dev_ctx, class_interval); return; } #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) DenseTensor num_classes_per_device; phi::TensorFromVector(shard_dim_vec, dev_ctx, &num_classes_per_device); int* num_classes_per_device_ptr = num_classes_per_device.data<int>(); auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance(); if (map->has(rid)) { // Use ProcessGroup paddle::distributed::ProcessGroup* pg = map->get(rid); std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(num_classes_per_device); out_tensor.push_back(num_classes_per_device); paddle::distributed::AllreduceOptions opts; opts.reduce_op = paddle::distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { const auto& comm = paddle::platform::NCCLCommContext::Instance().Get(rid, place); // use global calculate stream const auto calcu_stream = static_cast<GPUContext*>( paddle::platform::DeviceContextPool::Instance().Get(place)) ->stream(); PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce( num_classes_per_device_ptr, num_classes_per_device_ptr, num_classes_per_device.numel(), paddle::platform::ToNCCLDataType(paddle::framework::TransToProtoVarType( num_classes_per_device.dtype())), ncclSum, comm->comm(), calcu_stream)); } class_interval->Resize({nranks + 1}); auto class_interval_ptr = dev_ctx.template Alloc<int>(class_interval); size_t cub_temp_storage_bytes = 0; cub::DeviceScan::InclusiveSum<int*, int*>( nullptr, cub_temp_storage_bytes, nullptr, nullptr, nranks + 1, stream); auto cub_temp_storage = phi::memory_utils::Alloc(place, cub_temp_storage_bytes); cub::DeviceScan::InclusiveSum<int*, int*>(cub_temp_storage->ptr(), cub_temp_storage_bytes, 
num_classes_per_device_ptr, class_interval_ptr, nranks + 1, stream); return; #endif } template <typename T, typename IndexT> __global__ void CalculateGrad(T* logits_grad, const T* loss_grad, const T* logits, const IndexT* label, const float margin1, const float margin2, const float scale, const int rank, const int64_t N, const int64_t D, const int* class_interval_ptr) { using MPType = typename phi::dtype::MPTypeTrait<T>::Type; int start_index = class_interval_ptr[rank]; CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; auto col = i % D; if ((col + start_index) == label[row]) { logits_grad[i] = (logits_grad[i] - static_cast<T>(1.0)) * loss_grad[row]; if (fabs(margin1 - 1.0) > 1e-8 || fabs(margin2) > 1e-8) { MPType dout = static_cast<MPType>(logits_grad[i]); MPType one = static_cast<MPType>(1.0f); MPType x = static_cast<MPType>(logits[i]); MPType m1 = static_cast<MPType>(margin1); MPType m2 = static_cast<MPType>(margin2); MPType d = m1 * sin(m1 * acos(x) + m2) / sqrt(one - x * x); logits_grad[i] = static_cast<T>(dout * d); } } else { logits_grad[i] *= loss_grad[row]; } if (fabs(scale - 1.0) > 1e-8) { logits_grad[i] *= static_cast<T>(scale); } } } template <typename T, typename Context> void MarginCrossEntropyGradKernel(const Context& dev_ctx, const DenseTensor& logits, const DenseTensor& label, const DenseTensor& softmax, const DenseTensor& loss_grad, bool return_softmax, int ring_id, int rank, int nranks, float margin1, float margin2, float margin3, float scale, DenseTensor* logits_grad) { const auto softmax_dims = softmax.dims(); const int axis = softmax_dims.size() - 1; const int N = phi::funcs::SizeToAxis(axis, softmax_dims); const int D = phi::funcs::SizeFromAxis(axis, softmax_dims); if (return_softmax) { phi::Copy<Context>( dev_ctx, softmax, dev_ctx.GetPlace(), false, logits_grad); } else { logits_grad->ShareDataWith(softmax); } int blocks = NumBlocks(N * D); int threads = kNumCUDAThreads; const auto& label_type = 
paddle::framework::TransToProtoVarType(label.dtype()); DenseTensor class_interval; GetClassInterval<T, Context>(dev_ctx.stream(), dev_ctx.GetPlace(), dev_ctx, ring_id, rank, nranks, D, &class_interval); if (label_type == paddle::framework::proto::VarType::INT32) { typedef int32_t LabelT; CalculateGrad<T, LabelT> <<<blocks, threads, 0, dev_ctx.stream()>>>(logits_grad->data<T>(), loss_grad.data<T>(), logits.data<T>(), label.data<LabelT>(), margin1, margin2, scale, rank, N, D, class_interval.data<int>()); } else if (label_type == paddle::framework::proto::VarType::INT64) { typedef int64_t LabelT; CalculateGrad<T, LabelT> <<<blocks, threads, 0, dev_ctx.stream()>>>(logits_grad->data<T>(), loss_grad.data<T>(), logits.data<T>(), label.data<LabelT>(), margin1, margin2, scale, rank, N, D, class_interval.data<int>()); } } } // namespace phi PD_REGISTER_KERNEL(margin_cross_entropy_grad, GPU, ALL_LAYOUT, phi::MarginCrossEntropyGradKernel, float, double, phi::dtype::float16) {}
f50e5bcf03555a35bfd414e88fc218b3c19aeb49.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/dropout_op.h" #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { __global__ void DropoutKernel(const int N, const float ratio, const float* Xdata, float* Ydata, bool* maskdata) { const float scale = 1. / (1. - ratio); CUDA_1D_KERNEL_LOOP(i, N) { maskdata[i] = (Ydata[i] > ratio); Ydata[i] = Xdata[i] * scale * maskdata[i]; } } } // namespace template <> bool DropoutOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); auto* mask = Output(1); Y->Resize(X.dims()); mask->Resize(X.dims()); if (is_test_) { if (Y != &X) { context_.Copy<float, CUDAContext, CUDAContext>( X.size(), X.data<float>(), Y->mutable_data<float>()); } return true; } else { // We do a simple trick here: since hiprand cannot generate random // boolean numbers, we will generate into dY and write the result to // mask. float* Ydata = Y->mutable_data<float>(); CAFFE_ENFORCE(X.data<float>() != Ydata, "In-place GPU dropout is broken"); CURAND_ENFORCE( hiprandGenerateUniform(context_.curand_generator(), Ydata, X.size())); hipLaunchKernelGGL(( DropoutKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), ratio_, X.data<float>(), Ydata, mask->mutable_data<bool>()); return true; } } namespace { __global__ void DropoutGradientKernel(const int N, const float* dYdata, const bool* maskdata, const float scale, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { dXdata[i] = dYdata[i] * maskdata[i] * scale; } } } // namespace template <> bool DropoutGradientOp<float, CUDAContext>::RunOnDevice() { auto& dY = Input(0); auto& mask = Input(1); auto* dX = Output(0); DCHECK_EQ(dY.size(), mask.size()); dX->Resize(dY.dims()); if (is_test_) { if (dX != &dY) { context_.Copy<float, CUDAContext, CUDAContext>( dY.size(), dY.data<float>(), dX->mutable_data<float>()); } return true; } else { const float scale = 1. / (1. 
- ratio_); hipLaunchKernelGGL(( DropoutGradientKernel), dim3(CAFFE_GET_BLOCKS(dY.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), dY.size(), dY.data<float>(), mask.data<bool>(), scale, dX->mutable_data<float>()); return true; } } REGISTER_CUDA_OPERATOR(Dropout, DropoutOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(DropoutGrad, DropoutGradientOp<float, CUDAContext>); } // namespace caffe2
f50e5bcf03555a35bfd414e88fc218b3c19aeb49.cu
#include "caffe2/operators/dropout_op.h" #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { __global__ void DropoutKernel(const int N, const float ratio, const float* Xdata, float* Ydata, bool* maskdata) { const float scale = 1. / (1. - ratio); CUDA_1D_KERNEL_LOOP(i, N) { maskdata[i] = (Ydata[i] > ratio); Ydata[i] = Xdata[i] * scale * maskdata[i]; } } } // namespace template <> bool DropoutOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); auto* mask = Output(1); Y->Resize(X.dims()); mask->Resize(X.dims()); if (is_test_) { if (Y != &X) { context_.Copy<float, CUDAContext, CUDAContext>( X.size(), X.data<float>(), Y->mutable_data<float>()); } return true; } else { // We do a simple trick here: since curand cannot generate random // boolean numbers, we will generate into dY and write the result to // mask. float* Ydata = Y->mutable_data<float>(); CAFFE_ENFORCE(X.data<float>() != Ydata, "In-place GPU dropout is broken"); CURAND_ENFORCE( curandGenerateUniform(context_.curand_generator(), Ydata, X.size())); DropoutKernel<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), ratio_, X.data<float>(), Ydata, mask->mutable_data<bool>()); return true; } } namespace { __global__ void DropoutGradientKernel(const int N, const float* dYdata, const bool* maskdata, const float scale, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { dXdata[i] = dYdata[i] * maskdata[i] * scale; } } } // namespace template <> bool DropoutGradientOp<float, CUDAContext>::RunOnDevice() { auto& dY = Input(0); auto& mask = Input(1); auto* dX = Output(0); DCHECK_EQ(dY.size(), mask.size()); dX->Resize(dY.dims()); if (is_test_) { if (dX != &dY) { context_.Copy<float, CUDAContext, CUDAContext>( dY.size(), dY.data<float>(), dX->mutable_data<float>()); } return true; } else { const float scale = 1. / (1. 
- ratio_); DropoutGradientKernel<<<CAFFE_GET_BLOCKS(dY.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( dY.size(), dY.data<float>(), mask.data<bool>(), scale, dX->mutable_data<float>()); return true; } } REGISTER_CUDA_OPERATOR(Dropout, DropoutOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(DropoutGrad, DropoutGradientOp<float, CUDAContext>); } // namespace caffe2
ec93baa05f1e47e88778aecb4be9ff584096fc66.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <string> #include <bitset> #include <iostream> #include "cudadp.h" #include "fasta_util.h" using namespace std; // Affine gap model #define MATCH 1 #define MISMATCH 1 #define Gopen -3 #define Gext -2 #define M 100000 #define N 100000 #define G (1000*1000*1000) struct Sequences { char *dev_A; char *dev_B; }; __inline__ __device__ int compute_j(int level, int problem_size) { int tid = blockIdx.x * THREADS + threadIdx.x; int j; if (level <= min(M-1, N-1)) { // up j = tid; } else if(level > max(M-1, N-1)) { // bottom j = N - problem_size + tid; } else { // middle j = level - min(M-1, N-1) + tid; } return j; } __inline__ __device__ void cudadp_user_kernel(int m, int n, int level, int problem_size, int3 *deps, void *data) { int tid = blockIdx.x * THREADS + threadIdx.x; //if(tid >= problem_size) return; struct Sequences* seq = (struct Sequences*)data; char *A = seq->dev_A; char *B = seq->dev_B; int3 *dep1 = deps; int3 *dep2 = &deps[min(m, n)]; // read dependencies from global memory to shared memory __shared__ int3 local_dep1[THREADS+2]; __shared__ int3 local_dep2[THREADS+2]; if(tid < min(m, n)) { local_dep1[threadIdx.x+1] = dep1[tid]; local_dep2[threadIdx.x+1] = dep2[tid]; } if(threadIdx.x == THREADS-1 && tid < min(M, N) ) { local_dep1[threadIdx.x+2] = dep1[tid+1]; local_dep2[threadIdx.x+2] = dep2[tid+1]; } __syncthreads(); int i, j; j = compute_j(level, problem_size); i = level - j; char Bj = B[j]; // j, B[j] are not changing during following steps int3 diag, left, up, result; for(int k = 0; k < THREADS/2+1; k++, i++, level++) { if(level >= M+N-1) return; if(threadIdx.x>=THREADS-k) { //printf("level:%d (%d, %d)\n", level, i, j); if (level <= min(M-1, N-1)) { // up, depends on tid-1, tid left = local_dep2[threadIdx.x]; //up = local_dep2[threadIdx.x+1]; up = result; diag = local_dep1[threadIdx.x]; } else { // middle and bottom, depends on 
tid, tid+1 //left = local_dep2[threadIdx.x+1]; left = result; up = local_dep2[threadIdx.x+2]; diag = local_dep1[threadIdx.x+2]; } result.x = max(left.x-Gext, left.z-Gopen); // E[i,j] result.y = max(up.y-Gext, up.z-Gopen); // F[i,j] result.z = max(0, diag.z + (A[i]==Bj?MATCH:MISMATCH)); // H[i,j] result.z = max3(result.z, result.x, result.y); // H[i,j] if(k == THREADS/2 || level==M+N-2) { // last level, write into global memory deps[tid] = result; } else { // intermediate levels, use local memory // swap dependency levels local_dep2[threadIdx.x] = local_dep1[threadIdx.x]; if(threadIdx.x == 0) { local_dep2[THREADS] = local_dep1[THREADS]; local_dep2[THREADS+1] = local_dep1[THREADS+1]; } local_dep1[threadIdx.x+1] = result; } } __syncthreads(); } } string random_string(int length) { //srand (time(0) ); string s(length, 'A'); const char alphabet[] = {'A', 'C', 'G', 'T'}; for(int i = 0; i < length; i++) { s[i] = alphabet[(rand() % 4)]; } return s; } int main(int argc, char *argv[]) { //string A = read_fasta_file(argv[1]); //string B = read_fasta_file(argv[1]); //string A = "GTCTTACATCCGTTCG"; //string B = "GTCTTACATCCGTTCG"; string A = random_string(M); string B = random_string(N); //printf("A:%s\nB:%s\n", A.c_str(), B.c_str()); struct Sequences seq; hipMalloc(&(seq.dev_A), sizeof(char) * A.length()); hipMalloc(&(seq.dev_B), sizeof(char) * B.length()); hipMemcpy(seq.dev_A, A.c_str(), sizeof(char)*A.length(), hipMemcpyHostToDevice); hipMemcpy(seq.dev_B, B.c_str(), sizeof(char)*B.length(), hipMemcpyHostToDevice); struct Sequences *dev_seq; hipMalloc(&dev_seq, sizeof(struct Sequences)); hipMemcpy(dev_seq, &seq, sizeof(struct Sequences), hipMemcpyHostToDevice); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); cudadp_start(A.length(), B.length(), 2, dev_seq); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); float gcpus = A.length() * B.length() * 1.0 / G / 
(milliseconds/1000.0); printf("time:%f, GCPUS: %f\n", milliseconds/1000.0, gcpus); hipFree(seq.dev_A); hipFree(seq.dev_B); hipFree(dev_seq); return 0; }
ec93baa05f1e47e88778aecb4be9ff584096fc66.cu
#include <stdlib.h> #include <stdio.h> #include <string> #include <bitset> #include <iostream> #include "cudadp.h" #include "fasta_util.h" using namespace std; // Affine gap model #define MATCH 1 #define MISMATCH 1 #define Gopen -3 #define Gext -2 #define M 100000 #define N 100000 #define G (1000*1000*1000) struct Sequences { char *dev_A; char *dev_B; }; __inline__ __device__ int compute_j(int level, int problem_size) { int tid = blockIdx.x * THREADS + threadIdx.x; int j; if (level <= min(M-1, N-1)) { // up j = tid; } else if(level > max(M-1, N-1)) { // bottom j = N - problem_size + tid; } else { // middle j = level - min(M-1, N-1) + tid; } return j; } __inline__ __device__ void cudadp_user_kernel(int m, int n, int level, int problem_size, int3 *deps, void *data) { int tid = blockIdx.x * THREADS + threadIdx.x; //if(tid >= problem_size) return; struct Sequences* seq = (struct Sequences*)data; char *A = seq->dev_A; char *B = seq->dev_B; int3 *dep1 = deps; int3 *dep2 = &deps[min(m, n)]; // read dependencies from global memory to shared memory __shared__ int3 local_dep1[THREADS+2]; __shared__ int3 local_dep2[THREADS+2]; if(tid < min(m, n)) { local_dep1[threadIdx.x+1] = dep1[tid]; local_dep2[threadIdx.x+1] = dep2[tid]; } if(threadIdx.x == THREADS-1 && tid < min(M, N) ) { local_dep1[threadIdx.x+2] = dep1[tid+1]; local_dep2[threadIdx.x+2] = dep2[tid+1]; } __syncthreads(); int i, j; j = compute_j(level, problem_size); i = level - j; char Bj = B[j]; // j, B[j] are not changing during following steps int3 diag, left, up, result; for(int k = 0; k < THREADS/2+1; k++, i++, level++) { if(level >= M+N-1) return; if(threadIdx.x>=THREADS-k) { //printf("level:%d (%d, %d)\n", level, i, j); if (level <= min(M-1, N-1)) { // up, depends on tid-1, tid left = local_dep2[threadIdx.x]; //up = local_dep2[threadIdx.x+1]; up = result; diag = local_dep1[threadIdx.x]; } else { // middle and bottom, depends on tid, tid+1 //left = local_dep2[threadIdx.x+1]; left = result; up = 
local_dep2[threadIdx.x+2]; diag = local_dep1[threadIdx.x+2]; } result.x = max(left.x-Gext, left.z-Gopen); // E[i,j] result.y = max(up.y-Gext, up.z-Gopen); // F[i,j] result.z = max(0, diag.z + (A[i]==Bj?MATCH:MISMATCH)); // H[i,j] result.z = max3(result.z, result.x, result.y); // H[i,j] if(k == THREADS/2 || level==M+N-2) { // last level, write into global memory deps[tid] = result; } else { // intermediate levels, use local memory // swap dependency levels local_dep2[threadIdx.x] = local_dep1[threadIdx.x]; if(threadIdx.x == 0) { local_dep2[THREADS] = local_dep1[THREADS]; local_dep2[THREADS+1] = local_dep1[THREADS+1]; } local_dep1[threadIdx.x+1] = result; } } __syncthreads(); } } string random_string(int length) { //srand (time(0) ); string s(length, 'A'); const char alphabet[] = {'A', 'C', 'G', 'T'}; for(int i = 0; i < length; i++) { s[i] = alphabet[(rand() % 4)]; } return s; } int main(int argc, char *argv[]) { //string A = read_fasta_file(argv[1]); //string B = read_fasta_file(argv[1]); //string A = "GTCTTACATCCGTTCG"; //string B = "GTCTTACATCCGTTCG"; string A = random_string(M); string B = random_string(N); //printf("A:%s\nB:%s\n", A.c_str(), B.c_str()); struct Sequences seq; cudaMalloc(&(seq.dev_A), sizeof(char) * A.length()); cudaMalloc(&(seq.dev_B), sizeof(char) * B.length()); cudaMemcpy(seq.dev_A, A.c_str(), sizeof(char)*A.length(), cudaMemcpyHostToDevice); cudaMemcpy(seq.dev_B, B.c_str(), sizeof(char)*B.length(), cudaMemcpyHostToDevice); struct Sequences *dev_seq; cudaMalloc(&dev_seq, sizeof(struct Sequences)); cudaMemcpy(dev_seq, &seq, sizeof(struct Sequences), cudaMemcpyHostToDevice); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); cudadp_start(A.length(), B.length(), 2, dev_seq); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); float gcpus = A.length() * B.length() * 1.0 / G / (milliseconds/1000.0); printf("time:%f, GCPUS: %f\n", 
milliseconds/1000.0, gcpus); cudaFree(seq.dev_A); cudaFree(seq.dev_B); cudaFree(dev_seq); return 0; }
d725647e95fe180375439d27f9382a01b4bb85d1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cassert> #include "common.h" #include <random> static constexpr int SIZE = 100 * 1024 * 1024; __global__ void histo_kernel(unsigned char *buffer, int *histo) { int i = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while (i < SIZE) { atomicAdd(&histo[buffer[i]], 1); i += stride; } } int main() { cuda::vector<unsigned char> buffer(SIZE); cuda::vector<int> histo(256); std::fill(std::begin(histo), std::end(histo), 0); for (auto &v : buffer) { v = rand(); } hipEvent_t start, stop; CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); CHECK(hipEventRecord(start, 0)); hipDeviceProp_t prop; CHECK(hipGetDeviceProperties(&prop, 0)); int blocks = prop.multiProcessorCount; hipLaunchKernelGGL(( histo_kernel), dim3(blocks * 2), dim3(256), 0, 0, buffer.data(), histo.data()); CHECK(hipEventRecord(stop, 0)); CHECK(hipEventSynchronize(stop)); float elapsed_time = .0f; CHECK(hipEventElapsedTime(&elapsed_time, start, stop)); printf("Time to generate: %3.1f ms\n", elapsed_time); long histo_count = 0; for (int i = 0; i < 256; i++) { histo_count += histo[i]; } printf("Histogram Sum: %ld\n", histo_count); CHECK(hipEventDestroy(start)); CHECK(hipEventDestroy(stop)); assert(histo_count == SIZE); return 0; }
d725647e95fe180375439d27f9382a01b4bb85d1.cu
#include <cassert> #include "common.h" #include <random> static constexpr int SIZE = 100 * 1024 * 1024; __global__ void histo_kernel(unsigned char *buffer, int *histo) { int i = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while (i < SIZE) { atomicAdd(&histo[buffer[i]], 1); i += stride; } } int main() { cuda::vector<unsigned char> buffer(SIZE); cuda::vector<int> histo(256); std::fill(std::begin(histo), std::end(histo), 0); for (auto &v : buffer) { v = rand(); } cudaEvent_t start, stop; CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); CHECK(cudaEventRecord(start, 0)); cudaDeviceProp prop; CHECK(cudaGetDeviceProperties(&prop, 0)); int blocks = prop.multiProcessorCount; histo_kernel<<<blocks * 2, 256>>>(buffer.data(), histo.data()); CHECK(cudaEventRecord(stop, 0)); CHECK(cudaEventSynchronize(stop)); float elapsed_time = .0f; CHECK(cudaEventElapsedTime(&elapsed_time, start, stop)); printf("Time to generate: %3.1f ms\n", elapsed_time); long histo_count = 0; for (int i = 0; i < 256; i++) { histo_count += histo[i]; } printf("Histogram Sum: %ld\n", histo_count); CHECK(cudaEventDestroy(start)); CHECK(cudaEventDestroy(stop)); assert(histo_count == SIZE); return 0; }
fd3719bb597362a2e8b33f26243fb39ea04e252b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void PossionImageCloningIteration( const float *fixed, const float *mask, float *input, float *output, const int wt, const int ht ){ const int dir[4][2] = { {0, -1}, {1, 0}, {0, 1}, {-1, 0}}; int num = 4; const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt * yt + xt; if (yt < ht && xt < wt && mask[curt] > 127.0f){ float sum[3] = {0}; for (int i=0; i<4; i++){ int dxt = xt + dir[i][0]; int dyt = yt + dir[i][1]; int dcurt = wt * dyt + dxt; if (dxt >= 0 && dxt < wt && dyt >= 0 && dyt < ht && mask[dcurt] > 127.0f){ sum[0] += input[dcurt*3+0]; sum[1] += input[dcurt*3+1]; sum[2] += input[dcurt*3+2]; } } output[curt*3+0] = fixed[curt*3+0] + sum[0] / num; output[curt*3+1] = fixed[curt*3+1] + sum[1] / num; output[curt*3+2] = fixed[curt*3+2] + sum[2] / num; } }
fd3719bb597362a2e8b33f26243fb39ea04e252b.cu
#include "includes.h" __global__ void PossionImageCloningIteration( const float *fixed, const float *mask, float *input, float *output, const int wt, const int ht ){ const int dir[4][2] = { {0, -1}, {1, 0}, {0, 1}, {-1, 0}}; int num = 4; const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt * yt + xt; if (yt < ht && xt < wt && mask[curt] > 127.0f){ float sum[3] = {0}; for (int i=0; i<4; i++){ int dxt = xt + dir[i][0]; int dyt = yt + dir[i][1]; int dcurt = wt * dyt + dxt; if (dxt >= 0 && dxt < wt && dyt >= 0 && dyt < ht && mask[dcurt] > 127.0f){ sum[0] += input[dcurt*3+0]; sum[1] += input[dcurt*3+1]; sum[2] += input[dcurt*3+2]; } } output[curt*3+0] = fixed[curt*3+0] + sum[0] / num; output[curt*3+1] = fixed[curt*3+1] + sum[1] / num; output[curt*3+2] = fixed[curt*3+2] + sum[2] / num; } }
6a66d76e9144a9c6f2a5c11ee3e0e7ad9347768f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layers/neuron_layer.hpp" #include "caffe/layers/pprelu_layer.hpp" namespace caffe { // CUDA kernele for forward template <typename Dtype> __global__ void PReLUForward(const int n, const int channels, const int dim, const Dtype* in, Dtype* out, const Dtype* slope_data, const Dtype* slope_data1, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; if (in[index] < Dtype(4)){ out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; } else{ out[index] = in[index] * slope_data1[c]; } } } // CUDA kernel for bottom backward template <typename Dtype> __global__ void PReLUBackward(const int n, const int channels, const int dim, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, const Dtype* slope_data, const Dtype* slope_data1, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; if (in_data[index] < Dtype(4)) out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); else out_diff[index] = in_diff[index] * slope_data1[c]; } } // CUDA kernel for element-wise parameter backward template <typename Dtype> __global__ void PReLUParamBackward(const int n, const int rows, const int rowPitch, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype* out_diff1) { CUDA_KERNEL_LOOP(index, n) { if (in_data[index] < 4) out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0); else out_diff1[index] = in_diff[index] * in_data[index] * (in_data[index] >= 4); for ( int k = 1; k < rows; k++ ) { if (in_data[index] < 4) out_diff[index] += in_diff[index + k*rowPitch] * in_data[index + k*rowPitch] * (in_data[index + k*rowPitch] <= 0); else out_diff1[index] += in_diff[index + k*rowPitch] * in_data[index + k*rowPitch] * (in_data[index + k*rowPitch] >= 4); } } } template <typename 
Dtype> void PPReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->channels(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); const Dtype* slope_data1 = this->blobs_[1]->gpu_data(); const int div_factor = channel_shared_ ? channels : 1; // For in-place computation if (top[0] == bottom[0]) { caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data()); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, dim, bottom_data, top_data, slope_data, slope_data1, div_factor); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void PPReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->channels(); // For in-place computation if (top[0] == bottom[0]) { bottom_data = bottom_memory_.gpu_data(); } // Propagate to param // Since to write bottom diff will affect top diff if top and bottom blobs // are identical (in-place computaion), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); Dtype* slope_diff1 = this->blobs_[1]->mutable_gpu_diff(); int cdim = channels * dim; // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PReLUParamBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(cdim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, cdim, bottom[0]->num(), top[0]->offset(1), top_diff, bottom_data , backward_buff_.mutable_gpu_diff(), backward_buff_1.mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype dsum; caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(), multiplier_.gpu_data(), &dsum); caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } else { caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1., backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1., backward_buff_1.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff1); } } // Propagate to bottom if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); const Dtype* slope_data1 = this->blobs_[1]->gpu_data(); int div_factor = channel_shared_ ? channels : 1; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data, slope_data1, div_factor); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(PPReLULayer); } // namespace caffe
6a66d76e9144a9c6f2a5c11ee3e0e7ad9347768f.cu
#include <algorithm> #include <vector> #include "caffe/layers/neuron_layer.hpp" #include "caffe/layers/pprelu_layer.hpp" namespace caffe { // CUDA kernele for forward template <typename Dtype> __global__ void PReLUForward(const int n, const int channels, const int dim, const Dtype* in, Dtype* out, const Dtype* slope_data, const Dtype* slope_data1, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; if (in[index] < Dtype(4)){ out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; } else{ out[index] = in[index] * slope_data1[c]; } } } // CUDA kernel for bottom backward template <typename Dtype> __global__ void PReLUBackward(const int n, const int channels, const int dim, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, const Dtype* slope_data, const Dtype* slope_data1, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; if (in_data[index] < Dtype(4)) out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); else out_diff[index] = in_diff[index] * slope_data1[c]; } } // CUDA kernel for element-wise parameter backward template <typename Dtype> __global__ void PReLUParamBackward(const int n, const int rows, const int rowPitch, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype* out_diff1) { CUDA_KERNEL_LOOP(index, n) { if (in_data[index] < 4) out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0); else out_diff1[index] = in_diff[index] * in_data[index] * (in_data[index] >= 4); for ( int k = 1; k < rows; k++ ) { if (in_data[index] < 4) out_diff[index] += in_diff[index + k*rowPitch] * in_data[index + k*rowPitch] * (in_data[index + k*rowPitch] <= 0); else out_diff1[index] += in_diff[index + k*rowPitch] * in_data[index + k*rowPitch] * (in_data[index + k*rowPitch] >= 4); } } } template <typename Dtype> void PPReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const 
vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->channels(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); const Dtype* slope_data1 = this->blobs_[1]->gpu_data(); const int div_factor = channel_shared_ ? channels : 1; // For in-place computation if (top[0] == bottom[0]) { caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data()); } // NOLINT_NEXT_LINE(whitespace/operators) PReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, channels, dim, bottom_data, top_data, slope_data, slope_data1, div_factor); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void PPReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->channels(); // For in-place computation if (top[0] == bottom[0]) { bottom_data = bottom_memory_.gpu_data(); } // Propagate to param // Since to write bottom diff will affect top diff if top and bottom blobs // are identical (in-place computaion), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); Dtype* slope_diff1 = this->blobs_[1]->mutable_gpu_diff(); int cdim = channels * dim; // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) PReLUParamBackward<Dtype><<<CAFFE_GET_BLOCKS(cdim), CAFFE_CUDA_NUM_THREADS>>>( cdim, bottom[0]->num(), top[0]->offset(1), top_diff, bottom_data , backward_buff_.mutable_gpu_diff(), backward_buff_1.mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype dsum; caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(), multiplier_.gpu_data(), &dsum); caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } else { caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1., backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1., backward_buff_1.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff1); } } // Propagate to bottom if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); const Dtype* slope_data1 = this->blobs_[1]->gpu_data(); int div_factor = channel_shared_ ? channels : 1; // NOLINT_NEXT_LINE(whitespace/operators) PReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data, slope_data1, div_factor); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(PPReLULayer); } // namespace caffe
a7c64c668aa078427b0af2980bdee43a7e47fae7.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
a7c64c668aa078427b0af2980bdee43a7e47fae7.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
Io.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include "params.h" #include "common.h" #include "bnd.h" #include "cosmo.h" #include "GPU.h" #include "Atomic.h" extern "C" void getalist(int); extern "C" void cuGetIC(int,int); //************************************************************************* //************************************************************************* #define NCELLS3 (NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2) //************************************************************************* //************************************************************************* void getalist(int rank) { FILE *fp; int i,idx; float dummy,dummy2; fp=fopen(filealist,"r"); fscanf(fp,"%d",&nalist); //printf("# of outputs in the list=%d\n",nalist); alist=(float*)(calloc(nalist,sizeof(float))); tlist=(float*)(calloc(nalist,sizeof(float))); for(i=0;i<nalist;i++) { fscanf(fp,"%d %f %f",&idx,&dummy,&dummy2); alist[i]=dummy; if(dummy>0){ tlist[i]=a2t(alist[i],omegav,Hubble0); } else{ tlist[i]=0.; } if (rank==0) printf("(%f %f) ",alist[i],tlist[i]/unit_time); } // printf("\n"); // printf("\n"); } //************************************************************************* //************************************************************************* void cuDumpResults(int i, float time, float aexp, int rank) { char fname[256]; char fmt[256]; FILE *fp; int nc=ncells+NBOUND2; float tt=time/unit_time; #ifndef WMPI strcpy(fmt,rootname); strcat(fmt,".%05d"); sprintf(fname,fmt,i); #else strcpy(fmt,rootname); strcat(fmt,".%05d.p%05d"); sprintf(fname,fmt,i,rank); #endif if(rank==0) printf("Writing output in %s on proc %d\n",fname,rank); hipMemcpy(egy,cuegy,NGRP*NCELLS3*sizeof(float),hipMemcpyDeviceToHost); #ifdef DUMPFLUX hipMemcpy(flx,cuflx,NGRP*NCELLS3*sizeof(float)*3,hipMemcpyDeviceToHost); #endif #ifndef SDISCRETE hipMemcpy(src0,cusrc0,NCELLS3*sizeof(float),hipMemcpyDeviceToHost); #else 
hipMemcpy(src0,cusrc0,nsource*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(src0pos,cusrc0pos,3*nsource*sizeof(int),hipMemcpyDeviceToHost); #endif hipMemcpy(xion,cuxion,NCELLS3*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(density,cudensity,NCELLS3*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(temperature,cutemperature,NCELLS3*sizeof(float),hipMemcpyDeviceToHost); fp=fopen(fname,"wb"); #ifndef DUMPGRID fwrite(&nc,sizeof(int),1,fp); fwrite(&nsource,sizeof(int),1,fp); fwrite(&tt,sizeof(float),1,fp); //fwrite(&ngrp,sizeof(int),1,fp); fwrite(egy,sizeof(float),NGRP*NCELLS3,fp); #ifdef DUMPFLUX fwrite(flx,sizeof(float),3*NGRP*NCELLS3,fp); #endif fwrite(xion,sizeof(float),NCELLS3,fp); fwrite(temperature,sizeof(float),NCELLS3,fp); #ifndef SDISCRETE //fwrite(src0,sizeof(float),NCELLS3,fp); #else fwrite(src0,sizeof(float),nsource,fp); fwrite(src0pos,sizeof(int),3*nsource,fp); #endif //fwrite(density,sizeof(float),NCELLS3,fp); fwrite(&aexp,sizeof(float),1,fp); // fin #else fwrite(&nc,sizeof(int),1,fp); fwrite(&tt,sizeof(float),1,fp); fwrite(xion,sizeof(float),NCELLS3,fp); fwrite(temperature,sizeof(float),NCELLS3,fp); #endif fclose(fp); if(rank==0) printf("Done on proc #%d\n",rank); } //************************************************************************* //************************************************************************* //************************************************************************* //************************************************************************* void cuGetIC(int iic,int rank) { int i; int factgrp[NGRP]; FACTGRP; #ifndef TEST_STROMGREN char fname[256]; char fmt[256]; FILE *fp; int nc; int ns; #ifndef WMPI if(iic!=0) { strcpy(fmt,rootname); strcat(fmt,".%05d"); sprintf(fname,fmt,iic); } else { strcpy(fmt,rootname); strcat(fmt,".ic"); sprintf(fname,fmt,iic); } #else if(iic!=0) { strcpy(fmt,rootname); strcat(fmt,".%05d.p%05d"); sprintf(fname,fmt,iic,rank); } else { strcpy(fmt,rootname); strcat(fmt,".ic.p%05d"); sprintf(fname,fmt,rank); } 
#endif if(rank==0) printf("Reading ICs in %s on proc #%d\n",fname,rank); fp=fopen(fname,"rb"); if(fp==NULL) { printf("ERROR : IC file does not exist !\n"); abort(); } fread(&nc,sizeof(int),1,fp); if(nc!=ncells+NBOUND2) { puts("ERROR while reading ICs : cell number is inconsistent !"); printf("nc=%d ncells+NBOUND2=%d\n",nc,ncells+NBOUND2); abort(); } fread(&ns,sizeof(int),1,fp); #ifdef SDISCRETE if(ns!=nsource) { printf("ERROR while reading ICs : source number is inconsistent ns=%d nsource=%d!\n",ns,nsource); abort(); } #endif fread(&t,sizeof(float),1,fp); if(rank==0) printf("nc=%d ns=%d t=%e\n",nc,ns,t); if((iic!=0)&&(NGRP>1)){ fread(egy,sizeof(float),NCELLS3*NGRP,fp); } else{ fread(egy,sizeof(float),NCELLS3,fp); } if((iic!=0)&&(NGRP>1)){ fread(flx,sizeof(float),3*NCELLS3*NGRP,fp); } else{ fread(flx,sizeof(float),3*NCELLS3,fp); } fread(xion,sizeof(float),NCELLS3,fp); fread(temperature,sizeof(float),NCELLS3,fp); #ifdef SDISCRETE fread(src0,sizeof(float),nsource,fp); fread(src0pos,sizeof(int),3*nsource,fp); #else fread(src0,sizeof(float),NCELLS3,fp); #endif fread(density,sizeof(float),NCELLS3,fp); fread(&astart,sizeof(float),1,fp); fclose(fp); if(rank==0) printf("astart=%e\n",astart); #ifdef SAFE for(i=0;i<NCELLS3;i++){ egy[i]+=1e-33; /* flx[i]=0.; */ /* xion[i]=1e-4; */ /* temperature[i]=1e2; */ /* src0[i]*=1.;//7e10; */ /* density[i]+=1e-2;//1e24+1e-6; */ } #endif #else // STROMGREN TEST CASE if(rank==0) { printf("Self-consistent generation for Stromgren Sphere\n"); } int ii,jj,kk,igrp; union { float f; unsigned char b[4]; } dat1, dat2; #define swap(X) dat1.f = X; dat2.b[0] = dat1.b[3]; dat2.b[1] = dat1.b[2]; dat2.b[2] = dat1.b[1]; dat2.b[3] = dat1.b[0]; X=dat2.f; float Z,dummy,density_temp; FILE *rho; rho=fopen("../sources/density.bin","rb"); fseek(rho,4,SEEK_CUR); fread(&Z,4,1,rho); swap(Z); fseek(rho,8,SEEK_CUR); fread(&dummy,sizeof(float),1,rho); swap(dummy); fseek(rho,4,SEEK_CUR); for(kk=0;kk<NCELLZ;kk++) { fseek(rho,4,SEEK_CUR); for(jj=0;jj<NCELLY;jj++) { 
for(ii=0;ii<NCELLX;ii++) { int idx=(ii+NBOUND)+(jj+NBOUND)*(NCELLX+2*NBOUND)+(kk+NBOUND)*(NCELLX+2*NBOUND)*(NCELLY+2*NBOUND); for (igrp=0;igrp<NGRP;igrp++) { egy[idx+igrp*NCELLS3]=0.; flx[idx+0*(NCELLX+2*NBOUND)*(NCELLY+2*NBOUND)*(NCELLZ+2*NBOUND)+igrp*NCELLS3*3]=0.; flx[idx+1*(NCELLX+2*NBOUND)*(NCELLY+2*NBOUND)*(NCELLZ+2*NBOUND)+igrp*NCELLS3*3]=0.; flx[idx+2*(NCELLX+2*NBOUND)*(NCELLY+2*NBOUND)*(NCELLZ+2*NBOUND)+igrp*NCELLS3*3]=0.; } #ifndef COOLING xion[idx]=1.2e-3; temperature[idx]= 1e4; //100K #else xion[idx]=1e-6; temperature[idx]= 1e2; #endif // density[idx]=1000.; /* int size=8; if( ((ii>NCELLY*3./4-size) && (ii<NCELLY*3./4+size)) && ((jj>NCELLZ*3./4-size) && (jj<NCELLZ*3./4+size)) ) { density[idx]=2000.;} else { density[idx]=1000.;} */ fread(&density_temp,sizeof(float),1,rho); swap(density_temp); density[idx]=density_temp*1e6; } } fseek(rho,4,SEEK_CUR); } fclose(rho); astart=1.; #ifndef WMPI /* src0[0]=5e48/dx/dx/dx/8.; src0pos[0]=0; src0pos[1]=0; src0pos[2]=0; */ int isrc,jsrc; int src0pos[16*3]; float src0[16]; FILE * src; src=fopen("../sources/sources.dat","r"); for (isrc=0;isrc<16;isrc++) { for (jsrc=0;jsrc<3;jsrc++) { fscanf(src,"%i",&src0pos[isrc+jsrc*16]); } fscanf(src,"%f",&src0[isrc]); src0[isrc] *= 1e52/dx/dx/dx; } fclose(src); #else if(rank==0) { src0[0]=0.; src0pos[0]=NCELLX/2; src0pos[1]=NCELLY/2; src0pos[2]=NCELLZ/2; } else { src0[0]=5e48/dx/dx/dx; src0pos[0]=0; src0pos[1]=0; src0pos[2]=0; } #endif #endif #ifndef COSMO c=effective_speed_of_light*c_r; #else c=effective_speed_of_light*c_r/astart; Hubble0=Hubble0/(9.7776e9*3.155815e7); // H0 in sec-1 #endif #ifdef SDISCRETE if((nsource!=0)&&(rank==0)) { printf("%d sources found\n",nsource); #ifndef WMPI for(i=0;i<nsource;i++) printf(" %d %d %d %e\n",src0pos[i],src0pos[i+nsource],src0pos[i+2*nsource],src0[i]); #endif printf("tstart= %e\n",t); } #endif if(iic==0){ for(i=0;i<NCELLS3;i++){ if(density[i]<0){ density[i]=defdens; temperature[i]=deftemp; } #ifdef FORCET temperature[i]=deftemp; #endif 
for(int j=NGRP-1;j>=0;j--) egy[i+j*NCELLS3]=fmaxf(egy_min*factgrp[j],egy[i+j*NCELLS3]); } } hipMemcpy(cuegy,egy,NCELLS3*sizeof(float)*NGRP,hipMemcpyHostToDevice); hipMemcpy(cuflx,flx,NCELLS3*sizeof(float)*3*NGRP,hipMemcpyHostToDevice); int odx=15684351; int ii,jj,kk; kk=odx/((NCELLX+NBOUND2)*(NCELLY+NBOUND2)); jj=(odx-kk*(NCELLX+NBOUND2)*(NCELLY+NBOUND2))/(NCELLX+NBOUND2); ii=odx-kk*(NCELLX+NBOUND2)*(NCELLY+NBOUND2)-jj*(NCELLX+NBOUND2); // printf("Rank=%d flx =%e %e %e egy=%e i=%d j=%d k=%d\n",rank,flx[odx-1],flx[odx],flx[odx+1],egy[odx],ii-NBOUND,jj-NBOUND,kk-NBOUND); hipMemcpy(cuxion,xion,NCELLS3*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(cudensity,density,NCELLS3*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(cutemperature,temperature,NCELLS3*sizeof(float),hipMemcpyHostToDevice); #ifdef SDISCRETE hipMemcpy(cusrc0,src0,nsource*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(cusrc0pos,src0pos,3*nsource*sizeof(int),hipMemcpyHostToDevice); #else hipMemcpy(cusrc0,src0,NCELLS3*sizeof(float),hipMemcpyHostToDevice); #endif if(rank==0) printf("in read astart=%e\n",astart); if(rank==0) printf("Device Memory allocated on proc #%d\n",rank); } //************************************************************************* int cuGetField(int iic, int rank) { char fname[256]; char fmt[256]; FILE *fp; int nc; int ns; float tloc; strcpy(fmt,fieldname); #ifdef WMPI strcat(fmt,"_%03d.p%05d"); sprintf(fname,fmt,iic,rank); #else strcat(fmt,"_%03d"); sprintf(fname,fmt,iic); #endif if(rank==0) printf("Reading Field in %s\n",fname); fp=fopen(fname,"rb"); if(fp==NULL) { printf("ERROR : IC file does not exist !\n"); return 38; } fread(&nc,sizeof(int),1,fp); if(nc!=ncells+NBOUND2) { puts("ERROR while reading Field : cell number is inconsistent !"); abort(); return 38; } fread(&ns,sizeof(int),1,fp); if(ns!=nsource) { puts("ERROR while reading Field : source number is inconsistent !"); abort(); return 38; } #ifndef LIGHTFIELD // regular snapshot format for input fields 
fread(&tloc,sizeof(float),1,fp); fread(egy,sizeof(float),NCELLS3,fp); fread(flx,sizeof(float),3*NCELLS3,fp); fread(xion,sizeof(float),NCELLS3,fp); fread(temperature,sizeof(float),NCELLS3,fp); #ifdef SDISCRETE fread(src0,sizeof(float),nsource,fp); #ifdef RAND_SRC srand(rank); for(int i=0;i<nsource;i++) { src0[i]*=(float)(rand())/(float)(RAND_MAX); } #endif fread(src0pos,sizeof(int),3*nsource,fp); #else fread(src0,sizeof(float),NCELLS3,fp); #endif fread(density,sizeof(float),NCELLS3,fp); fclose(fp); #else // light format for input fields fread(&tloc,sizeof(float),1,fp); #ifdef SDISCRETE fread(src0,sizeof(float),nsource,fp); fread(src0pos,sizeof(int),3*nsource,fp); #else fread(src0,sizeof(float),NCELLS3,fp); #endif fread(density,sizeof(float),NCELLS3,fp); fclose(fp); #endif for(int i=0;i<NCELLS3;i++){ if(density[i]<0){ density[i]=defdens; } } // sending data to GPU hipMemcpy(cudensity,density,NCELLS3*sizeof(float),hipMemcpyHostToDevice); #ifdef SDISCRETE hipMemcpy(cusrc0,src0,nsource*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(cusrc0pos,src0pos,3*nsource*sizeof(int),hipMemcpyHostToDevice); #else hipMemcpy(cusrc0,src0,NCELLS3*sizeof(float),hipMemcpyHostToDevice); #endif if(rank==0) puts("Fields updated"); return 0; } //========================================================== //==========================================================
Io.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "params.h" #include "common.h" #include "bnd.h" #include "cosmo.h" #include "GPU.h" #include "Atomic.h" extern "C" void getalist(int); extern "C" void cuGetIC(int,int); //************************************************************************* //************************************************************************* #define NCELLS3 (NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2) //************************************************************************* //************************************************************************* void getalist(int rank) { FILE *fp; int i,idx; float dummy,dummy2; fp=fopen(filealist,"r"); fscanf(fp,"%d",&nalist); //printf("# of outputs in the list=%d\n",nalist); alist=(float*)(calloc(nalist,sizeof(float))); tlist=(float*)(calloc(nalist,sizeof(float))); for(i=0;i<nalist;i++) { fscanf(fp,"%d %f %f",&idx,&dummy,&dummy2); alist[i]=dummy; if(dummy>0){ tlist[i]=a2t(alist[i],omegav,Hubble0); } else{ tlist[i]=0.; } if (rank==0) printf("(%f %f) ",alist[i],tlist[i]/unit_time); } // printf("\n"); // printf("\n"); } //************************************************************************* //************************************************************************* void cuDumpResults(int i, float time, float aexp, int rank) { char fname[256]; char fmt[256]; FILE *fp; int nc=ncells+NBOUND2; float tt=time/unit_time; #ifndef WMPI strcpy(fmt,rootname); strcat(fmt,".%05d"); sprintf(fname,fmt,i); #else strcpy(fmt,rootname); strcat(fmt,".%05d.p%05d"); sprintf(fname,fmt,i,rank); #endif if(rank==0) printf("Writing output in %s on proc %d\n",fname,rank); cudaMemcpy(egy,cuegy,NGRP*NCELLS3*sizeof(float),cudaMemcpyDeviceToHost); #ifdef DUMPFLUX cudaMemcpy(flx,cuflx,NGRP*NCELLS3*sizeof(float)*3,cudaMemcpyDeviceToHost); #endif #ifndef SDISCRETE cudaMemcpy(src0,cusrc0,NCELLS3*sizeof(float),cudaMemcpyDeviceToHost); #else cudaMemcpy(src0,cusrc0,nsource*sizeof(float),cudaMemcpyDeviceToHost); 
cudaMemcpy(src0pos,cusrc0pos,3*nsource*sizeof(int),cudaMemcpyDeviceToHost); #endif cudaMemcpy(xion,cuxion,NCELLS3*sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(density,cudensity,NCELLS3*sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(temperature,cutemperature,NCELLS3*sizeof(float),cudaMemcpyDeviceToHost); fp=fopen(fname,"wb"); #ifndef DUMPGRID fwrite(&nc,sizeof(int),1,fp); fwrite(&nsource,sizeof(int),1,fp); fwrite(&tt,sizeof(float),1,fp); //fwrite(&ngrp,sizeof(int),1,fp); fwrite(egy,sizeof(float),NGRP*NCELLS3,fp); #ifdef DUMPFLUX fwrite(flx,sizeof(float),3*NGRP*NCELLS3,fp); #endif fwrite(xion,sizeof(float),NCELLS3,fp); fwrite(temperature,sizeof(float),NCELLS3,fp); #ifndef SDISCRETE //fwrite(src0,sizeof(float),NCELLS3,fp); #else fwrite(src0,sizeof(float),nsource,fp); fwrite(src0pos,sizeof(int),3*nsource,fp); #endif //fwrite(density,sizeof(float),NCELLS3,fp); fwrite(&aexp,sizeof(float),1,fp); // fin #else fwrite(&nc,sizeof(int),1,fp); fwrite(&tt,sizeof(float),1,fp); fwrite(xion,sizeof(float),NCELLS3,fp); fwrite(temperature,sizeof(float),NCELLS3,fp); #endif fclose(fp); if(rank==0) printf("Done on proc #%d\n",rank); } //************************************************************************* //************************************************************************* //************************************************************************* //************************************************************************* void cuGetIC(int iic,int rank) { int i; int factgrp[NGRP]; FACTGRP; #ifndef TEST_STROMGREN char fname[256]; char fmt[256]; FILE *fp; int nc; int ns; #ifndef WMPI if(iic!=0) { strcpy(fmt,rootname); strcat(fmt,".%05d"); sprintf(fname,fmt,iic); } else { strcpy(fmt,rootname); strcat(fmt,".ic"); sprintf(fname,fmt,iic); } #else if(iic!=0) { strcpy(fmt,rootname); strcat(fmt,".%05d.p%05d"); sprintf(fname,fmt,iic,rank); } else { strcpy(fmt,rootname); strcat(fmt,".ic.p%05d"); sprintf(fname,fmt,rank); } #endif if(rank==0) printf("Reading ICs in %s on proc 
#%d\n",fname,rank); fp=fopen(fname,"rb"); if(fp==NULL) { printf("ERROR : IC file does not exist !\n"); abort(); } fread(&nc,sizeof(int),1,fp); if(nc!=ncells+NBOUND2) { puts("ERROR while reading ICs : cell number is inconsistent !"); printf("nc=%d ncells+NBOUND2=%d\n",nc,ncells+NBOUND2); abort(); } fread(&ns,sizeof(int),1,fp); #ifdef SDISCRETE if(ns!=nsource) { printf("ERROR while reading ICs : source number is inconsistent ns=%d nsource=%d!\n",ns,nsource); abort(); } #endif fread(&t,sizeof(float),1,fp); if(rank==0) printf("nc=%d ns=%d t=%e\n",nc,ns,t); if((iic!=0)&&(NGRP>1)){ fread(egy,sizeof(float),NCELLS3*NGRP,fp); } else{ fread(egy,sizeof(float),NCELLS3,fp); } if((iic!=0)&&(NGRP>1)){ fread(flx,sizeof(float),3*NCELLS3*NGRP,fp); } else{ fread(flx,sizeof(float),3*NCELLS3,fp); } fread(xion,sizeof(float),NCELLS3,fp); fread(temperature,sizeof(float),NCELLS3,fp); #ifdef SDISCRETE fread(src0,sizeof(float),nsource,fp); fread(src0pos,sizeof(int),3*nsource,fp); #else fread(src0,sizeof(float),NCELLS3,fp); #endif fread(density,sizeof(float),NCELLS3,fp); fread(&astart,sizeof(float),1,fp); fclose(fp); if(rank==0) printf("astart=%e\n",astart); #ifdef SAFE for(i=0;i<NCELLS3;i++){ egy[i]+=1e-33; /* flx[i]=0.; */ /* xion[i]=1e-4; */ /* temperature[i]=1e2; */ /* src0[i]*=1.;//7e10; */ /* density[i]+=1e-2;//1e24+1e-6; */ } #endif #else // STROMGREN TEST CASE if(rank==0) { printf("Self-consistent generation for Stromgren Sphere\n"); } int ii,jj,kk,igrp; union { float f; unsigned char b[4]; } dat1, dat2; #define swap(X) dat1.f = X; dat2.b[0] = dat1.b[3]; dat2.b[1] = dat1.b[2]; dat2.b[2] = dat1.b[1]; dat2.b[3] = dat1.b[0]; X=dat2.f; float Z,dummy,density_temp; FILE *rho; rho=fopen("../sources/density.bin","rb"); fseek(rho,4,SEEK_CUR); fread(&Z,4,1,rho); swap(Z); fseek(rho,8,SEEK_CUR); fread(&dummy,sizeof(float),1,rho); swap(dummy); fseek(rho,4,SEEK_CUR); for(kk=0;kk<NCELLZ;kk++) { fseek(rho,4,SEEK_CUR); for(jj=0;jj<NCELLY;jj++) { for(ii=0;ii<NCELLX;ii++) { int 
idx=(ii+NBOUND)+(jj+NBOUND)*(NCELLX+2*NBOUND)+(kk+NBOUND)*(NCELLX+2*NBOUND)*(NCELLY+2*NBOUND); for (igrp=0;igrp<NGRP;igrp++) { egy[idx+igrp*NCELLS3]=0.; flx[idx+0*(NCELLX+2*NBOUND)*(NCELLY+2*NBOUND)*(NCELLZ+2*NBOUND)+igrp*NCELLS3*3]=0.; flx[idx+1*(NCELLX+2*NBOUND)*(NCELLY+2*NBOUND)*(NCELLZ+2*NBOUND)+igrp*NCELLS3*3]=0.; flx[idx+2*(NCELLX+2*NBOUND)*(NCELLY+2*NBOUND)*(NCELLZ+2*NBOUND)+igrp*NCELLS3*3]=0.; } #ifndef COOLING xion[idx]=1.2e-3; temperature[idx]= 1e4; //100K #else xion[idx]=1e-6; temperature[idx]= 1e2; #endif // density[idx]=1000.; /* int size=8; if( ((ii>NCELLY*3./4-size) && (ii<NCELLY*3./4+size)) && ((jj>NCELLZ*3./4-size) && (jj<NCELLZ*3./4+size)) ) { density[idx]=2000.;} else { density[idx]=1000.;} */ fread(&density_temp,sizeof(float),1,rho); swap(density_temp); density[idx]=density_temp*1e6; } } fseek(rho,4,SEEK_CUR); } fclose(rho); astart=1.; #ifndef WMPI /* src0[0]=5e48/dx/dx/dx/8.; src0pos[0]=0; src0pos[1]=0; src0pos[2]=0; */ int isrc,jsrc; int src0pos[16*3]; float src0[16]; FILE * src; src=fopen("../sources/sources.dat","r"); for (isrc=0;isrc<16;isrc++) { for (jsrc=0;jsrc<3;jsrc++) { fscanf(src,"%i",&src0pos[isrc+jsrc*16]); } fscanf(src,"%f",&src0[isrc]); src0[isrc] *= 1e52/dx/dx/dx; } fclose(src); #else if(rank==0) { src0[0]=0.; src0pos[0]=NCELLX/2; src0pos[1]=NCELLY/2; src0pos[2]=NCELLZ/2; } else { src0[0]=5e48/dx/dx/dx; src0pos[0]=0; src0pos[1]=0; src0pos[2]=0; } #endif #endif #ifndef COSMO c=effective_speed_of_light*c_r; #else c=effective_speed_of_light*c_r/astart; Hubble0=Hubble0/(9.7776e9*3.155815e7); // H0 in sec-1 #endif #ifdef SDISCRETE if((nsource!=0)&&(rank==0)) { printf("%d sources found\n",nsource); #ifndef WMPI for(i=0;i<nsource;i++) printf(" %d %d %d %e\n",src0pos[i],src0pos[i+nsource],src0pos[i+2*nsource],src0[i]); #endif printf("tstart= %e\n",t); } #endif if(iic==0){ for(i=0;i<NCELLS3;i++){ if(density[i]<0){ density[i]=defdens; temperature[i]=deftemp; } #ifdef FORCET temperature[i]=deftemp; #endif for(int j=NGRP-1;j>=0;j--) 
egy[i+j*NCELLS3]=fmaxf(egy_min*factgrp[j],egy[i+j*NCELLS3]); } } cudaMemcpy(cuegy,egy,NCELLS3*sizeof(float)*NGRP,cudaMemcpyHostToDevice); cudaMemcpy(cuflx,flx,NCELLS3*sizeof(float)*3*NGRP,cudaMemcpyHostToDevice); int odx=15684351; int ii,jj,kk; kk=odx/((NCELLX+NBOUND2)*(NCELLY+NBOUND2)); jj=(odx-kk*(NCELLX+NBOUND2)*(NCELLY+NBOUND2))/(NCELLX+NBOUND2); ii=odx-kk*(NCELLX+NBOUND2)*(NCELLY+NBOUND2)-jj*(NCELLX+NBOUND2); // printf("Rank=%d flx =%e %e %e egy=%e i=%d j=%d k=%d\n",rank,flx[odx-1],flx[odx],flx[odx+1],egy[odx],ii-NBOUND,jj-NBOUND,kk-NBOUND); cudaMemcpy(cuxion,xion,NCELLS3*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(cudensity,density,NCELLS3*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(cutemperature,temperature,NCELLS3*sizeof(float),cudaMemcpyHostToDevice); #ifdef SDISCRETE cudaMemcpy(cusrc0,src0,nsource*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(cusrc0pos,src0pos,3*nsource*sizeof(int),cudaMemcpyHostToDevice); #else cudaMemcpy(cusrc0,src0,NCELLS3*sizeof(float),cudaMemcpyHostToDevice); #endif if(rank==0) printf("in read astart=%e\n",astart); if(rank==0) printf("Device Memory allocated on proc #%d\n",rank); } //************************************************************************* int cuGetField(int iic, int rank) { char fname[256]; char fmt[256]; FILE *fp; int nc; int ns; float tloc; strcpy(fmt,fieldname); #ifdef WMPI strcat(fmt,"_%03d.p%05d"); sprintf(fname,fmt,iic,rank); #else strcat(fmt,"_%03d"); sprintf(fname,fmt,iic); #endif if(rank==0) printf("Reading Field in %s\n",fname); fp=fopen(fname,"rb"); if(fp==NULL) { printf("ERROR : IC file does not exist !\n"); return 38; } fread(&nc,sizeof(int),1,fp); if(nc!=ncells+NBOUND2) { puts("ERROR while reading Field : cell number is inconsistent !"); abort(); return 38; } fread(&ns,sizeof(int),1,fp); if(ns!=nsource) { puts("ERROR while reading Field : source number is inconsistent !"); abort(); return 38; } #ifndef LIGHTFIELD // regular snapshot format for input fields 
fread(&tloc,sizeof(float),1,fp); fread(egy,sizeof(float),NCELLS3,fp); fread(flx,sizeof(float),3*NCELLS3,fp); fread(xion,sizeof(float),NCELLS3,fp); fread(temperature,sizeof(float),NCELLS3,fp); #ifdef SDISCRETE fread(src0,sizeof(float),nsource,fp); #ifdef RAND_SRC srand(rank); for(int i=0;i<nsource;i++) { src0[i]*=(float)(rand())/(float)(RAND_MAX); } #endif fread(src0pos,sizeof(int),3*nsource,fp); #else fread(src0,sizeof(float),NCELLS3,fp); #endif fread(density,sizeof(float),NCELLS3,fp); fclose(fp); #else // light format for input fields fread(&tloc,sizeof(float),1,fp); #ifdef SDISCRETE fread(src0,sizeof(float),nsource,fp); fread(src0pos,sizeof(int),3*nsource,fp); #else fread(src0,sizeof(float),NCELLS3,fp); #endif fread(density,sizeof(float),NCELLS3,fp); fclose(fp); #endif for(int i=0;i<NCELLS3;i++){ if(density[i]<0){ density[i]=defdens; } } // sending data to GPU cudaMemcpy(cudensity,density,NCELLS3*sizeof(float),cudaMemcpyHostToDevice); #ifdef SDISCRETE cudaMemcpy(cusrc0,src0,nsource*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(cusrc0pos,src0pos,3*nsource*sizeof(int),cudaMemcpyHostToDevice); #else cudaMemcpy(cusrc0,src0,NCELLS3*sizeof(float),cudaMemcpyHostToDevice); #endif if(rank==0) puts("Fields updated"); return 0; } //========================================================== //==========================================================
c4cd8088bfdafe2f6f6d41f896d57880e67ed62a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "cephes.h" #include "rats.h" #define MAXNUMOFTEMPLATES 148 /* APERIODIC TEMPLATES: 148=>temp_length=9 */ #define MIN(x,y) ((x) > (y) ? (y) : (x)) typedef struct para{ int M; int N; int m; double lambda; double varWj; } gpu_param; __device__ int GET_EPSILON1(unsigned char*data,int offset){ return ((data[offset/8]>>(7-(offset)%8))&1); } __device__ int MINVALUE(int a,int b){ return a > b ? b : a; } __global__ void nonoverlapKernel(int* WJ, double *CHI2_gpu, gpu_param *M_gpu, unsigned char* data, unsigned char*sequence){ int i, jj , k , j , match, K = 5;; unsigned int W_obs, nu[6]; double sum,chi2; int numOfTemplates[100] = {0, 0, 2, 4, 6, 12, 20, 40, 74, 148, 284, 568, 1116, 2232, 4424, 8848, 17622, 35244, 70340, 140680, 281076, 562152}; int N = M_gpu->N; int M = M_gpu->M; int m = M_gpu->m; double lambda = M_gpu->lambda; double varWj = M_gpu->varWj; // unsigned int *Wj = NULL; // if ( (Wj = (unsigned int*)malloc(148*N*sizeof(unsigned int))) == NULL ) { // return ; // } // int threadId = blockIdx.x *blockDim.x + threadIdx.x; int blockID = blockIdx.x; int tid = threadIdx.x; if(tid<MINVALUE(MAXNUMOFTEMPLATES, numOfTemplates[m])) { //printf("N:%d , M:%d , m:%d , lam:%f , var:%f\n",N,M,m,lambda,varWj); //sum = 0; // for ( k=0; k<=K; k++ ) // nu[k] = 0; // for ( i=0; i<N; i++ ) { W_obs = 0; for ( j=0; j<M-m+1; j++ ) { match = 1; for ( k=0; k<m; k++ ) { if ( (int)sequence[tid*m+k] != (int)GET_EPSILON1(data,blockID*M+j+k) ) { match = 0; break; } } if ( match == 1 ) { W_obs++; j += m-1; } } //Wj[tid*N+blockID] = W_obs; WJ[tid*N+blockID] = W_obs; //printf("tid:%d bid:%d wj:%d\n",tid,blockID,WJ[tid*N+blockID]); } } __global__ void ReductionSum(int* WJ_gpu,double *CHI2_gpu, gpu_param *M_gpu, unsigned char* data, unsigned char*sequence){ int i, jj , k , j , match, K = 5;; unsigned int W_obs, 
nu[6]; double sum,chi2; int numOfTemplates[100] = {0, 0, 2, 4, 6, 12, 20, 40, 74, 148, 284, 568, 1116, 2232, 4424, 8848, 17622, 35244, 70340, 140680, 281076, 562152}; int N = M_gpu->N; int M = M_gpu->M; int m = M_gpu->m; double lambda = M_gpu->lambda; double varWj = M_gpu->varWj; unsigned int *Wj = NULL; if ( (Wj = (unsigned int*)malloc(N*sizeof(unsigned int))) == NULL ) { return ; } // int threadId = blockIdx.x *blockDim.x + threadIdx.x; int blockID = blockIdx.x; if(threadId<MINVALUE(MAXNUMOFTEMPLATES, numOfTemplates[m])) { //printf("N:%d , M:%d , m:%d , lam:%f , var:%f\n",N,M,m,lambda,varWj); sum = 0; for ( k=0; k<=K; k++ ) nu[k] = 0; for ( i=0; i<N; i++ ) { W_obs = 0; for ( j=0; j<M-m+1; j++ ) { match = 1; for ( k=0; k<m; k++ ) { if ( (int)sequence[threadId*m+k] != (int)GET_EPSILON1(data,i*M+j+k) ) { match = 0; break; } } if ( match == 1 ) { W_obs++; j += m-1; } } Wj[i] = W_obs; } sum = 0; chi2 = 0.0; /* Compute Chi Square */ for ( i=0; i<N; i++ ) { // if ( m == 10 ) // fprintf(stats[TEST_NONPERIODIC], "%3d ", Wj[i]); // else // fprintf(stats[TEST_NONPERIODIC], "%4d ", Wj[i]); chi2 += pow(((double)Wj[i] - lambda)/pow(varWj, 0.5), 2); } CHI2_gpu[threadId] = chi2; printf("CH2gpu:%f\n",CHI2_gpu[threadId]); //float p_value = cephes_igamc(N/2.0, chi2/2.0); } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * N O N O V E R L A P P I N G T E M P L A T E T E S T * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ int NonOverlappingTemplateMatchingsCUDA(double alpha, unsigned char *data, int bits, int m) { hipDeviceReset(); int n = bits; int numOfTemplates[100] = {0, 0, 2, 4, 6, 12, 20, 40, 74, 148, 284, 568, 1116, 2232, 4424, 8848, 17622, 35244, 70340, 140680, 281076, 562152}; /*---------------------------------------------------------------------------- NOTE: Should additional templates lengths beyond 21 be desired, they must first be constructed, saved into files and then the corresponding number of nonperiodic 
templates for that file be stored in the m-th position in the numOfTemplates variable. ----------------------------------------------------------------------------*/ unsigned int bit, W_obs, nu[6], *Wj = NULL; FILE *fp = NULL; double sum, chi2, p_value, lambda, pi[6], varWj; int i, j, jj, k, match, SKIP, M, N, K = 5; char directory[100]; unsigned char *sequence = NULL; unsigned char sequence1[(MIN(MAXNUMOFTEMPLATES, numOfTemplates[m]))][m] ; int i_,k_; time_t s,e; N = 8; M = n/N; if ( (Wj = (unsigned int*)malloc(N*sizeof(unsigned int))) == NULL ) { return 0; } lambda = (M-m+1)/pow(2, m); varWj = M*(1.0/pow(2.0, m) - (2.0*m-1.0)/pow(2.0, 2.0*m)); sprintf(directory, "../templates/template%d", m); if ( ((isNegative(lambda)) || (isZero(lambda))) || ((fp = fopen(directory, "r")) == NULL|| (((sequence = (unsigned char*)malloc(MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])*m*sizeof(unsigned char))) == NULL )))) { if ( sequence != NULL ) free(sequence); } else { if ( numOfTemplates[m] < MAXNUMOFTEMPLATES ) SKIP = 1; else SKIP = (int)(numOfTemplates[m]/MAXNUMOFTEMPLATES); numOfTemplates[m] = (int)numOfTemplates[m]/SKIP; sum = 0.0; for ( i=0; i<2; i++ ) { /* Compute Probabilities */ pi[i] = exp(-lambda+i*log(lambda)-cephes_lgam(i+1)); sum += pi[i]; } pi[0] = sum; for ( i=2; i<=K; i++ ) { /* Compute Probabilities */ pi[i-1] = exp(-lambda+i*log(lambda)-cephes_lgam(i+1)); sum += pi[i-1]; } pi[K] = 1 - sum; ///////////////////////////////////////////// for (i_ = 0; i_ < (MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])); i_++){ for (k_=0; k_<m; k_++ ) { fscanf(fp, "%d", &bit); sequence1[i_][k_] = bit; sequence[i_*m+k_] = bit; } } ///////////////////////////////////////////// //host gpu_param M_cpu; M_cpu.M = M; M_cpu.N = N; M_cpu.m = m; M_cpu.lambda = lambda; M_cpu.varWj = varWj; double CHI2[MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])] = {0.0}; int WJ[148*8] = {0}; //device gpu_param *M_gpu; unsigned char *data_gpu; double *CHI2_gpu; hipMalloc((void**)&CHI2_gpu,(MIN(MAXNUMOFTEMPLATES, 
numOfTemplates[m])*sizeof(double))); unsigned char *sequenceGPU ; hipMalloc((void**)&sequenceGPU,MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])*m*sizeof(double)); hipMalloc((void**)&M_gpu,sizeof(gpu_param)); hipMalloc((void**)&data_gpu,(bits / 8)*sizeof(unsigned char)); int * WJ_gpu; hipMalloc((void**)&WJ_gpu,148*8*sizeof(int)); hipMemcpy(M_gpu, &M_cpu, sizeof(gpu_param), hipMemcpyHostToDevice); hipMemcpy(data_gpu, data, (bits / 8)*sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy(sequenceGPU, sequence, MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])*m*sizeof(double), hipMemcpyHostToDevice); int blocksPerGrid = (MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])+512-1)/512; hipLaunchKernelGGL(( nonoverlapKernel), dim3(8),dim3(148), 0, 0, WJ_gpu,CHI2_gpu,M_gpu,data_gpu,sequenceGPU); // s = clock(); // hipMemcpy(WJ, WJ_gpu, 148*8*sizeof(int), hipMemcpyDeviceToHost); hipFree(WJ_gpu); hipFree(CHI2_gpu); hipFree(M_gpu); hipFree(data_gpu); hipFree(sequenceGPU); //hipMemcpy(CHI2, CHI2_gpu, MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])*sizeof(double), hipMemcpyDeviceToHost); // e = clock(); // printf("1time:%f\n",(double)(e-s)/CLOCKS_PER_SEC); int flag = 1; for( jj=0; jj<MIN(MAXNUMOFTEMPLATES, numOfTemplates[m]); jj++ ) { sum = 0; chi2 = 0.0; /* Compute Chi Square */ //printf("wj:%d\n",WJ[jj]); for ( i=0; i<N; i++ ) { // if ( m == 10 ) // fprintf(stats[TEST_NONPERIODIC], "%3d ", Wj[i]); // else // fprintf(stats[TEST_NONPERIODIC], "%4d ", Wj[i]); //printf("WJ:%d\n",WJ[i]); chi2 += pow(((double)WJ[jj*N+i] - lambda)/pow(varWj, 0.5), 2); } p_value = cephes_igamc(N/2.0, chi2/2.0); if (p_value < alpha) flag = 0; // if ( isNegative(p_value) || isGreaterThanOne(p_value) ) // printf("\t\tWARNING: P_VALUE IS OUT OF RANGE.\n"); // printf("%9.6f %f %s %3d\n", chi2, p_value, p_value < alpha ? 
"FAILURE" : "SUCCESS", jj); // if ( SKIP > 1 ) // fseek(fp, (long)(SKIP-1)*2*m, SEEK_CUR); //} } //free(WJ_gpu); // for( jj=0; jj<MIN(MAXNUMOFTEMPLATES, numOfTemplates[m]); jj++ ) { // printf("CHI2:%f \n",CHI2[jj]); // p_value = cephes_igamc(N/2.0, CHI2[jj]/2.0); // if ( isNegative(p_value) || isGreaterThanOne(p_value) ) // printf("\t\tWARNING: P_VALUE IS OUT OF RANGE.\n"); // // printf("%9.6f %f %s %3d\n", CHI2[jj], p_value, p_value < alpha ? "FAILURE" : "SUCCESS", jj); // if ( SKIP > 1 ) // fseek(fp, (long)(SKIP-1)*2*m, SEEK_CUR); // } } if ( sequence != NULL ) free(sequence); free(Wj); if ( fp != NULL ) fclose(fp); // printf("p_value: %f\n",p_value); // if (p_value < alpha) // return 0; // return 1; }
c4cd8088bfdafe2f6f6d41f896d57880e67ed62a.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "cephes.h" #include "rats.h" #define MAXNUMOFTEMPLATES 148 /* APERIODIC TEMPLATES: 148=>temp_length=9 */ #define MIN(x,y) ((x) > (y) ? (y) : (x)) typedef struct para{ int M; int N; int m; double lambda; double varWj; } gpu_param; __device__ int GET_EPSILON1(unsigned char*data,int offset){ return ((data[offset/8]>>(7-(offset)%8))&1); } __device__ int MINVALUE(int a,int b){ return a > b ? b : a; } __global__ void nonoverlapKernel(int* WJ, double *CHI2_gpu, gpu_param *M_gpu, unsigned char* data, unsigned char*sequence){ int i, jj , k , j , match, K = 5;; unsigned int W_obs, nu[6]; double sum,chi2; int numOfTemplates[100] = {0, 0, 2, 4, 6, 12, 20, 40, 74, 148, 284, 568, 1116, 2232, 4424, 8848, 17622, 35244, 70340, 140680, 281076, 562152}; int N = M_gpu->N; int M = M_gpu->M; int m = M_gpu->m; double lambda = M_gpu->lambda; double varWj = M_gpu->varWj; // unsigned int *Wj = NULL; // if ( (Wj = (unsigned int*)malloc(148*N*sizeof(unsigned int))) == NULL ) { // return ; // } //确定索引 int threadId = blockIdx.x *blockDim.x + threadIdx.x; int blockID = blockIdx.x; int tid = threadIdx.x; if(tid<MINVALUE(MAXNUMOFTEMPLATES, numOfTemplates[m])) { //printf("N:%d , M:%d , m:%d , lam:%f , var:%f\n",N,M,m,lambda,varWj); //sum = 0; // for ( k=0; k<=K; k++ ) // nu[k] = 0; // for ( i=0; i<N; i++ ) { W_obs = 0; for ( j=0; j<M-m+1; j++ ) { match = 1; for ( k=0; k<m; k++ ) { if ( (int)sequence[tid*m+k] != (int)GET_EPSILON1(data,blockID*M+j+k) ) { match = 0; break; } } if ( match == 1 ) { W_obs++; j += m-1; } } //Wj[tid*N+blockID] = W_obs; WJ[tid*N+blockID] = W_obs; //printf("tid:%d bid:%d wj:%d\n",tid,blockID,WJ[tid*N+blockID]); } } __global__ void ReductionSum(int* WJ_gpu,double *CHI2_gpu, gpu_param *M_gpu, unsigned char* data, unsigned char*sequence){ int i, jj , k , j , match, K = 5;; unsigned int W_obs, nu[6]; double sum,chi2; int numOfTemplates[100] = {0, 0, 2, 4, 6, 12, 20, 40, 74, 
148, 284, 568, 1116, 2232, 4424, 8848, 17622, 35244, 70340, 140680, 281076, 562152}; int N = M_gpu->N; int M = M_gpu->M; int m = M_gpu->m; double lambda = M_gpu->lambda; double varWj = M_gpu->varWj; unsigned int *Wj = NULL; if ( (Wj = (unsigned int*)malloc(N*sizeof(unsigned int))) == NULL ) { return ; } //确定索引 int threadId = blockIdx.x *blockDim.x + threadIdx.x; int blockID = blockIdx.x; if(threadId<MINVALUE(MAXNUMOFTEMPLATES, numOfTemplates[m])) { //printf("N:%d , M:%d , m:%d , lam:%f , var:%f\n",N,M,m,lambda,varWj); sum = 0; for ( k=0; k<=K; k++ ) nu[k] = 0; for ( i=0; i<N; i++ ) { W_obs = 0; for ( j=0; j<M-m+1; j++ ) { match = 1; for ( k=0; k<m; k++ ) { if ( (int)sequence[threadId*m+k] != (int)GET_EPSILON1(data,i*M+j+k) ) { match = 0; break; } } if ( match == 1 ) { W_obs++; j += m-1; } } Wj[i] = W_obs; } sum = 0; chi2 = 0.0; /* Compute Chi Square */ for ( i=0; i<N; i++ ) { // if ( m == 10 ) // fprintf(stats[TEST_NONPERIODIC], "%3d ", Wj[i]); // else // fprintf(stats[TEST_NONPERIODIC], "%4d ", Wj[i]); chi2 += pow(((double)Wj[i] - lambda)/pow(varWj, 0.5), 2); } CHI2_gpu[threadId] = chi2; printf("CH2gpu:%f\n",CHI2_gpu[threadId]); //float p_value = cephes_igamc(N/2.0, chi2/2.0); } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * N O N O V E R L A P P I N G T E M P L A T E T E S T * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ int NonOverlappingTemplateMatchingsCUDA(double alpha, unsigned char *data, int bits, int m) { cudaDeviceReset(); int n = bits; int numOfTemplates[100] = {0, 0, 2, 4, 6, 12, 20, 40, 74, 148, 284, 568, 1116, 2232, 4424, 8848, 17622, 35244, 70340, 140680, 281076, 562152}; /*---------------------------------------------------------------------------- NOTE: Should additional templates lengths beyond 21 be desired, they must first be constructed, saved into files and then the corresponding number of nonperiodic templates for that file be stored in the m-th position in the numOfTemplates 
variable. ----------------------------------------------------------------------------*/ unsigned int bit, W_obs, nu[6], *Wj = NULL; FILE *fp = NULL; double sum, chi2, p_value, lambda, pi[6], varWj; int i, j, jj, k, match, SKIP, M, N, K = 5; char directory[100]; unsigned char *sequence = NULL; unsigned char sequence1[(MIN(MAXNUMOFTEMPLATES, numOfTemplates[m]))][m] ; int i_,k_; time_t s,e; N = 8; M = n/N; if ( (Wj = (unsigned int*)malloc(N*sizeof(unsigned int))) == NULL ) { return 0; } lambda = (M-m+1)/pow(2, m); varWj = M*(1.0/pow(2.0, m) - (2.0*m-1.0)/pow(2.0, 2.0*m)); sprintf(directory, "../templates/template%d", m); if ( ((isNegative(lambda)) || (isZero(lambda))) || ((fp = fopen(directory, "r")) == NULL|| (((sequence = (unsigned char*)malloc(MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])*m*sizeof(unsigned char))) == NULL )))) { if ( sequence != NULL ) free(sequence); } else { if ( numOfTemplates[m] < MAXNUMOFTEMPLATES ) SKIP = 1; else SKIP = (int)(numOfTemplates[m]/MAXNUMOFTEMPLATES); numOfTemplates[m] = (int)numOfTemplates[m]/SKIP; sum = 0.0; for ( i=0; i<2; i++ ) { /* Compute Probabilities */ pi[i] = exp(-lambda+i*log(lambda)-cephes_lgam(i+1)); sum += pi[i]; } pi[0] = sum; for ( i=2; i<=K; i++ ) { /* Compute Probabilities */ pi[i-1] = exp(-lambda+i*log(lambda)-cephes_lgam(i+1)); sum += pi[i-1]; } pi[K] = 1 - sum; ///////////////////////////////////////////// for (i_ = 0; i_ < (MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])); i_++){ for (k_=0; k_<m; k_++ ) { fscanf(fp, "%d", &bit); sequence1[i_][k_] = bit; sequence[i_*m+k_] = bit; } } ///////////////////////////////////////////// //分配host变量 gpu_param M_cpu; M_cpu.M = M; M_cpu.N = N; M_cpu.m = m; M_cpu.lambda = lambda; M_cpu.varWj = varWj; double CHI2[MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])] = {0.0}; int WJ[148*8] = {0}; //分配device变量 gpu_param *M_gpu; unsigned char *data_gpu; double *CHI2_gpu; cudaMalloc((void**)&CHI2_gpu,(MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])*sizeof(double))); unsigned char *sequenceGPU ; 
cudaMalloc((void**)&sequenceGPU,MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])*m*sizeof(double)); cudaMalloc((void**)&M_gpu,sizeof(gpu_param)); cudaMalloc((void**)&data_gpu,(bits / 8)*sizeof(unsigned char)); int * WJ_gpu; cudaMalloc((void**)&WJ_gpu,148*8*sizeof(int)); cudaMemcpy(M_gpu, &M_cpu, sizeof(gpu_param), cudaMemcpyHostToDevice); cudaMemcpy(data_gpu, data, (bits / 8)*sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy(sequenceGPU, sequence, MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])*m*sizeof(double), cudaMemcpyHostToDevice); int blocksPerGrid = (MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])+512-1)/512; nonoverlapKernel<<<8,148>>>(WJ_gpu,CHI2_gpu,M_gpu,data_gpu,sequenceGPU); // s = clock(); //将结果传回到主机端 cudaMemcpy(WJ, WJ_gpu, 148*8*sizeof(int), cudaMemcpyDeviceToHost); cudaFree(WJ_gpu); cudaFree(CHI2_gpu); cudaFree(M_gpu); cudaFree(data_gpu); cudaFree(sequenceGPU); //cudaMemcpy(CHI2, CHI2_gpu, MIN(MAXNUMOFTEMPLATES, numOfTemplates[m])*sizeof(double), cudaMemcpyDeviceToHost); // e = clock(); // printf("1time:%f\n",(double)(e-s)/CLOCKS_PER_SEC); int flag = 1; for( jj=0; jj<MIN(MAXNUMOFTEMPLATES, numOfTemplates[m]); jj++ ) { sum = 0; chi2 = 0.0; /* Compute Chi Square */ //printf("wj:%d\n",WJ[jj]); for ( i=0; i<N; i++ ) { // if ( m == 10 ) // fprintf(stats[TEST_NONPERIODIC], "%3d ", Wj[i]); // else // fprintf(stats[TEST_NONPERIODIC], "%4d ", Wj[i]); //printf("WJ:%d\n",WJ[i]); chi2 += pow(((double)WJ[jj*N+i] - lambda)/pow(varWj, 0.5), 2); } p_value = cephes_igamc(N/2.0, chi2/2.0); if (p_value < alpha) flag = 0; // if ( isNegative(p_value) || isGreaterThanOne(p_value) ) // printf("\t\tWARNING: P_VALUE IS OUT OF RANGE.\n"); // printf("%9.6f %f %s %3d\n", chi2, p_value, p_value < alpha ? 
"FAILURE" : "SUCCESS", jj); // if ( SKIP > 1 ) // fseek(fp, (long)(SKIP-1)*2*m, SEEK_CUR); //} } //free(WJ_gpu); // for( jj=0; jj<MIN(MAXNUMOFTEMPLATES, numOfTemplates[m]); jj++ ) { // printf("CHI2:%f \n",CHI2[jj]); // p_value = cephes_igamc(N/2.0, CHI2[jj]/2.0); // if ( isNegative(p_value) || isGreaterThanOne(p_value) ) // printf("\t\tWARNING: P_VALUE IS OUT OF RANGE.\n"); // // printf("%9.6f %f %s %3d\n", CHI2[jj], p_value, p_value < alpha ? "FAILURE" : "SUCCESS", jj); // if ( SKIP > 1 ) // fseek(fp, (long)(SKIP-1)*2*m, SEEK_CUR); // } } if ( sequence != NULL ) free(sequence); free(Wj); if ( fp != NULL ) fclose(fp); // printf("p_value: %f\n",p_value); // if (p_value < alpha) // return 0; // return 1; }
0359a5ce5f418d029f2c3e38e27571a36b8ecd61.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/amp/check_finite_and_unscale_op.h" #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { template <typename T> __global__ void InverseAndMemset(const T* s, T* o, bool* found_inf) { *o = Inverse<T>(*s); *found_inf = false; } template <typename T, typename MT> __global__ void CheckFiniteAndUnscale(const T** xs, const MT* scale, int64_t size, int64_t* starts, bool* found_inf, T** outs) { const int64_t tid = threadIdx.x + blockIdx.x * blockDim.x; // copy starts array from global memory to shared memory extern __shared__ int64_t s_starts[]; for (int i = threadIdx.x; i <= size; i += blockDim.x) { s_starts[i] = starts[i]; } __syncthreads(); const int64_t num = s_starts[size]; int xs_index = 0; bool local_found_inf = false; const MT local_scale = *scale; for (int64_t idx = tid; idx < num; idx += gridDim.x * blockDim.x) { // get the "out" index of "id" // For example: // idx = 15, starts = [0, 10, 10, 20, 30] // because 10 <= idx < 20 ==> // the idx element locate in the 3rd tensor (notice the 2nd tensor size is // 0) int next_xs_index = xs_index; while (idx >= s_starts[next_xs_index]) next_xs_index++; xs_index = next_xs_index - 1; // get in data and out data const T* in = xs[xs_index]; T* out = 
outs[xs_index]; int64_t in_idx = idx - s_starts[xs_index]; // Unscale MT val = static_cast<MT>(in[in_idx]) * local_scale; T narrow_val = static_cast<T>(val); out[in_idx] = narrow_val; // CheckFinite if (!isfinite(narrow_val)) { local_found_inf = true; } } if (local_found_inf) { *found_inf = true; } } template <typename T> class CheckFiniteAndUnscaleGpuKernel : public framework::OpKernel<T> { using MPDType = typename details::MPTypeTrait<T>::Type; public: void Compute(const framework::ExecutionContext& ctx) const { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); const auto xs = ctx.MultiInput<framework::Tensor>("X"); const auto* scale = ctx.Input<framework::Tensor>("Scale"); auto outs = ctx.MultiOutput<framework::Tensor>("Out"); auto* found_inf = ctx.Output<framework::Tensor>("FoundInfinite"); const MPDType* scale_data = scale->data<MPDType>(); bool* found_inf_data = found_inf->mutable_data<bool>(dev_ctx.GetPlace()); framework::Tensor inverse_scale = ctx.AllocateTmpTensor<MPDType, platform::CUDADeviceContext>({1}, dev_ctx); MPDType* inverse_scale_v = inverse_scale.template data<MPDType>(); hipLaunchKernelGGL(( InverseAndMemset<MPDType>), dim3(1), dim3(1), 0, dev_ctx.stream(), scale_data, inverse_scale_v, found_inf_data); size_t xs_size = xs.size(); const auto& cpu_place = platform::CPUPlace(); // calculate each tensor's start index and copy to device auto h_starts_tensor = memory::Alloc(cpu_place, (xs_size + 1) * sizeof(int64_t)); int64_t* h_starts = reinterpret_cast<int64_t*>(h_starts_tensor->ptr()); auto d_starts_tensor = memory::Alloc(dev_ctx, (xs_size + 1) * sizeof(int64_t)); int64_t* d_starts = reinterpret_cast<int64_t*>(d_starts_tensor->ptr()); // the start index value of each tensor is // the sum of previous tensor's size. 
For example: // xs = [10, 0, 10, 10] ==> starts = [0, 10, 10, 20, 30] h_starts[0] = 0; for (int i = 1; i <= xs_size; i++) { h_starts[i] = h_starts[i - 1] + xs[i - 1]->numel(); } int64_t total_num = h_starts[xs_size]; memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()), d_starts, cpu_place, h_starts, (xs_size + 1) * sizeof(int64_t), dev_ctx.stream()); // copy each tensor's data address to device auto h_mem = memory::Alloc(cpu_place, 2 * xs_size * sizeof(T*)); const T** h_xs = reinterpret_cast<const T**>(h_mem->ptr()); T** h_outs = reinterpret_cast<T**>(h_mem->ptr()) + xs_size; auto d_mem = memory::Alloc(dev_ctx, 2 * xs_size * sizeof(T*)); const T** d_xs = reinterpret_cast<const T**>(d_mem->ptr()); T** d_outs = reinterpret_cast<T**>(d_mem->ptr()) + xs_size; for (size_t i = 0; i < xs_size; ++i) { h_xs[i] = xs[i]->data<T>(); h_outs[i] = outs[i]->mutable_data<T>(dev_ctx.GetPlace()); } memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()), d_xs, cpu_place, h_xs, 2 * xs_size * sizeof(T*), dev_ctx.stream()); // Launch Kernel int threads_per_block = ::min(static_cast<int64_t>(1024), total_num); int elements_per_block = threads_per_block * 20; // each thread deal with 20 number int blocks_per_grid = (total_num + elements_per_block - 1) / elements_per_block; VLOG(3) << "launch kernel"; hipLaunchKernelGGL(( CheckFiniteAndUnscale< T, MPDType>), dim3(blocks_per_grid), dim3(threads_per_block), (xs_size + 1) * sizeof(int64_t), dev_ctx.stream(), d_xs, inverse_scale_v, xs_size, d_starts, found_inf_data, d_outs); VLOG(3) << "finish kernel"; } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(check_finite_and_unscale, ops::CheckFiniteAndUnscaleGpuKernel<float>, ops::CheckFiniteAndUnscaleGpuKernel<double>, ops::CheckFiniteAndUnscaleGpuKernel<plat::float16>);
0359a5ce5f418d029f2c3e38e27571a36b8ecd61.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/amp/check_finite_and_unscale_op.h" #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { template <typename T> __global__ void InverseAndMemset(const T* s, T* o, bool* found_inf) { *o = Inverse<T>(*s); *found_inf = false; } template <typename T, typename MT> __global__ void CheckFiniteAndUnscale(const T** xs, const MT* scale, int64_t size, int64_t* starts, bool* found_inf, T** outs) { const int64_t tid = threadIdx.x + blockIdx.x * blockDim.x; // copy starts array from global memory to shared memory extern __shared__ int64_t s_starts[]; for (int i = threadIdx.x; i <= size; i += blockDim.x) { s_starts[i] = starts[i]; } __syncthreads(); const int64_t num = s_starts[size]; int xs_index = 0; bool local_found_inf = false; const MT local_scale = *scale; for (int64_t idx = tid; idx < num; idx += gridDim.x * blockDim.x) { // get the "out" index of "id" // For example: // idx = 15, starts = [0, 10, 10, 20, 30] // because 10 <= idx < 20 ==> // the idx element locate in the 3rd tensor (notice the 2nd tensor size is // 0) int next_xs_index = xs_index; while (idx >= s_starts[next_xs_index]) next_xs_index++; xs_index = next_xs_index - 1; // get in data and out data const T* in = xs[xs_index]; T* out = outs[xs_index]; int64_t in_idx = idx - s_starts[xs_index]; // Unscale MT val = 
static_cast<MT>(in[in_idx]) * local_scale; T narrow_val = static_cast<T>(val); out[in_idx] = narrow_val; // CheckFinite if (!isfinite(narrow_val)) { local_found_inf = true; } } if (local_found_inf) { *found_inf = true; } } template <typename T> class CheckFiniteAndUnscaleGpuKernel : public framework::OpKernel<T> { using MPDType = typename details::MPTypeTrait<T>::Type; public: void Compute(const framework::ExecutionContext& ctx) const { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); const auto xs = ctx.MultiInput<framework::Tensor>("X"); const auto* scale = ctx.Input<framework::Tensor>("Scale"); auto outs = ctx.MultiOutput<framework::Tensor>("Out"); auto* found_inf = ctx.Output<framework::Tensor>("FoundInfinite"); const MPDType* scale_data = scale->data<MPDType>(); bool* found_inf_data = found_inf->mutable_data<bool>(dev_ctx.GetPlace()); framework::Tensor inverse_scale = ctx.AllocateTmpTensor<MPDType, platform::CUDADeviceContext>({1}, dev_ctx); MPDType* inverse_scale_v = inverse_scale.template data<MPDType>(); InverseAndMemset<MPDType><<<1, 1, 0, dev_ctx.stream()>>>( scale_data, inverse_scale_v, found_inf_data); size_t xs_size = xs.size(); const auto& cpu_place = platform::CPUPlace(); // calculate each tensor's start index and copy to device auto h_starts_tensor = memory::Alloc(cpu_place, (xs_size + 1) * sizeof(int64_t)); int64_t* h_starts = reinterpret_cast<int64_t*>(h_starts_tensor->ptr()); auto d_starts_tensor = memory::Alloc(dev_ctx, (xs_size + 1) * sizeof(int64_t)); int64_t* d_starts = reinterpret_cast<int64_t*>(d_starts_tensor->ptr()); // the start index value of each tensor is // the sum of previous tensor's size. 
For example: // xs = [10, 0, 10, 10] ==> starts = [0, 10, 10, 20, 30] h_starts[0] = 0; for (int i = 1; i <= xs_size; i++) { h_starts[i] = h_starts[i - 1] + xs[i - 1]->numel(); } int64_t total_num = h_starts[xs_size]; memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()), d_starts, cpu_place, h_starts, (xs_size + 1) * sizeof(int64_t), dev_ctx.stream()); // copy each tensor's data address to device auto h_mem = memory::Alloc(cpu_place, 2 * xs_size * sizeof(T*)); const T** h_xs = reinterpret_cast<const T**>(h_mem->ptr()); T** h_outs = reinterpret_cast<T**>(h_mem->ptr()) + xs_size; auto d_mem = memory::Alloc(dev_ctx, 2 * xs_size * sizeof(T*)); const T** d_xs = reinterpret_cast<const T**>(d_mem->ptr()); T** d_outs = reinterpret_cast<T**>(d_mem->ptr()) + xs_size; for (size_t i = 0; i < xs_size; ++i) { h_xs[i] = xs[i]->data<T>(); h_outs[i] = outs[i]->mutable_data<T>(dev_ctx.GetPlace()); } memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()), d_xs, cpu_place, h_xs, 2 * xs_size * sizeof(T*), dev_ctx.stream()); // Launch Kernel int threads_per_block = std::min(static_cast<int64_t>(1024), total_num); int elements_per_block = threads_per_block * 20; // each thread deal with 20 number int blocks_per_grid = (total_num + elements_per_block - 1) / elements_per_block; VLOG(3) << "launch kernel"; CheckFiniteAndUnscale< T, MPDType><<<blocks_per_grid, threads_per_block, (xs_size + 1) * sizeof(int64_t), dev_ctx.stream()>>>( d_xs, inverse_scale_v, xs_size, d_starts, found_inf_data, d_outs); VLOG(3) << "finish kernel"; } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(check_finite_and_unscale, ops::CheckFiniteAndUnscaleGpuKernel<float>, ops::CheckFiniteAndUnscaleGpuKernel<double>, ops::CheckFiniteAndUnscaleGpuKernel<plat::float16>);
43b76aec644df413f8359a2f7d6d2e0c956c766e.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
**/ #include <hip/hip_runtime.h> #include "cupoch/geometry/image.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/utility/console.h" #include "cupoch/utility/platform.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/shader/texture_phong_shader.h" #include "cupoch/visualization/utility/color_map.h" using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { GLenum GetFormat(const geometry::Geometry &geometry) { auto it = gl_helper::texture_format_map_.find( ((const geometry::TriangleMesh &)geometry) .texture_.num_of_channels_); if (it == gl_helper::texture_format_map_.end()) { utility::LogWarning("Unknown texture format, abort!"); return false; } return it->second; } GLenum GetType(const geometry::Geometry &geometry) { auto it = gl_helper::texture_type_map_.find( ((const geometry::TriangleMesh &)geometry) .texture_.bytes_per_channel_); if (it == gl_helper::texture_type_map_.end()) { utility::LogWarning("Unknown texture type, abort!"); return false; } return it->second; } struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f *vertices, const Eigen::Vector3f *vertex_normals, const int *triangles, const Eigen::Vector3f *triangle_normals, const Eigen::Vector2f *triangle_uvs, RenderOption::MeshShadeOption shade_option) : vertices_(vertices), vertex_normals_(vertex_normals), triangles_(triangles), triangle_normals_(triangle_normals), triangle_uvs_(triangle_uvs), shade_option_(shade_option){}; const Eigen::Vector3f *vertices_; const Eigen::Vector3f *vertex_normals_; const int *triangles_; const Eigen::Vector3f *triangle_normals_; const Eigen::Vector2f *triangle_uvs_; const RenderOption::MeshShadeOption shade_option_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector2f> operator()(size_t k) const { int i = k / 3; int vi = triangles_[k]; if (shade_option_ == 
RenderOption::MeshShadeOption::FlatShade) { return thrust::make_tuple(vertices_[vi], triangle_normals_[i], triangle_uvs_[k]); } else { return thrust::make_tuple(vertices_[vi], vertex_normals_[vi], triangle_uvs_[k]); } } }; } // namespace bool TexturePhongShader::Compile() { if (CompileShaders(texture_phong_vertex_shader, NULL, texture_phong_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_normal_ = glGetAttribLocation(program_, "vertex_normal"); vertex_uv_ = glGetAttribLocation(program_, "vertex_uv"); MVP_ = glGetUniformLocation(program_, "MVP"); V_ = glGetUniformLocation(program_, "V"); M_ = glGetUniformLocation(program_, "M"); light_position_world_ = glGetUniformLocation(program_, "light_position_world_4"); light_color_ = glGetUniformLocation(program_, "light_color_4"); light_diffuse_power_ = glGetUniformLocation(program_, "light_diffuse_power_4"); light_specular_power_ = glGetUniformLocation(program_, "light_specular_power_4"); light_specular_shininess_ = glGetUniformLocation(program_, "light_specular_shininess_4"); light_ambient_ = glGetUniformLocation(program_, "light_ambient"); diffuse_texture_ = glGetUniformLocation(program_, "diffuse_texture"); return true; } void TexturePhongShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool TexturePhongShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. 
UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); const size_t num_texture_height = GetTextureHeight(geometry); const size_t num_texture_width = GetTextureWidth(geometry); glGenTextures(1, &diffuse_texture_buffer_); glBindTexture(GL_TEXTURE_2D, diffuse_texture_buffer_); GLenum format = GetFormat(geometry); GLenum type = GetType(geometry); glTexImage2D(GL_TEXTURE_2D, 0, format, num_texture_width, num_texture_height, 0, format, type, 0); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, hipGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_normal_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_normal_buffer_, hipGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_uv_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector2f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[2], vertex_uv_buffer_, hipGraphicsMapFlagsNone)); glGenBuffers(1, &diffuse_texture_pixel_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, diffuse_texture_pixel_buffer_); size_t texture_size = GetTextureSize(geometry); 
glBufferData(GL_PIXEL_UNPACK_BUFFER, texture_size, 0, GL_STATIC_DRAW); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[3], diffuse_texture_pixel_buffer_, hipGraphicsMapFlagsNone)); Eigen::Vector3f *raw_points_ptr; Eigen::Vector3f *raw_normals_ptr; Eigen::Vector2f *raw_uvs_ptr; uint8_t *raw_render_texture_ptr; size_t n_bytes; cudaSafeCall(hipGraphicsMapResources(4, cuda_graphics_resources_)); cudaSafeCall(hipGraphicsResourceGetMappedPointer( (void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsResourceGetMappedPointer( (void **)&raw_normals_ptr, &n_bytes, cuda_graphics_resources_[1])); cudaSafeCall(hipGraphicsResourceGetMappedPointer( (void **)&raw_uvs_ptr, &n_bytes, cuda_graphics_resources_[2])); cudaSafeCall(hipGraphicsResourceGetMappedPointer( (void **)&raw_render_texture_ptr, &n_bytes, cuda_graphics_resources_[3])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_normals_ptr = thrust::device_pointer_cast(raw_normals_ptr); thrust::device_ptr<Eigen::Vector2f> dev_uvs_ptr = thrust::device_pointer_cast(raw_uvs_ptr); thrust::device_ptr<uint8_t> dev_texture_ptr = thrust::device_pointer_cast(raw_render_texture_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_normals_ptr, dev_uvs_ptr, dev_texture_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(4); bound_ = true; return true; } bool TexturePhongShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } const size_t num_data_height = GetTextureHeight(geometry); const size_t num_data_width = GetTextureWidth(geometry); GLenum format = GetFormat(geometry); GLenum type = 
GetType(geometry); glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glUniformMatrix4fv(V_, 1, GL_FALSE, view.GetViewMatrix().data()); glUniformMatrix4fv(M_, 1, GL_FALSE, view.GetModelMatrix().data()); glUniformMatrix4fv(light_position_world_, 1, GL_FALSE, light_position_world_data_.data()); glUniformMatrix4fv(light_color_, 1, GL_FALSE, light_color_data_.data()); glUniform4fv(light_diffuse_power_, 1, light_diffuse_power_data_.data()); glUniform4fv(light_specular_power_, 1, light_specular_power_data_.data()); glUniform4fv(light_specular_shininess_, 1, light_specular_shininess_data_.data()); glUniform4fv(light_ambient_, 1, light_ambient_data_.data()); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, diffuse_texture_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, diffuse_texture_pixel_buffer_); glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, num_data_width, num_data_height, format, type, 0); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); glUniform1i(diffuse_texture_, 0); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_normal_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glVertexAttribPointer(vertex_normal_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_uv_); glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_); glVertexAttribPointer(vertex_uv_, 2, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_normal_); glDisableVertexAttribArray(vertex_uv_); return true; } void TexturePhongShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(hipGraphicsUnregisterResource( cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsUnregisterResource( cuda_graphics_resources_[1])); cudaSafeCall(hipGraphicsUnregisterResource( 
cuda_graphics_resources_[2])); cudaSafeCall(hipGraphicsUnregisterResource( cuda_graphics_resources_[3])); } glDeleteBuffers(1, &diffuse_texture_buffer_); glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_normal_buffer_); glDeleteBuffers(1, &vertex_uv_buffer_); glDeleteTextures(1, &diffuse_texture_buffer_); bound_ = false; } } void TexturePhongShader::SetLighting(const ViewControl &view, const RenderOption &option) { const auto &box = view.GetBoundingBox(); light_position_world_data_.setOnes(); light_color_data_.setOnes(); for (int i = 0; i < 4; i++) { light_position_world_data_.block<3, 1>(0, i) = box.GetCenter().cast<GLfloat>() + (float)box.GetMaxExtent() * ((float)option.light_position_relative_[i](0) * view.GetRight() + (float)option.light_position_relative_[i](1) * view.GetUp() + (float)option.light_position_relative_[i](2) * view.GetFront()); light_color_data_.block<3, 1>(0, i) = option.light_color_[i].cast<GLfloat>(); } if (option.light_on_) { light_diffuse_power_data_ = Eigen::Vector4f(option.light_diffuse_power_).cast<GLfloat>(); light_specular_power_data_ = Eigen::Vector4f(option.light_specular_power_).cast<GLfloat>(); light_specular_shininess_data_ = Eigen::Vector4f(option.light_specular_shininess_) .cast<GLfloat>(); light_ambient_data_.block<3, 1>(0, 0) = option.light_ambient_color_.cast<GLfloat>(); light_ambient_data_(3) = 1.0f; } else { light_diffuse_power_data_ = gl_helper::GLVector4f::Zero(); light_specular_power_data_ = gl_helper::GLVector4f::Zero(); light_specular_shininess_data_ = gl_helper::GLVector4f::Ones(); light_ambient_data_ = gl_helper::GLVector4f(1.0f, 1.0f, 1.0f, 1.0f); } } bool TexturePhongShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if 
(option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } SetLighting(view, option); return true; } bool TexturePhongShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals, thrust::device_ptr<Eigen::Vector2f> &uvs, thrust::device_ptr<uint8_t> &texture_image) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } if (mesh.HasTriangleNormals() == false || mesh.HasVertexNormals() == false) { PrintShaderWarning("Binding failed because mesh has no normals."); PrintShaderWarning("Call ComputeVertexNormals() before binding."); return false; } copy_trianglemesh_functor func( thrust::raw_pointer_cast(mesh.vertices_.data()), thrust::raw_pointer_cast(mesh.vertex_normals_.data()), (int *)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.triangle_normals_.data()), thrust::raw_pointer_cast(mesh.triangle_uvs_.data()), option.mesh_shade_option_); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(mesh.triangles_.size() * 3), make_tuple_iterator(points, normals, uvs), func); thrust::copy(mesh.texture_.data_.begin(), mesh.texture_.data_.end(), texture_image); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); 
return true; } size_t TexturePhongShaderForTriangleMesh::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; } size_t TexturePhongShaderForTriangleMesh::GetTextureSize( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.data_.size(); } size_t TexturePhongShaderForTriangleMesh::GetTextureHeight( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.height_; } size_t TexturePhongShaderForTriangleMesh::GetTextureWidth( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.width_; }
43b76aec644df413f8359a2f7d6d2e0c956c766e.cu
/** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
**/ #include <cuda_runtime.h> #include "cupoch/geometry/image.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/utility/console.h" #include "cupoch/utility/platform.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/shader/texture_phong_shader.h" #include "cupoch/visualization/utility/color_map.h" using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { GLenum GetFormat(const geometry::Geometry &geometry) { auto it = gl_helper::texture_format_map_.find( ((const geometry::TriangleMesh &)geometry) .texture_.num_of_channels_); if (it == gl_helper::texture_format_map_.end()) { utility::LogWarning("Unknown texture format, abort!"); return false; } return it->second; } GLenum GetType(const geometry::Geometry &geometry) { auto it = gl_helper::texture_type_map_.find( ((const geometry::TriangleMesh &)geometry) .texture_.bytes_per_channel_); if (it == gl_helper::texture_type_map_.end()) { utility::LogWarning("Unknown texture type, abort!"); return false; } return it->second; } struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f *vertices, const Eigen::Vector3f *vertex_normals, const int *triangles, const Eigen::Vector3f *triangle_normals, const Eigen::Vector2f *triangle_uvs, RenderOption::MeshShadeOption shade_option) : vertices_(vertices), vertex_normals_(vertex_normals), triangles_(triangles), triangle_normals_(triangle_normals), triangle_uvs_(triangle_uvs), shade_option_(shade_option){}; const Eigen::Vector3f *vertices_; const Eigen::Vector3f *vertex_normals_; const int *triangles_; const Eigen::Vector3f *triangle_normals_; const Eigen::Vector2f *triangle_uvs_; const RenderOption::MeshShadeOption shade_option_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector2f> operator()(size_t k) const { int i = k / 3; int vi = triangles_[k]; if (shade_option_ == 
RenderOption::MeshShadeOption::FlatShade) { return thrust::make_tuple(vertices_[vi], triangle_normals_[i], triangle_uvs_[k]); } else { return thrust::make_tuple(vertices_[vi], vertex_normals_[vi], triangle_uvs_[k]); } } }; } // namespace bool TexturePhongShader::Compile() { if (CompileShaders(texture_phong_vertex_shader, NULL, texture_phong_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_normal_ = glGetAttribLocation(program_, "vertex_normal"); vertex_uv_ = glGetAttribLocation(program_, "vertex_uv"); MVP_ = glGetUniformLocation(program_, "MVP"); V_ = glGetUniformLocation(program_, "V"); M_ = glGetUniformLocation(program_, "M"); light_position_world_ = glGetUniformLocation(program_, "light_position_world_4"); light_color_ = glGetUniformLocation(program_, "light_color_4"); light_diffuse_power_ = glGetUniformLocation(program_, "light_diffuse_power_4"); light_specular_power_ = glGetUniformLocation(program_, "light_specular_power_4"); light_specular_shininess_ = glGetUniformLocation(program_, "light_specular_shininess_4"); light_ambient_ = glGetUniformLocation(program_, "light_ambient"); diffuse_texture_ = glGetUniformLocation(program_, "diffuse_texture"); return true; } void TexturePhongShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool TexturePhongShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. 
UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); const size_t num_texture_height = GetTextureHeight(geometry); const size_t num_texture_width = GetTextureWidth(geometry); glGenTextures(1, &diffuse_texture_buffer_); glBindTexture(GL_TEXTURE_2D, diffuse_texture_buffer_); GLenum format = GetFormat(geometry); GLenum type = GetType(geometry); glTexImage2D(GL_TEXTURE_2D, 0, format, num_texture_width, num_texture_height, 0, format, type, 0); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_normal_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_normal_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_uv_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector2f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[2], vertex_uv_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &diffuse_texture_pixel_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, diffuse_texture_pixel_buffer_); size_t texture_size = GetTextureSize(geometry); 
glBufferData(GL_PIXEL_UNPACK_BUFFER, texture_size, 0, GL_STATIC_DRAW); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[3], diffuse_texture_pixel_buffer_, cudaGraphicsMapFlagsNone)); Eigen::Vector3f *raw_points_ptr; Eigen::Vector3f *raw_normals_ptr; Eigen::Vector2f *raw_uvs_ptr; uint8_t *raw_render_texture_ptr; size_t n_bytes; cudaSafeCall(cudaGraphicsMapResources(4, cuda_graphics_resources_)); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_normals_ptr, &n_bytes, cuda_graphics_resources_[1])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_uvs_ptr, &n_bytes, cuda_graphics_resources_[2])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_render_texture_ptr, &n_bytes, cuda_graphics_resources_[3])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_normals_ptr = thrust::device_pointer_cast(raw_normals_ptr); thrust::device_ptr<Eigen::Vector2f> dev_uvs_ptr = thrust::device_pointer_cast(raw_uvs_ptr); thrust::device_ptr<uint8_t> dev_texture_ptr = thrust::device_pointer_cast(raw_render_texture_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_normals_ptr, dev_uvs_ptr, dev_texture_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(4); bound_ = true; return true; } bool TexturePhongShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } const size_t num_data_height = GetTextureHeight(geometry); const size_t num_data_width = GetTextureWidth(geometry); GLenum format = GetFormat(geometry); GLenum type = 
GetType(geometry); glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glUniformMatrix4fv(V_, 1, GL_FALSE, view.GetViewMatrix().data()); glUniformMatrix4fv(M_, 1, GL_FALSE, view.GetModelMatrix().data()); glUniformMatrix4fv(light_position_world_, 1, GL_FALSE, light_position_world_data_.data()); glUniformMatrix4fv(light_color_, 1, GL_FALSE, light_color_data_.data()); glUniform4fv(light_diffuse_power_, 1, light_diffuse_power_data_.data()); glUniform4fv(light_specular_power_, 1, light_specular_power_data_.data()); glUniform4fv(light_specular_shininess_, 1, light_specular_shininess_data_.data()); glUniform4fv(light_ambient_, 1, light_ambient_data_.data()); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, diffuse_texture_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, diffuse_texture_pixel_buffer_); glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, num_data_width, num_data_height, format, type, 0); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); glUniform1i(diffuse_texture_, 0); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_normal_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glVertexAttribPointer(vertex_normal_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_uv_); glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_); glVertexAttribPointer(vertex_uv_, 2, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_normal_); glDisableVertexAttribArray(vertex_uv_); return true; } void TexturePhongShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[1])); 
cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[2])); cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[3])); } glDeleteBuffers(1, &diffuse_texture_buffer_); glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_normal_buffer_); glDeleteBuffers(1, &vertex_uv_buffer_); glDeleteTextures(1, &diffuse_texture_buffer_); bound_ = false; } } void TexturePhongShader::SetLighting(const ViewControl &view, const RenderOption &option) { const auto &box = view.GetBoundingBox(); light_position_world_data_.setOnes(); light_color_data_.setOnes(); for (int i = 0; i < 4; i++) { light_position_world_data_.block<3, 1>(0, i) = box.GetCenter().cast<GLfloat>() + (float)box.GetMaxExtent() * ((float)option.light_position_relative_[i](0) * view.GetRight() + (float)option.light_position_relative_[i](1) * view.GetUp() + (float)option.light_position_relative_[i](2) * view.GetFront()); light_color_data_.block<3, 1>(0, i) = option.light_color_[i].cast<GLfloat>(); } if (option.light_on_) { light_diffuse_power_data_ = Eigen::Vector4f(option.light_diffuse_power_).cast<GLfloat>(); light_specular_power_data_ = Eigen::Vector4f(option.light_specular_power_).cast<GLfloat>(); light_specular_shininess_data_ = Eigen::Vector4f(option.light_specular_shininess_) .cast<GLfloat>(); light_ambient_data_.block<3, 1>(0, 0) = option.light_ambient_color_.cast<GLfloat>(); light_ambient_data_(3) = 1.0f; } else { light_diffuse_power_data_ = gl_helper::GLVector4f::Zero(); light_specular_power_data_ = gl_helper::GLVector4f::Zero(); light_specular_shininess_data_ = gl_helper::GLVector4f::Ones(); light_ambient_data_ = gl_helper::GLVector4f(1.0f, 1.0f, 1.0f, 1.0f); } } bool TexturePhongShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); 
return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } SetLighting(view, option); return true; } bool TexturePhongShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals, thrust::device_ptr<Eigen::Vector2f> &uvs, thrust::device_ptr<uint8_t> &texture_image) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } if (mesh.HasTriangleNormals() == false || mesh.HasVertexNormals() == false) { PrintShaderWarning("Binding failed because mesh has no normals."); PrintShaderWarning("Call ComputeVertexNormals() before binding."); return false; } copy_trianglemesh_functor func( thrust::raw_pointer_cast(mesh.vertices_.data()), thrust::raw_pointer_cast(mesh.vertex_normals_.data()), (int *)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.triangle_normals_.data()), thrust::raw_pointer_cast(mesh.triangle_uvs_.data()), option.mesh_shade_option_); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(mesh.triangles_.size() * 3), make_tuple_iterator(points, normals, uvs), func); thrust::copy(mesh.texture_.data_.begin(), mesh.texture_.data_.end(), texture_image); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = 
GLsizei(mesh.triangles_.size() * 3); return true; } size_t TexturePhongShaderForTriangleMesh::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; } size_t TexturePhongShaderForTriangleMesh::GetTextureSize( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.data_.size(); } size_t TexturePhongShaderForTriangleMesh::GetTextureHeight( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.height_; } size_t TexturePhongShaderForTriangleMesh::GetTextureWidth( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.width_; }
73451fed6824d24ad5429631b000bf91fa8f4169.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdexcept> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/for_each.h> #include <thrust/sort.h> #include <thrust/unique.h> #include <thrust/remove.h> #include <thrust/sequence.h> #include <thrust/gather.h> #include <thrust/count.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <cudf/utilities/error.hpp> #include "nvstrings/NVStrings.h" #include "./NVStringsImpl.h" #include "../custring_view.cuh" #include "../util.h" // takes scattered pointers to custring_view objects and // initializes a new NVStringsImpl void NVStrings_init_from_custrings( NVStringsImpl* pImpl, custring_view_array d_strings, unsigned int count ) { auto execpol = rmm::exec_policy(0); // get individual sizes rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_sizes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( dstr ) d_sizes[idx] = ALIGN_SIZE(dstr->alloc_size()); }); // create output object char* d_buffer = pImpl->createMemoryFor(d_sizes); if( d_buffer==0 ) return; // this is valid // create offsets rmm::device_vector<size_t> offsets(count,0); 
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // finally, copy the strings custring_view_array d_results = pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_buffer, d_offsets, d_results] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; char* buffer = d_buffer + d_offsets[idx]; d_results[idx] = custring_view::create_from(buffer,*dstr); }); // } // create a new instance containing only the strings at the specified positions // position values can be in any order and can even be repeated NVStrings* NVStrings::gather( const int* pos, unsigned int elements, bool bdevmem ) { unsigned int count = size(); if( count==0 || elements==0 || pos==0 ) return new NVStrings(0); auto execpol = rmm::exec_policy(0); const int* d_pos = pos; if( !bdevmem ) { // copy indexes to device memory d_pos = const_cast<const int*>(device_alloc<int>(elements,0)); CUDA_TRY(hipMemcpyAsync((void*)d_pos,pos,elements*sizeof(int),hipMemcpyHostToDevice)) } // create working memory rmm::device_vector<custring_view*> results(elements,nullptr); auto d_results = results.data().get(); rmm::device_vector<bool> flags(elements,false); auto d_flags = flags.data().get(); custring_view_array d_strings = pImpl->getStringsPtr(); // do the gather thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elements, [d_strings, d_pos, count, d_results, d_flags] __device__(unsigned int idx){ int pos = d_pos[idx]; if( (pos < 0) || (pos >= count) ) d_flags[idx] = true; else d_results[idx] = d_strings[pos]; }); // check for invalid position values if( thrust::count(execpol->on(0), flags.begin(), flags.end(), true) ) { if( !bdevmem ) RMM_FREE((void*)d_pos,0); throw std::out_of_range("gather position value out of range"); } // build resulting instance NVStrings* rtn = new NVStrings(elements); 
NVStrings_init_from_custrings(rtn->pImpl, d_results, elements); if( !bdevmem ) RMM_FREE((void*)d_pos,0); return rtn; } // create a new instance containing only the strings where the corresponding mask value is true NVStrings* NVStrings::gather( const bool* mask, bool bdevmem ) { size_t count = size(); if( count==0 || mask==nullptr ) return new NVStrings(0); // copy mask array to device memory if necessary auto execpol = rmm::exec_policy(0); const bool* d_mask = mask; if( !bdevmem ) { d_mask = const_cast<const bool*>(device_alloc<bool>(count,0)); CUDA_TRY(hipMemcpyAsync((void*)d_mask,mask,count*sizeof(mask[0]),hipMemcpyHostToDevice,0)) } // create list of index positions from the mask array rmm::device_vector<int> indexes(count); auto d_indexes = indexes.data().get(); auto d_indexes_end = thrust::copy_if(execpol->on(0), thrust::make_counting_iterator<int>(0), thrust::make_counting_iterator<int>(count), d_indexes, [d_mask] __device__ (int idx) { return d_mask[idx]; }); // done with the mask if( !bdevmem ) RMM_FREE((void*)d_mask,0); count = d_indexes_end - d_indexes; return gather( d_indexes, count, true ); } // // s1 = ['a','b,'c','d'] // s2 = ['e','f'] // pos = [1,3] -- must be the same length as s2 // s3 = s1.scatter(s2,pos) // ['a','e','c','f'] // NVStrings* NVStrings::scatter( NVStrings& strs, const int* pos, bool bdevmem ) { unsigned int count = size(); unsigned int elements = strs.size(); if( pos==0 ) throw std::invalid_argument("position parameter cannot be null"); auto execpol = rmm::exec_policy(0); const int* d_pos = pos; if( !bdevmem ) { // copy indexes to device memory d_pos = const_cast<const int*>(device_alloc<int>(elements,0)); CUDA_TRY(hipMemcpyAsync((void*)d_pos,pos,elements*sizeof(int),hipMemcpyHostToDevice)) } // The most efficient method here is to build pointer array // applying the parameters to the specified positions and // then build a new instance from the resulting pointers. 
rmm::device_vector<custring_view*> results(count,nullptr); auto d_results = results.data().get(); custring_view_array d_strings = pImpl->getStringsPtr(); custring_view_array d_new_strings = strs.pImpl->getStringsPtr(); thrust::copy( execpol->on(0), d_strings, d_strings+count, d_results ); thrust::scatter( execpol->on(0), d_new_strings, d_new_strings+elements, d_pos, d_results ); // build resulting instance NVStrings* rtn = new NVStrings(count); NVStrings_init_from_custrings(rtn->pImpl, d_results, count); if( !bdevmem ) RMM_FREE((void*)d_pos,0); return rtn; } // // s1 = ['a','b,'c','d'] // pos = [1,3] // s3 = s1.scatter('e',pos,2) // ['a','e','c','e'] // NVStrings* NVStrings::scatter( const char* str, const int* pos, unsigned int elements, bool bdevmem ) { unsigned int count = size(); if( pos==nullptr ) throw std::invalid_argument("parameter cannot be null"); auto execpol = rmm::exec_policy(0); // copy string to device custring_view* d_repl = custring_from_host(str); const int* d_pos = pos; if( !bdevmem ) { // copy indexes to device memory d_pos = const_cast<const int*>(device_alloc<int>(elements,0)); CUDA_TRY(hipMemcpyAsync((void*)d_pos,pos,elements*sizeof(int),hipMemcpyHostToDevice)) } // create result output array rmm::device_vector<custring_view*> results(count,nullptr); auto d_results = results.data().get(); custring_view_array d_strings = pImpl->getStringsPtr(); thrust::copy( execpol->on(0), d_strings, d_strings+count, d_results ); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elements, [d_pos, count, d_repl, d_results] __device__ (unsigned int idx) { int pos = d_pos[idx]; if( (pos >= 0) && (pos < count) ) d_results[pos] = d_repl; }); // build resulting instance NVStrings* rtn = new NVStrings(count); NVStrings_init_from_custrings(rtn->pImpl, d_results, count); if( !bdevmem ) RMM_FREE((void*)d_pos,0); RMM_FREE((void*)d_repl,0); return rtn; } NVStrings* NVStrings::sublist( unsigned int start, unsigned int end, int step ) { 
unsigned int count = size(); if( end > count ) end = count; if( start > count ) start = count; if( step==0 ) step = 1; if( start == end ) return new NVStrings(0); if( ((step > 0) && (start > end)) || ((step < 0) && (start < end)) ) return new NVStrings(0); unsigned int elems = (unsigned int)std::abs((int)(end-start)); unsigned int abs_step = (unsigned int)std::abs(step); elems = (elems + abs_step -1)/abs_step; // adjust for steps auto execpol = rmm::exec_policy(0); rmm::device_vector<int> indexes(elems); thrust::sequence(execpol->on(0),indexes.begin(),indexes.end(),(int)start,step); return gather(indexes.data().get(),elems,true); } // remove the specified strings and return a new instance NVStrings* NVStrings::remove_strings( const int* pos, unsigned int elements, bool bdevmem ) { unsigned int count = size(); if( count==0 ) return new NVStrings(0); if( elements==0 || pos==0 ) return copy(); auto execpol = rmm::exec_policy(0); int* dpos = device_alloc<int>(elements,0); if( bdevmem ) CUDA_TRY( hipMemcpyAsync((void*)dpos,pos,elements*sizeof(unsigned int),hipMemcpyDeviceToDevice)) else CUDA_TRY( hipMemcpyAsync((void*)dpos,pos,elements*sizeof(unsigned int),hipMemcpyHostToDevice)) // sort the position values thrust::sort(execpol->on(0),dpos,dpos+elements,thrust::greater<int>()); // also should remove duplicates int* nend = thrust::unique(execpol->on(0),dpos,dpos+elements,thrust::equal_to<int>()); elements = (unsigned int)(nend - dpos); if( count < elements ) { RMM_FREE(dpos,0); fprintf(stderr,"remove_strings: more positions (%u) specified than the number of strings (%u)\n",elements,count); return nullptr; } // build array to hold positions which are not to be removed by marking deleted positions with -1 rmm::device_vector<int> dnpos(count); thrust::sequence(execpol->on(0),dnpos.begin(),dnpos.end()); int* d_npos = dnpos.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elements, [dpos, d_npos, count] __device__ (unsigned int 
idx) { int pos = dpos[idx]; if( (pos >= 0) && (pos < count) ) d_npos[pos] = -1; }); // now remove the positions marked with -1 int* dend = thrust::remove_if(execpol->on(0),d_npos,d_npos+count,[] __device__ (int val) { return val < 0; }); unsigned int new_count = (unsigned int)(dend-d_npos); // gather string pointers based on indexes in dnpos (new-positions) custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<custring_view*> results(new_count,nullptr); custring_view_array d_results = results.data().get(); thrust::gather(execpol->on(0),d_npos,d_npos+new_count,d_strings,d_results); // create output object from results pointers NVStrings* rtn = new NVStrings(new_count); NVStrings_init_from_custrings(rtn->pImpl, d_results, new_count); RMM_FREE(dpos,0); return rtn; } // this sorts the strings into a new instance; // a sorted strings list can improve performance by reducing divergence NVStrings* NVStrings::sort( sorttype stype, bool ascending, bool nullfirst ) { unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); auto execpol = rmm::exec_policy(0); // copy the pointers so we can sort them rmm::device_vector<custring_view*> results(count,nullptr); custring_view_array d_results = results.data().get(); thrust::copy( execpol->on(0), d_strings, d_strings+count, d_results ); thrust::sort(execpol->on(0), d_results, d_results+count, [stype, ascending, nullfirst] __device__( custring_view*& lhs, custring_view*& rhs ) { if( lhs==0 || rhs==0 ) return (nullfirst ? rhs!=0 : lhs!=0); // null < non-null // allow sorting by name and length int diff = 0; if( stype & NVStrings::length ) diff = lhs->size() - rhs->size(); if( diff==0 && (stype & NVStrings::name) ) diff = lhs->compare(*rhs); return (ascending ? 
(diff < 0) : (diff > 0)); }); // build new instance from the sorted pointers NVStrings* rtn = new NVStrings(count); NVStrings_init_from_custrings( rtn->pImpl, d_results, count ); return rtn; } // just provide the index order and leave the strings intact int NVStrings::order( sorttype stype, bool ascending, unsigned int* indexes, bool nullfirst, bool todevice ) { unsigned int count = size(); unsigned int* d_indexes = indexes; auto execpol = rmm::exec_policy(0); if( !todevice ) d_indexes = device_alloc<unsigned int>(count,0); thrust::sequence(execpol->on(0), d_indexes, d_indexes+count); // custring_view_array d_strings = pImpl->getStringsPtr(); thrust::sort(execpol->on(0), d_indexes, d_indexes+count, [d_strings, stype, ascending, nullfirst] __device__( unsigned int& lidx, unsigned int& ridx ) { custring_view* lhs = d_strings[lidx]; custring_view* rhs = d_strings[ridx]; if( lhs==0 || rhs==0 ) return (nullfirst ? rhs!=0 : lhs!=0); // allow sorting by name and length int diff = 0; if( stype & NVStrings::length ) diff = lhs->size() - rhs->size(); if( diff==0 && (stype & NVStrings::name) ) diff = lhs->compare(*rhs); return (ascending ? (diff < 0) : (diff > 0)); }); // if( !todevice ) { CUDA_TRY(hipMemcpyAsync(indexes,d_indexes,count*sizeof(unsigned int),hipMemcpyDeviceToHost)) RMM_FREE(d_indexes,0); } return 0; }
73451fed6824d24ad5429631b000bf91fa8f4169.cu
/* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdexcept> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/for_each.h> #include <thrust/sort.h> #include <thrust/unique.h> #include <thrust/remove.h> #include <thrust/sequence.h> #include <thrust/gather.h> #include <thrust/count.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <cudf/utilities/error.hpp> #include "nvstrings/NVStrings.h" #include "./NVStringsImpl.h" #include "../custring_view.cuh" #include "../util.h" // takes scattered pointers to custring_view objects and // initializes a new NVStringsImpl void NVStrings_init_from_custrings( NVStringsImpl* pImpl, custring_view_array d_strings, unsigned int count ) { auto execpol = rmm::exec_policy(0); // get individual sizes rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_sizes] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( dstr ) d_sizes[idx] = ALIGN_SIZE(dstr->alloc_size()); }); // create output object char* d_buffer = pImpl->createMemoryFor(d_sizes); if( d_buffer==0 ) return; // this is valid // create offsets rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // finally, copy the strings custring_view_array 
d_results = pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_buffer, d_offsets, d_results] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( !dstr ) return; char* buffer = d_buffer + d_offsets[idx]; d_results[idx] = custring_view::create_from(buffer,*dstr); }); // } // create a new instance containing only the strings at the specified positions // position values can be in any order and can even be repeated NVStrings* NVStrings::gather( const int* pos, unsigned int elements, bool bdevmem ) { unsigned int count = size(); if( count==0 || elements==0 || pos==0 ) return new NVStrings(0); auto execpol = rmm::exec_policy(0); const int* d_pos = pos; if( !bdevmem ) { // copy indexes to device memory d_pos = const_cast<const int*>(device_alloc<int>(elements,0)); CUDA_TRY(cudaMemcpyAsync((void*)d_pos,pos,elements*sizeof(int),cudaMemcpyHostToDevice)) } // create working memory rmm::device_vector<custring_view*> results(elements,nullptr); auto d_results = results.data().get(); rmm::device_vector<bool> flags(elements,false); auto d_flags = flags.data().get(); custring_view_array d_strings = pImpl->getStringsPtr(); // do the gather thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elements, [d_strings, d_pos, count, d_results, d_flags] __device__(unsigned int idx){ int pos = d_pos[idx]; if( (pos < 0) || (pos >= count) ) d_flags[idx] = true; else d_results[idx] = d_strings[pos]; }); // check for invalid position values if( thrust::count(execpol->on(0), flags.begin(), flags.end(), true) ) { if( !bdevmem ) RMM_FREE((void*)d_pos,0); throw std::out_of_range("gather position value out of range"); } // build resulting instance NVStrings* rtn = new NVStrings(elements); NVStrings_init_from_custrings(rtn->pImpl, d_results, elements); if( !bdevmem ) RMM_FREE((void*)d_pos,0); return rtn; } // create a new instance 
containing only the strings where the corresponding mask value is true NVStrings* NVStrings::gather( const bool* mask, bool bdevmem ) { size_t count = size(); if( count==0 || mask==nullptr ) return new NVStrings(0); // copy mask array to device memory if necessary auto execpol = rmm::exec_policy(0); const bool* d_mask = mask; if( !bdevmem ) { d_mask = const_cast<const bool*>(device_alloc<bool>(count,0)); CUDA_TRY(cudaMemcpyAsync((void*)d_mask,mask,count*sizeof(mask[0]),cudaMemcpyHostToDevice,0)) } // create list of index positions from the mask array rmm::device_vector<int> indexes(count); auto d_indexes = indexes.data().get(); auto d_indexes_end = thrust::copy_if(execpol->on(0), thrust::make_counting_iterator<int>(0), thrust::make_counting_iterator<int>(count), d_indexes, [d_mask] __device__ (int idx) { return d_mask[idx]; }); // done with the mask if( !bdevmem ) RMM_FREE((void*)d_mask,0); count = d_indexes_end - d_indexes; return gather( d_indexes, count, true ); } // // s1 = ['a','b,'c','d'] // s2 = ['e','f'] // pos = [1,3] -- must be the same length as s2 // s3 = s1.scatter(s2,pos) // ['a','e','c','f'] // NVStrings* NVStrings::scatter( NVStrings& strs, const int* pos, bool bdevmem ) { unsigned int count = size(); unsigned int elements = strs.size(); if( pos==0 ) throw std::invalid_argument("position parameter cannot be null"); auto execpol = rmm::exec_policy(0); const int* d_pos = pos; if( !bdevmem ) { // copy indexes to device memory d_pos = const_cast<const int*>(device_alloc<int>(elements,0)); CUDA_TRY(cudaMemcpyAsync((void*)d_pos,pos,elements*sizeof(int),cudaMemcpyHostToDevice)) } // The most efficient method here is to build pointer array // applying the parameters to the specified positions and // then build a new instance from the resulting pointers. 
rmm::device_vector<custring_view*> results(count,nullptr); auto d_results = results.data().get(); custring_view_array d_strings = pImpl->getStringsPtr(); custring_view_array d_new_strings = strs.pImpl->getStringsPtr(); thrust::copy( execpol->on(0), d_strings, d_strings+count, d_results ); thrust::scatter( execpol->on(0), d_new_strings, d_new_strings+elements, d_pos, d_results ); // build resulting instance NVStrings* rtn = new NVStrings(count); NVStrings_init_from_custrings(rtn->pImpl, d_results, count); if( !bdevmem ) RMM_FREE((void*)d_pos,0); return rtn; } // // s1 = ['a','b,'c','d'] // pos = [1,3] // s3 = s1.scatter('e',pos,2) // ['a','e','c','e'] // NVStrings* NVStrings::scatter( const char* str, const int* pos, unsigned int elements, bool bdevmem ) { unsigned int count = size(); if( pos==nullptr ) throw std::invalid_argument("parameter cannot be null"); auto execpol = rmm::exec_policy(0); // copy string to device custring_view* d_repl = custring_from_host(str); const int* d_pos = pos; if( !bdevmem ) { // copy indexes to device memory d_pos = const_cast<const int*>(device_alloc<int>(elements,0)); CUDA_TRY(cudaMemcpyAsync((void*)d_pos,pos,elements*sizeof(int),cudaMemcpyHostToDevice)) } // create result output array rmm::device_vector<custring_view*> results(count,nullptr); auto d_results = results.data().get(); custring_view_array d_strings = pImpl->getStringsPtr(); thrust::copy( execpol->on(0), d_strings, d_strings+count, d_results ); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elements, [d_pos, count, d_repl, d_results] __device__ (unsigned int idx) { int pos = d_pos[idx]; if( (pos >= 0) && (pos < count) ) d_results[pos] = d_repl; }); // build resulting instance NVStrings* rtn = new NVStrings(count); NVStrings_init_from_custrings(rtn->pImpl, d_results, count); if( !bdevmem ) RMM_FREE((void*)d_pos,0); RMM_FREE((void*)d_repl,0); return rtn; } NVStrings* NVStrings::sublist( unsigned int start, unsigned int end, int step ) { 
unsigned int count = size(); if( end > count ) end = count; if( start > count ) start = count; if( step==0 ) step = 1; if( start == end ) return new NVStrings(0); if( ((step > 0) && (start > end)) || ((step < 0) && (start < end)) ) return new NVStrings(0); unsigned int elems = (unsigned int)std::abs((int)(end-start)); unsigned int abs_step = (unsigned int)std::abs(step); elems = (elems + abs_step -1)/abs_step; // adjust for steps auto execpol = rmm::exec_policy(0); rmm::device_vector<int> indexes(elems); thrust::sequence(execpol->on(0),indexes.begin(),indexes.end(),(int)start,step); return gather(indexes.data().get(),elems,true); } // remove the specified strings and return a new instance NVStrings* NVStrings::remove_strings( const int* pos, unsigned int elements, bool bdevmem ) { unsigned int count = size(); if( count==0 ) return new NVStrings(0); if( elements==0 || pos==0 ) return copy(); auto execpol = rmm::exec_policy(0); int* dpos = device_alloc<int>(elements,0); if( bdevmem ) CUDA_TRY( cudaMemcpyAsync((void*)dpos,pos,elements*sizeof(unsigned int),cudaMemcpyDeviceToDevice)) else CUDA_TRY( cudaMemcpyAsync((void*)dpos,pos,elements*sizeof(unsigned int),cudaMemcpyHostToDevice)) // sort the position values thrust::sort(execpol->on(0),dpos,dpos+elements,thrust::greater<int>()); // also should remove duplicates int* nend = thrust::unique(execpol->on(0),dpos,dpos+elements,thrust::equal_to<int>()); elements = (unsigned int)(nend - dpos); if( count < elements ) { RMM_FREE(dpos,0); fprintf(stderr,"remove_strings: more positions (%u) specified than the number of strings (%u)\n",elements,count); return nullptr; } // build array to hold positions which are not to be removed by marking deleted positions with -1 rmm::device_vector<int> dnpos(count); thrust::sequence(execpol->on(0),dnpos.begin(),dnpos.end()); int* d_npos = dnpos.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elements, [dpos, d_npos, count] __device__ (unsigned 
int idx) { int pos = dpos[idx]; if( (pos >= 0) && (pos < count) ) d_npos[pos] = -1; }); // now remove the positions marked with -1 int* dend = thrust::remove_if(execpol->on(0),d_npos,d_npos+count,[] __device__ (int val) { return val < 0; }); unsigned int new_count = (unsigned int)(dend-d_npos); // gather string pointers based on indexes in dnpos (new-positions) custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<custring_view*> results(new_count,nullptr); custring_view_array d_results = results.data().get(); thrust::gather(execpol->on(0),d_npos,d_npos+new_count,d_strings,d_results); // create output object from results pointers NVStrings* rtn = new NVStrings(new_count); NVStrings_init_from_custrings(rtn->pImpl, d_results, new_count); RMM_FREE(dpos,0); return rtn; } // this sorts the strings into a new instance; // a sorted strings list can improve performance by reducing divergence NVStrings* NVStrings::sort( sorttype stype, bool ascending, bool nullfirst ) { unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); auto execpol = rmm::exec_policy(0); // copy the pointers so we can sort them rmm::device_vector<custring_view*> results(count,nullptr); custring_view_array d_results = results.data().get(); thrust::copy( execpol->on(0), d_strings, d_strings+count, d_results ); thrust::sort(execpol->on(0), d_results, d_results+count, [stype, ascending, nullfirst] __device__( custring_view*& lhs, custring_view*& rhs ) { if( lhs==0 || rhs==0 ) return (nullfirst ? rhs!=0 : lhs!=0); // null < non-null // allow sorting by name and length int diff = 0; if( stype & NVStrings::length ) diff = lhs->size() - rhs->size(); if( diff==0 && (stype & NVStrings::name) ) diff = lhs->compare(*rhs); return (ascending ? 
(diff < 0) : (diff > 0)); }); // build new instance from the sorted pointers NVStrings* rtn = new NVStrings(count); NVStrings_init_from_custrings( rtn->pImpl, d_results, count ); return rtn; } // just provide the index order and leave the strings intact int NVStrings::order( sorttype stype, bool ascending, unsigned int* indexes, bool nullfirst, bool todevice ) { unsigned int count = size(); unsigned int* d_indexes = indexes; auto execpol = rmm::exec_policy(0); if( !todevice ) d_indexes = device_alloc<unsigned int>(count,0); thrust::sequence(execpol->on(0), d_indexes, d_indexes+count); // custring_view_array d_strings = pImpl->getStringsPtr(); thrust::sort(execpol->on(0), d_indexes, d_indexes+count, [d_strings, stype, ascending, nullfirst] __device__( unsigned int& lidx, unsigned int& ridx ) { custring_view* lhs = d_strings[lidx]; custring_view* rhs = d_strings[ridx]; if( lhs==0 || rhs==0 ) return (nullfirst ? rhs!=0 : lhs!=0); // allow sorting by name and length int diff = 0; if( stype & NVStrings::length ) diff = lhs->size() - rhs->size(); if( diff==0 && (stype & NVStrings::name) ) diff = lhs->compare(*rhs); return (ascending ? (diff < 0) : (diff > 0)); }); // if( !todevice ) { CUDA_TRY(cudaMemcpyAsync(indexes,d_indexes,count*sizeof(unsigned int),cudaMemcpyDeviceToHost)) RMM_FREE(d_indexes,0); } return 0; }
2284b0f75b347e3bfdd290fd9de1f72650d7cbee.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/kernel/cuda_graph_support.h" #include "oneflow/user/kernels/cublas_fused_mlp_util.cuh" // CUBLAS_AUX_EPILOGUE only support in cuda11.4 or higher version, in cuda11.4 it need static link. #if TORCH_HIP_VERSION >= 11060 namespace oneflow { namespace { template<typename T> class CublasBiasAddReluMatmulGradKernel final : public user_op::OpKernel, public user_op::CudaGraphSupport { public: CublasBiasAddReluMatmulGradKernel() = default; ~CublasBiasAddReluMatmulGradKernel() override = default; std::shared_ptr<user_op::OpKernelCache> InitOpKernelCache( user_op::KernelCacheContext* ctx) const override { return CreateCublasFusedMLPKernelCache(); } private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState*, const user_op::OpKernelCache* cache) const override { const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const user_op::Tensor* weight = ctx->Tensor4ArgNameAndIndex("weight", 0); const user_op::Tensor* aux = ctx->Tensor4ArgNameAndIndex("aux", 0); user_op::Tensor* d_bias = ctx->Tensor4ArgNameAndIndex("d_bias", 0); user_op::Tensor* d_grad = ctx->Tensor4ArgNameAndIndex("d_grad", 0); const auto* matmul_grad_cache = CHECK_NOTNULL(dynamic_cast<const CublasFusedMLPKernelCache*>(cache)); auto* cuda_stream = ctx->stream()->As<ep::CudaStream>(); const DataType 
data_type = dy->data_type(); const hipblasComputeType_t cublas_compute_dtype = GetComputeType(data_type); const hipDataType cuda_data_type = GetCudaDataType(data_type); size_t cublas_m = 0, cublas_n = 0, cublas_k = 0; int64_t cublas_lda = 0, cublas_ldb = 0, cublas_ldc = 0; const double alpha = ctx->Attr<double>("alpha"); const auto sp_alpha = GetCublasScalarParameter(alpha, cublas_compute_dtype); const double beta = 0.0; const auto sp_beta = GetCublasScalarParameter(beta, cublas_compute_dtype); // currently only support 2D matmul. DimVector dy_shape(2); dy->shape_view().ToDimVector(&dy_shape); DimVector weight_shape(2); weight->shape_view().ToDimVector(&weight_shape); cublasLtEpilogue_t epilogue = CUBLASLT_EPILOGUE_DRELU_BGRAD; InferMatmulCublasMNK(dy_shape, weight_shape, /*transpose_a=*/ep::primitive::BlasTransposeType::N, /*transpose_b=*/ep::primitive::BlasTransposeType::N, &cublas_m, &cublas_n, &cublas_k, &cublas_lda, &cublas_ldb, &cublas_ldc); SetCublasAttr(matmul_grad_cache, cublas_compute_dtype, cuda_data_type, /*need_aux=*/true, /*transpose_a=*/ep::primitive::BlasTransposeType::N, /*transpose_b=*/ep::primitive::BlasTransposeType::N, epilogue, d_bias->dptr(), aux->dptr(), cublas_m, cublas_n, cublas_k, cublas_lda, cublas_ldb, cublas_ldc); /* a = dy, b = weight cublas_a=weight, cublas_b=dy */ OF_CUBLAS_CHECK( cublasLtMatmul(cuda_stream->cublas_lt_handle(), matmul_grad_cache->operation_desc, &sp_alpha, weight->dptr(), matmul_grad_cache->cublas_a_desc, dy->dptr(), matmul_grad_cache->cublas_b_desc, &sp_beta, d_grad->mut_dptr(), matmul_grad_cache->cublas_c_desc, d_grad->mut_dptr(), matmul_grad_cache->cublas_c_desc, nullptr, cuda_stream->cublas_workspace(), cuda_stream->cublas_workspace_size(), cuda_stream->cuda_stream())); }; bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_CUBLAS_BIAS_ADD_RELU_MATMUL_GRAD_KERNEL(dtype) \ REGISTER_USER_KERNEL("cublas_bias_add_relu_matmul_grad") \ 
.SetCreateFn<CublasBiasAddReluMatmulGradKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \ && (user_op::HobDataType("weight", 0) == GetDataType<dtype>::value)); REGISTER_CUBLAS_BIAS_ADD_RELU_MATMUL_GRAD_KERNEL(float) REGISTER_CUBLAS_BIAS_ADD_RELU_MATMUL_GRAD_KERNEL(double) REGISTER_CUBLAS_BIAS_ADD_RELU_MATMUL_GRAD_KERNEL(half) } // namespace } // namespace oneflow #endif // TORCH_HIP_VERSION >= 11060
2284b0f75b347e3bfdd290fd9de1f72650d7cbee.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/kernel/cuda_graph_support.h" #include "oneflow/user/kernels/cublas_fused_mlp_util.cuh" // CUBLAS_AUX_EPILOGUE only support in cuda11.4 or higher version, in cuda11.4 it need static link. #if CUDA_VERSION >= 11060 namespace oneflow { namespace { template<typename T> class CublasBiasAddReluMatmulGradKernel final : public user_op::OpKernel, public user_op::CudaGraphSupport { public: CublasBiasAddReluMatmulGradKernel() = default; ~CublasBiasAddReluMatmulGradKernel() override = default; std::shared_ptr<user_op::OpKernelCache> InitOpKernelCache( user_op::KernelCacheContext* ctx) const override { return CreateCublasFusedMLPKernelCache(); } private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState*, const user_op::OpKernelCache* cache) const override { const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const user_op::Tensor* weight = ctx->Tensor4ArgNameAndIndex("weight", 0); const user_op::Tensor* aux = ctx->Tensor4ArgNameAndIndex("aux", 0); user_op::Tensor* d_bias = ctx->Tensor4ArgNameAndIndex("d_bias", 0); user_op::Tensor* d_grad = ctx->Tensor4ArgNameAndIndex("d_grad", 0); const auto* matmul_grad_cache = CHECK_NOTNULL(dynamic_cast<const CublasFusedMLPKernelCache*>(cache)); auto* cuda_stream = ctx->stream()->As<ep::CudaStream>(); const DataType data_type = dy->data_type(); const cublasComputeType_t 
cublas_compute_dtype = GetComputeType(data_type); const cudaDataType_t cuda_data_type = GetCudaDataType(data_type); size_t cublas_m = 0, cublas_n = 0, cublas_k = 0; int64_t cublas_lda = 0, cublas_ldb = 0, cublas_ldc = 0; const double alpha = ctx->Attr<double>("alpha"); const auto sp_alpha = GetCublasScalarParameter(alpha, cublas_compute_dtype); const double beta = 0.0; const auto sp_beta = GetCublasScalarParameter(beta, cublas_compute_dtype); // currently only support 2D matmul. DimVector dy_shape(2); dy->shape_view().ToDimVector(&dy_shape); DimVector weight_shape(2); weight->shape_view().ToDimVector(&weight_shape); cublasLtEpilogue_t epilogue = CUBLASLT_EPILOGUE_DRELU_BGRAD; InferMatmulCublasMNK(dy_shape, weight_shape, /*transpose_a=*/ep::primitive::BlasTransposeType::N, /*transpose_b=*/ep::primitive::BlasTransposeType::N, &cublas_m, &cublas_n, &cublas_k, &cublas_lda, &cublas_ldb, &cublas_ldc); SetCublasAttr(matmul_grad_cache, cublas_compute_dtype, cuda_data_type, /*need_aux=*/true, /*transpose_a=*/ep::primitive::BlasTransposeType::N, /*transpose_b=*/ep::primitive::BlasTransposeType::N, epilogue, d_bias->dptr(), aux->dptr(), cublas_m, cublas_n, cublas_k, cublas_lda, cublas_ldb, cublas_ldc); /* a = dy, b = weight cublas_a=weight, cublas_b=dy */ OF_CUBLAS_CHECK( cublasLtMatmul(cuda_stream->cublas_lt_handle(), matmul_grad_cache->operation_desc, &sp_alpha, weight->dptr(), matmul_grad_cache->cublas_a_desc, dy->dptr(), matmul_grad_cache->cublas_b_desc, &sp_beta, d_grad->mut_dptr(), matmul_grad_cache->cublas_c_desc, d_grad->mut_dptr(), matmul_grad_cache->cublas_c_desc, nullptr, cuda_stream->cublas_workspace(), cuda_stream->cublas_workspace_size(), cuda_stream->cuda_stream())); }; bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_CUBLAS_BIAS_ADD_RELU_MATMUL_GRAD_KERNEL(dtype) \ REGISTER_USER_KERNEL("cublas_bias_add_relu_matmul_grad") \ .SetCreateFn<CublasBiasAddReluMatmulGradKernel<dtype>>() \ 
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \ && (user_op::HobDataType("weight", 0) == GetDataType<dtype>::value)); REGISTER_CUBLAS_BIAS_ADD_RELU_MATMUL_GRAD_KERNEL(float) REGISTER_CUBLAS_BIAS_ADD_RELU_MATMUL_GRAD_KERNEL(double) REGISTER_CUBLAS_BIAS_ADD_RELU_MATMUL_GRAD_KERNEL(half) } // namespace } // namespace oneflow #endif // CUDA_VERSION >= 11060
39a3c39cd4950aa9695784cddf7e4148f6110984.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "reduceUnrolling8New.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *g_idata = NULL; hipMalloc(&g_idata, XSIZE*YSIZE); int *g_odata = NULL; hipMalloc(&g_odata, XSIZE*YSIZE); unsigned int n = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( reduceUnrolling8New), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( reduceUnrolling8New), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( reduceUnrolling8New), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
39a3c39cd4950aa9695784cddf7e4148f6110984.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "reduceUnrolling8New.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *g_idata = NULL; cudaMalloc(&g_idata, XSIZE*YSIZE); int *g_odata = NULL; cudaMalloc(&g_odata, XSIZE*YSIZE); unsigned int n = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); reduceUnrolling8New<<<gridBlock,threadBlock>>>(g_idata,g_odata,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { reduceUnrolling8New<<<gridBlock,threadBlock>>>(g_idata,g_odata,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { reduceUnrolling8New<<<gridBlock,threadBlock>>>(g_idata,g_odata,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
759af7bd9dbc5382d46b8042d908917ffe6016c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zlascl_diag.cu normal z -> s, Sat Nov 15 19:53:59 2014 */ #include "common_magma.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void slascl_diag_lower(int m, int n, const float* D, int ldd, float* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for(int j=0; j < n; j++ ) A[j*lda] /= D[j+j*ldd]; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void slascl_diag_upper(int m, int n, const float *D, int ldd, float *A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for(int j=0; j < n; j++ ) A[j*lda] /= D[ind+ind*ldd]; } } /** Purpose ------- SLASCL2 scales the M by N real matrix A by the real diagonal matrix dD. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- \param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. \param[in] m INTEGER The number of rows of the matrix A. M >= 0. \param[in] n INTEGER The number of columns of the matrix A. N >= 0. \param[in] dD REAL vector, dimension (M) The diagonal matrix containing the scalar factors. Stored as a vector. \param[in,out] dA REAL array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. \param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). 
\param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_slascl_diag_q( magma_type_t type, magma_int_t m, magma_int_t n, const float *dD, magma_int_t lddd, float *dA, magma_int_t ldda, magma_int_t *info, magma_queue_t queue ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -5; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( (m + NB - 1)/NB ); dim3 threads( NB ); if (type == MagmaLower) { hipLaunchKernelGGL(( slascl_diag_lower) , dim3(grid), dim3(threads), 0, queue , m, n, dD, lddd, dA, ldda); } else if (type == MagmaUpper) { hipLaunchKernelGGL(( slascl_diag_upper) , dim3(grid), dim3(threads), 0, queue , m, n, dD, lddd, dA, ldda); } } /** @see magmablas_slascl2_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_slascl_diag( magma_type_t type, magma_int_t m, magma_int_t n, const float *dD, magma_int_t lddd, float *dA, magma_int_t ldda, magma_int_t *info ) { magmablas_slascl_diag_q( type, m, n, dD, lddd, dA, ldda, info, magma_stream ); }
759af7bd9dbc5382d46b8042d908917ffe6016c7.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zlascl_diag.cu normal z -> s, Sat Nov 15 19:53:59 2014 */ #include "common_magma.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void slascl_diag_lower(int m, int n, const float* D, int ldd, float* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for(int j=0; j < n; j++ ) A[j*lda] /= D[j+j*ldd]; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void slascl_diag_upper(int m, int n, const float *D, int ldd, float *A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for(int j=0; j < n; j++ ) A[j*lda] /= D[ind+ind*ldd]; } } /** Purpose ------- SLASCL2 scales the M by N real matrix A by the real diagonal matrix dD. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- \param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. \param[in] m INTEGER The number of rows of the matrix A. M >= 0. \param[in] n INTEGER The number of columns of the matrix A. N >= 0. \param[in] dD REAL vector, dimension (M) The diagonal matrix containing the scalar factors. Stored as a vector. \param[in,out] dA REAL array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. \param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). \param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. 
@ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_slascl_diag_q( magma_type_t type, magma_int_t m, magma_int_t n, const float *dD, magma_int_t lddd, float *dA, magma_int_t ldda, magma_int_t *info, magma_queue_t queue ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -5; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( (m + NB - 1)/NB ); dim3 threads( NB ); if (type == MagmaLower) { slascl_diag_lower <<< grid, threads, 0, queue >>> (m, n, dD, lddd, dA, ldda); } else if (type == MagmaUpper) { slascl_diag_upper <<< grid, threads, 0, queue >>> (m, n, dD, lddd, dA, ldda); } } /** @see magmablas_slascl2_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_slascl_diag( magma_type_t type, magma_int_t m, magma_int_t n, const float *dD, magma_int_t lddd, float *dA, magma_int_t ldda, magma_int_t *info ) { magmablas_slascl_diag_q( type, m, n, dD, lddd, dA, ldda, info, magma_stream ); }
34d169293d10878997e9c2d08d238aaddcb606c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // #include "ProjHelperFun.cu.h" #include "Constants.h" #include "TridagPar.h" #include "../include/CudaUtilProj.cu.h" //#include "ProjHost.cu" #define EPSILON 0.001 #define T 32 #define YX(k,j,i) ((k)*(numY)*(numX)+(j)*(numX)+(i)) //[-][numY][numX] #define XY(k,j,i) ((k)*(numY)*(numX)+(j)*(numY)+(i)) //[-][numX][numY] #define ZZ(k,j,i) (k*(numZ)*(numZ)+(j)*(numZ)+(i)) //[-][numZ][numZ] #define D4ID(j,i) ((j)*4+(i)) #define Y4(j,i) ((j)*numY+(i)) //{{{KERNELS ------ __global__ void d_initTimeline( REAL* d_timeline, const unsigned numT, const REAL t){ unsigned gid = blockIdx.x*blockDim.x + threadIdx.x; if(gid < numT) { d_timeline[gid] = t*gid / (numT-1); } } __global__ void d_initNUM( REAL* d_num, unsigned int num_size, const REAL d, unsigned myIndex, const REAL s){ const unsigned long gid = blockIdx.x*blockDim.x + threadIdx.x; if(gid < num_size) { d_num[gid] = gid*d - myIndex*d + s; } } __global__ void d_initOperator( REAL* d_x, unsigned int x_size, REAL* d_dxx){ const unsigned long gid = blockIdx.x*blockDim.x + threadIdx.x; if(gid < x_size) { REAL dxl, dxu; if(gid == 0){ // lower boundary dxl = 0.0; dxu = d_x[1] - d_x[0]; d_dxx[0] = 0.0; d_dxx[1] = 0.0; d_dxx[2] = 0.0; d_dxx[3] = 0.0; }else if(gid == x_size-1){ // upper boundary dxl = d_x[x_size-1] - d_x[x_size-2]; dxu = 0.0; d_dxx[(x_size-1)*4+0] = 0.0; d_dxx[(x_size-1)*4+1] = 0.0; d_dxx[(x_size-1)*4+2] = 0.0; d_dxx[(x_size-1)*4+3] = 0.0; }else{ dxl = d_x[gid] - d_x[gid-1]; dxu = d_x[gid+1] - d_x[gid]; d_dxx[gid*4+0] = 2.0/dxl/(dxl+dxu); d_dxx[gid*4+1] = -2.0*(1.0/dxl + 1.0/dxu)/(dxl+dxu); d_dxx[gid*4+2] = 2.0/dxu/(dxl+dxu); d_dxx[gid*4+3] = 0.0; } } } __global__ void d_setPayoff(REAL* d_result, REAL* d_x, unsigned int x_size, unsigned int y_size, unsigned int z_size){ unsigned int x = blockDim.x*blockIdx.x + threadIdx.x; unsigned int y = blockDim.y*blockIdx.y + threadIdx.y; unsigned int z = blockDim.z*blockIdx.z + 
threadIdx.z; if(x < x_size && y < y_size && z < z_size){ d_result[z*y_size*x_size + y*x_size + x] = max(d_x[y]-(0.001*z), (REAL)0.0); } } __global__ void d_updateParams(REAL* d_varX, REAL* d_varY, REAL* d_x, REAL* d_y, REAL* d_timeline, int g, REAL alpha, REAL beta, REAL nu, unsigned int numX, unsigned int numY){ unsigned int i = blockDim.x*blockIdx.x + threadIdx.x; unsigned int j = blockDim.y*blockIdx.y + threadIdx.y; if(i >= numX || j >= numY) return; d_varX[i*numY+j] = exp(2.0*( beta*log(d_x[i]) + d_y[j] - 0.5*nu*nu*d_timeline[g])); d_varY[i*numY+j] = exp(2.0*( alpha*log(d_x[i]) + d_y[j] - 0.5*nu*nu*d_timeline[g])); } __global__ void d_updateParams_interchange(REAL* d_varX, REAL* d_varY, REAL* d_x, REAL* d_y, REAL* d_timeline, int g, REAL alpha, REAL beta, REAL nu, unsigned int numX, unsigned int numY){ unsigned int i = blockDim.x*blockIdx.x + threadIdx.x; unsigned int j = blockDim.y*blockIdx.y + threadIdx.y; if(j >= numX || i >= numY) return; d_varX[j*numY+i] = exp(2.0*( beta*log(d_x[j]) + d_y[i] - 0.5*nu*nu*d_timeline[g])); d_varY[j*numY+i] = exp(2.0*( alpha*log(d_x[j]) + d_y[i] - 0.5*nu*nu*d_timeline[g])); } __global__ void d_updateParams_sh(REAL* d_varX, REAL* d_varY, REAL* d_x, REAL* d_y, REAL* d_timeline, unsigned int g, REAL alpha, REAL beta, REAL nu, unsigned int numX, unsigned int numY){ __shared__ REAL sh_varX[T][T+1], sh_varY[T][T+1]; // __shared__ REAL sh_x[T], sh_y[T]; // unsigned int i = blockDim.x*blockIdx.x + threadIdx.x; //numY unsigned int j = blockDim.y*blockIdx.y + threadIdx.y; //numX int tidy = threadIdx.y; int tidx = threadIdx.x; if(j >= numX || i >= numY) return; // shared memory store operation sh_varX[tidy][tidx] = d_varX[j*numY+i]; sh_varY[tidy][tidx] = d_varY[j*numY+i]; sh_x[tidy] = d_x[j]; sh_y[tidx] = d_y[i]; __syncthreads(); sh_varX[tidy][tidx] = exp(2.0*( beta*log(sh_x[tidy]) + sh_y[tidx] - 0.5*nu*nu*d_timeline[g])); sh_varY[tidy][tidx] = exp(2.0*( alpha*log(sh_x[tidy]) + sh_y[tidx] - 0.5*nu*nu*d_timeline[g])); d_varX[j*numY+i] = 
sh_varX[tidy][tidx]; d_varY[j*numY+i] = sh_varY[tidy][tidx]; } __global__ void d_explicit_xy_implicit_x(REAL* u, REAL* v, REAL* a, REAL* b, REAL* c, REAL* varX, REAL* varY, REAL* timeline, REAL* dxx, REAL* dyy, REAL* result, unsigned int g, unsigned numX, unsigned numY, unsigned outer, unsigned numZ){ unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; //Outer unsigned int j = blockDim.y * blockIdx.y + threadIdx.y; //numY unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; //numX if(k >= outer || j >= numY || i >= numX) return; // explicit x u[YX(k,j,i)] = (1.0/(timeline[g+1]-timeline[g])) *result[XY(k,i,j)]; if(i > 0) { u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,0)] ) * result[XY(k,i-1,j)]; } u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,1)] ) * result[XY(k,i,j)]; if(i < numX-1) { u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,2)] ) * result[XY(k,i+1,j)]; } // explicit y ; RAW v, write u v[XY(k,i,j)] = 0.0; if(j > 0) { v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[D4ID(j,0)] ) * result[XY(k,i,j-1)]; } v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[D4ID(j,1)] ) * result[XY(k,i,j)]; if(j < numY-1) { v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[D4ID(j,2)] ) * result[XY(k,i,j+1)]; } u[YX(k,j,i)] += v[XY(k,i,j)]; // implicit x // write a,b,c a[ZZ(k,j,i)] = - 0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,0)]); b[ZZ(k,j,i)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,1)]); c[ZZ(k,j,i)] = - 0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,2)]); } //interchange the two inner loop, also need transpose u, dyy, a ,b,c __global__ void d_explicit_xy_implicit_x_interchange(REAL* u_tr, REAL* v, REAL* a, REAL* b, REAL* c, REAL* varX, REAL* varY, REAL* timeline, REAL* dxx, REAL* dyy_tr, REAL* result, unsigned int g, unsigned numX, unsigned numY, unsigned outer, unsigned numZ){ unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; //Outer unsigned int i = blockDim.y * blockIdx.y + threadIdx.y; //numX unsigned int j = blockDim.x * blockIdx.x + 
threadIdx.x; //numY if(k >= outer || j >= numY || i >= numX) return; // explicit x u_tr[XY(k,i,j)] = (1.0/(timeline[g+1]-timeline[g])) *result[XY(k,i,j)]; if(i > 0) { u_tr[XY(k,i,j)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,0)] ) * result[XY(k,i-1,j)]; } u_tr[XY(k,i,j)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,1)] ) * result[XY(k,i,j)]; if(i < numX-1) { u_tr[XY(k,i,j)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,2)] ) * result[XY(k,i+1,j)]; } // explicit y ; RAW v, write u v[XY(k,i,j)] = 0.0; if(j > 0) { v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy_tr[Y4(0,j)] ) * result[XY(k,i,j-1)]; } v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy_tr[Y4(1,j)] ) * result[XY(k,i,j)]; if(j < numY-1) { v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy_tr[Y4(2,j)] ) * result[XY(k,i,j+1)]; } u_tr[XY(k,i,j)] += v[XY(k,i,j)]; // implicit x // write a,b,c a[ZZ(k,i,j)] = - 0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,0)]); b[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,1)]); c[ZZ(k,i,j)] = - 0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,2)]); } #define UI(k,j,i) ((k)*(middle)*(n)+(j)*(n)+(i)) __global__ void d_tridag_implicit_y( REAL* a, REAL* b, REAL* c, REAL* r, int n, REAL* u, REAL* uu, // tridag unsigned numX, unsigned numY, unsigned outer, unsigned numZ, unsigned middle){ unsigned int j = blockDim.x*blockIdx.x + threadIdx.x; //numX unsigned int k = blockDim.y*blockIdx.y + threadIdx.y; //outer if(k >= outer || j >= middle) return; REAL beta; u[UI(k,j,0)] = r[ZZ(k,j,0)]; //u[k*numX*numY + j*numY + i] uu[ZZ(k,j,0)] = b[ZZ(k,j,0)]; for(int i=1; i< n; i++) { beta = a[ZZ(k,j,i)] / uu[ZZ(k,j,i-1)]; uu[ZZ(k,j,i)] = b[ZZ(k,j,i)] - beta*c[ZZ(k,j,i-1)]; u[UI(k,j,i)] = r[ZZ(k,j,i)] - beta*u[UI(k,j,i-1)]; } u[UI(k,j,n-1)] = u[UI(k,j,n-1)] / uu[ZZ(k,j,n-1)]; for(int i=n-2; i>=0; i--) { u[UI(k,j,i)] = (u[UI(k,j,i)] - c[ZZ(k,j,i)]*u[UI(k,j,i+1)] ) / uu[ZZ(k,j,i)]; } } __global__ void d_tridag_implicit_x( REAL* a, REAL* b, REAL* c, REAL* r, int n, REAL* u, REAL* uu, // tridag unsigned numX, unsigned 
numY, unsigned outer, unsigned numZ, unsigned middle){ unsigned int j = blockDim.x*blockIdx.x + threadIdx.x; //numY unsigned int k = blockDim.y*blockIdx.y + threadIdx.y; //outer if(k >= outer || j >= middle) return; REAL beta; u[UI(k,j,0)] = r[UI(k,j,0)]; uu[ZZ(k,j,0)] = b[ZZ(k,j,0)]; //uu size?? [numZ][numZ] for(int i=1; i< n; i++) { beta = a[ZZ(k,j,i)] / uu[ZZ(k,j,i-1)]; uu[ZZ(k,j,i)] = b[ZZ(k,j,i)] - beta*c[ZZ(k,j,i-1)]; u[UI(k,j,i)] = r[UI(k,j,i)] - beta*u[UI(k,j,i-1)]; } u[UI(k,j,n-1)] = u[UI(k,j,n-1)] / uu[ZZ(k,j,n-1)]; for(int i=n-2; i>=0; i--) { u[UI(k,j,i)] = (u[UI(k,j,i)] - c[ZZ(k,j,i)]*u[UI(k,j,i+1)] ) / uu[ZZ(k,j,i)]; } } __global__ void d_implicit_y(REAL* u, REAL* v, REAL* a, REAL* b, REAL* c, REAL* y, REAL* varY, REAL* timeline, REAL* dyy, unsigned int g, unsigned numX, unsigned numY, unsigned outer, unsigned numZ){ unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; //Outer unsigned int i = blockDim.y * blockIdx.y + threadIdx.y; //numX unsigned int j = blockDim.x * blockIdx.x + threadIdx.x; //numY if(k >= outer || j >= numY || i >= numX) return; a[ZZ(k,i,j)] = - 0.5*(0.5*varY[XY(0,i,j)]*dyy[D4ID(j,0)]); b[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varY[XY(0,i,j)]*dyy[D4ID(j,1)]); c[ZZ(k,i,j)] = - 0.5*(0.5*varY[XY(0,i,j)]*dyy[D4ID(j,2)]); y[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) * u[YX(k,j,i)] - 0.5*v[XY(k,i,j)]; } __global__ void d_implicit_y_trans(REAL* u_tr, REAL* v, REAL* a, REAL* b, REAL* c, REAL* y, REAL* varY, REAL* timeline, REAL* dyy_tr, unsigned int g, unsigned numX, unsigned numY, unsigned outer, unsigned numZ){ unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; //Outer unsigned int i = blockDim.y * blockIdx.y + threadIdx.y; //numX unsigned int j = blockDim.x * blockIdx.x + threadIdx.x; //numY if(k >= outer || j >= numY || i >= numX) return; a[ZZ(k,i,j)] = - 0.5*(0.5*varY[XY(0,i,j)]*dyy_tr[Y4(0,j)]); b[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varY[XY(0,i,j)]*dyy_tr[Y4(1,j)]); c[ZZ(k,i,j)] 
= - 0.5*(0.5*varY[XY(0,i,j)]*dyy_tr[Y4(2,j)]); y[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) * u_tr[XY(k,i,j)] - 0.5*v[XY(k,i,j)]; } __global__ void sh_implicit_y(REAL* u_tr, REAL* v, REAL* a, REAL* b, REAL* c, REAL* y, REAL* varY, REAL* timeline, REAL* dyy_tr, int g, unsigned numX, unsigned numY, unsigned outer, unsigned numZ){ unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; //Outer unsigned int i = blockDim.y * blockIdx.y + threadIdx.y; //numX unsigned int j = blockDim.x * blockIdx.x + threadIdx.x; //numY if(k >= outer || j >= numY || i >= numX) return; __shared__ REAL sh_varY[T][T+1], // sh_dyy[T][T+1], // sh_u[T][T+1], // sh_v[T][T+1], sh_a[T][T+1], sh_b[T][T+1], sh_c[T][T+1], sh_y[T][T+1]; // int tidy = threadIdx.y; int tidx = threadIdx.x; // copy data from global memory to shared memory // sh_u[tidy][tidx] = u_tr[XY(k,i,j)]; sh_a[tidy][tidx] = a[ZZ(k,i,j)]; sh_b[tidy][tidx] = b[ZZ(k,i,j)]; sh_c[tidy][tidx] = c[ZZ(k,i,j)]; // sh_v[tidy][tidx] = v[XY(k,i,j)] ; sh_varY[tidy][tidx] = varY[i*numY +j]; sh_y[tidy][tidx] = y[ZZ(k,i,j)]; // sh_dyy[tidy][tidx] = (i<4) ? 
dyy_tr[Y4(i,j)] : 0.0; // need transpose __syncthreads(); // a[ZZ(k,i,j)] = - 0.5*(0.5*varY[XY(0,i,j)]*dyy_tr[Y4(0,j)]); // b[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varY[XY(0,i,j)]*dyy_tr[Y4(1,j)]); // c[ZZ(k,i,j)] = - 0.5*(0.5*varY[XY(0,i,j)]*dyy_tr[Y4(2,j)]); // y[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) * u[YX(k,j,i)] - 0.5*v[XY(k,i,j)]; sh_a[tidy][tidx] = - 0.5*(0.5* sh_varY[tidy][tidx] * dyy_tr[Y4(0,j)]); sh_b[tidy][tidx] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*sh_varY[tidy][tidx]*dyy_tr[Y4(1,j)]); sh_c[tidy][tidx] = - 0.5*(0.5*sh_varY[tidy][tidx]*dyy_tr[Y4(2,j)]); sh_y[tidy][tidx] = ( 1.0/(timeline[g+1]-timeline[g])) * u_tr[XY(k,i,j)] - 0.5*v[XY(k,i,j)]; a[ZZ(k,i,j)] = sh_a[tidy][tidx]; a[ZZ(k,i,j)] = sh_b[tidy][tidx]; a[ZZ(k,i,j)] = sh_c[tidy][tidx]; y[ZZ(k,i,j)] = sh_y[tidy][tidx]; } __global__ void sgmMatTranspose( REAL* A, REAL* trA, int rowsA, int colsA ){ __shared__ REAL tile[T][T+1]; int tidx = threadIdx.x; int tidy = threadIdx.y; unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; unsigned int i = blockDim.y * blockIdx.y + threadIdx.y; unsigned int j = blockDim.x * blockIdx.x + threadIdx.x; A += k*rowsA*colsA; trA += k*rowsA*colsA; if( j < colsA && i < rowsA ) tile[tidy][tidx] = A[i* colsA + j]; __syncthreads(); i=blockIdx.y*blockDim.y+tidx; j=blockIdx.x*blockDim.x+tidy; if( j < colsA && i < rowsA ) trA[j*rowsA+i] = tile[tidx][tidy]; } // 2D matrix transpose __global__ void matTranspose2D(REAL* A, REAL* trA, int rowsA, int colsA){ __shared__ REAL tile[T][T+1]; int tidx = threadIdx.x; int tidy = threadIdx.y; int j = blockIdx.x*T + tidx; int i = blockIdx.y*T + tidy; if( j < colsA && i < rowsA ) tile[tidy][tidx] = A[i*colsA+j]; __syncthreads(); i = blockIdx.y*T + threadIdx.x; j = blockIdx.x*T + threadIdx.y; if( j < colsA && i < rowsA ) trA[j*rowsA+i] = tile[tidx][tidy]; } //{{{ wrapper void initGrid_GPU( const REAL s0, const REAL alpha, const REAL nu,const REAL t, const unsigned numX, const unsigned numY, const 
unsigned numT, REAL* d_myX, REAL* d_myY, REAL* d_myTimeline, unsigned myXindex, unsigned myYindex) { const unsigned int BLOCK_SIZE = 256; unsigned int NUM_BLOCKS = ceil(numT / (float)BLOCK_SIZE); hipLaunchKernelGGL(( d_initTimeline), dim3(NUM_BLOCKS),dim3(BLOCK_SIZE), 0, 0, d_myTimeline, numT, t); NUM_BLOCKS = ceil(numX / (float)BLOCK_SIZE); const REAL stdX = 20.0*alpha*s0*sqrt(t); const REAL dx = stdX/numX; hipLaunchKernelGGL(( d_initNUM), dim3(NUM_BLOCKS),dim3(BLOCK_SIZE), 0, 0, d_myX, numX, dx, myXindex, s0); const REAL stdY = 10.0*nu*sqrt(t); const REAL dy = stdY/numY; const REAL logAlpha = log(alpha); NUM_BLOCKS = ceil(numY / (float)BLOCK_SIZE); hipLaunchKernelGGL(( d_initNUM), dim3(NUM_BLOCKS),dim3(BLOCK_SIZE), 0, 0, d_myY, numY, dy, myYindex, logAlpha); } void initOperator_GPU(REAL* d_x, unsigned int x_size, REAL* d_dxx){ const unsigned int BLOCK_SIZE = 256; unsigned int NUM_BLOCKS = ceil(x_size / (float)BLOCK_SIZE); hipLaunchKernelGGL(( d_initOperator), dim3(NUM_BLOCKS),dim3(BLOCK_SIZE), 0, 0, d_x, x_size, d_dxx); } void run_OrigCPU( const unsigned int& outer, const unsigned int& numX, const unsigned int& numY, const unsigned int& numT, const REAL& s0, const REAL& t, const REAL& alpha, const REAL& nu, const REAL& beta, REAL* res // [outer] RESULT ) { //// ---------- GPU version -------------------- //// REAL *h_result; // final result // GPU variables REAL *d_x, *d_y, *d_timeline, *d_dxx, *d_dyy; REAL *d_result, *d_varX, *d_varY; REAL *d_a, *d_b, *d_c, *d_yy, *d_yyy, *d_u, *d_v; // myXindex myYindex are scalars const REAL stdX = 20.0*alpha*s0*sqrt(t); const REAL dx = stdX/numX; unsigned myXindex = static_cast<unsigned>(s0/dx) % numX; unsigned myYindex = static_cast<unsigned>(numY/2.0); unsigned numZ = max(numX,numY); int memsize_X = numX * sizeof(REAL); int memsize_Y = numY * sizeof(REAL); int memsize_T = numT * sizeof(REAL); int memsize_XY = numX * numY * sizeof(REAL); int memsize_OXY = outer * numX * numY * sizeof (REAL); int memsize_OZZ = outer * numZ * 
numZ * sizeof (REAL); // CPU variables h_result = (REAL*) malloc (memsize_OXY); // GPU variables hipMalloc((void**)&d_result, memsize_OXY); //[outer][numX][numY] hipMalloc((void**)&d_varX, memsize_XY); //[numX][numY] hipMalloc((void**)&d_varY, memsize_XY); //[numX][numY] hipMalloc((void**)&d_x, memsize_X); //[numX] hipMalloc((void**)&d_y, memsize_Y); //[numY] hipMalloc((void**)&d_timeline, memsize_T); //[numT] hipMalloc((void**)&d_dxx, 4 * memsize_X); //[numX][4] hipMalloc((void**)&d_dyy, 4 * memsize_Y); //[numY][4] //a b c yy yyy: [outer][numZ][numZ] hipMalloc((void**)&d_a , memsize_OZZ); hipMalloc((void**)&d_b , memsize_OZZ); hipMalloc((void**)&d_c , memsize_OZZ); hipMalloc((void**)&d_yy , memsize_OZZ); //y in seq code hipMalloc((void**)&d_yyy, memsize_OZZ); //yy in seq code hipMalloc((void**)&d_u , memsize_OXY); //d_u : [outer][numY][numX] hipMalloc((void**)&d_v , memsize_OXY); //d_v : [outer][numX][numY] // variables for transposition REAL * d_u_tr; REAL * d_dyy_tr; REAL * d_a_tr; REAL * d_b_tr; REAL * d_c_tr; hipMalloc((void**)&d_u_tr , memsize_OXY); //d_u : [outer][numY][numX] hipMalloc((void**)&d_dyy_tr, memsize_Y *4); hipMalloc((void**)&d_a_tr, memsize_OZZ); hipMalloc((void**)&d_b_tr, memsize_OZZ); hipMalloc((void**)&d_c_tr, memsize_OZZ); dim3 block_2D(T,T); dim3 grid_2D_4Y(1, ceil(((float)numY)/T)); dim3 grid_2D_XY(ceil(((float)numY)/ T ),ceil(((float)numX)/ T )); dim3 grid_2D_OX(ceil(((float)numX)/T), ceil((float)outer/T)); dim3 grid_2D_OY(ceil(((float)numY)/T), ceil((float)outer/T)); dim3 grid_2D_YX(ceil(((float)numX)/T), ceil(((float)numY)/T)); dim3 block_3D(T, T, 1); dim3 grid_3D_OXY(ceil(((float)numY)/T), ceil(((float)numX)/T), ceil(((float)outer)/1.0)); dim3 grid_3D_OZZ(ceil(((float)numZ)/T), ceil(((float)numZ)/T),ceil(((float)outer)/1.0)); dim3 grid_3D_OYX(ceil(((float)numX)/T), ceil(((float)numY)/T),ceil(((float)outer)/1.0) ); //GPU init initGrid_GPU(s0, alpha, nu,t, numX,numY, numT, d_x, d_y, d_timeline, myXindex, myYindex); initOperator_GPU( d_x, 
numX, d_dxx); initOperator_GPU( d_y, numY, d_dyy); // GPU setPayoff hipLaunchKernelGGL(( d_setPayoff), dim3(grid_3D_OXY), dim3(block_3D), 0, 0, d_result, d_x, numY, numX, outer); // timeline loop for(int g = numT-2;g>=0;--g) { // second outer loop, g //----GPU updateParams hipLaunchKernelGGL(( d_updateParams), dim3(grid_2D_YX), dim3(block_2D) , 0, 0, d_varX, d_varY, d_x, d_y, d_timeline,g, alpha, beta, nu, numX, numY); // d_updateParams_sh<<< grid_2D_XY, block_2D >>>(d_varX, d_varY, d_x, d_y, d_timeline,g, // alpha, beta, nu, numX, numY); // d_updateParams_interchange<<< grid_2D_XY, block_2D >>>(d_varX, d_varY, d_x, d_y, d_timeline,g, // alpha, beta, nu, numX, numY); //---- GPU rollback Part_1 // matTranspose2D<<< grid_2D_4Y, block_2D >>>(d_dyy, d_dyy_tr, numY, 4); // transpose d_dyy hipLaunchKernelGGL(( d_explicit_xy_implicit_x), dim3(grid_3D_OYX), dim3(block_3D), 0, 0, d_u,d_v,d_a,d_b,d_c, d_varX,d_varY,d_timeline,d_dxx,d_dyy,d_result, g, numX, numY, outer, numZ); // d_explicit_xy_implicit_x_interchange<<<grid_3D_OXY, block_3D>>>(d_u_tr,d_v,d_a_tr,d_b_tr,d_c_tr, // d_varX,d_varY,d_timeline,d_dxx,d_dyy_tr,d_result, g, numX, numY, outer, numZ); // transpose back the variables // sgmMatTranspose <<< grid_3D_OXY, block_3D>>>( d_u_tr, d_u, numX, numY ); // sgmMatTranspose <<< grid_3D_OZZ, block_3D>>>( d_a_tr, d_a, numZ, numZ ); // sgmMatTranspose <<< grid_3D_OZZ, block_3D>>>( d_b_tr, d_b, numZ, numZ ); // sgmMatTranspose <<< grid_3D_OZZ, block_3D>>>( d_c_tr, d_c, numZ, numZ ); //------ GPU rollback part-2 hipLaunchKernelGGL(( d_tridag_implicit_x) , dim3(grid_2D_OY), dim3(block_2D) , 0, 0, d_a,d_b,d_c, d_u, numX,d_u,d_yyy,numX,numY,outer,numZ,numY); // -------GPU rollback part-3 hipLaunchKernelGGL(( d_implicit_y), dim3(grid_3D_OXY), dim3(block_3D) , 0, 0, d_u,d_v,d_a,d_b,d_c, d_yy, d_varY,d_timeline, d_dyy, g, numX, numY, outer, numZ); // sgmMatTranspose <<< grid_3D_OYX, block_3D>>>( d_u, d_u_tr, numY, numX ); // transpose u // d_implicit_y_trans<<< grid_3D_OXY, 
block_3D >>>(d_u_tr,d_v,d_a,d_b,d_c, d_yy, // d_varY,d_timeline, d_dyy_tr, g, numX, numY, outer, numZ); //---------- GPU rollback part-4 hipLaunchKernelGGL(( d_tridag_implicit_y) , dim3(grid_2D_OX), dim3(block_2D) , 0, 0, d_a,d_b,d_c,d_yy,numY,d_result,d_yyy,numX,numY,outer,numZ,numX); } // Timeline loop end // read the final result hipMemcpy( h_result , d_result , memsize_OXY , hipMemcpyDeviceToHost); #pragma omp parallel for default(shared) schedule(static) for( unsigned k = 0; k < outer; ++ k ) //outermost loop k res[k] = h_result[XY(k,myXindex,myYindex)]; hipFree(d_x); hipFree(d_y); hipFree(d_dxx);hipFree(d_dyy); hipFree(d_timeline); hipFree(d_result); hipFree(d_varX); hipFree(d_varY); hipFree(d_a); hipFree(d_b);hipFree(d_c); hipFree(d_yy);hipFree(d_yyy); hipFree(d_u); hipFree(d_v); hipFree(d_u_tr); hipFree(d_dyy_tr); hipFree(d_a_tr); hipFree(d_b_tr); hipFree(d_c_tr); free(h_result); // #endif }
34d169293d10878997e9c2d08d238aaddcb606c8.cu
// All functions tested and working -- final version.
// (Original header comment was Chinese: "全部函数测试有效 最终版本".)
#include "ProjHelperFun.cu.h"
#include "Constants.h"
#include "TridagPar.h"
#include "../include/CudaUtilProj.cu.h"
//#include "ProjHost.cu"

#define EPSILON 0.001
#define T 32

// Flattened indexing helpers. They expect numX / numY / numZ to be in scope
// at the point of use (kernel parameters), so they are macros, not functions.
#define YX(k,j,i) ((k)*(numY)*(numX)+(j)*(numX)+(i)) //[-][numY][numX]
#define XY(k,j,i) ((k)*(numY)*(numX)+(j)*(numY)+(i)) //[-][numX][numY]
#define ZZ(k,j,i) (k*(numZ)*(numZ)+(j)*(numZ)+(i))   //[-][numZ][numZ]
#define D4ID(j,i) ((j)*4+(i))
#define Y4(j,i)   ((j)*numY+(i))

//{{{KERNELS ------

// Fill d_timeline[g] = t*g/(numT-1), an even discretisation of [0, t].
// 1-D launch; one thread per time step.
__global__ void d_initTimeline( REAL* d_timeline, const unsigned numT, const REAL t){
    unsigned gid = blockIdx.x*blockDim.x + threadIdx.x;
    if(gid < numT) {
        d_timeline[gid] = t*gid / (numT-1);
    }
}

// Fill a uniformly spaced grid centred so that d_num[myIndex] == s.
// 1-D launch; one thread per grid point.
__global__ void d_initNUM( REAL* d_num, unsigned int num_size, const REAL d,
                           unsigned myIndex, const REAL s){
    const unsigned long gid = blockIdx.x*blockDim.x + threadIdx.x;
    if(gid < num_size) {
        d_num[gid] = gid*d - myIndex*d + s;
    }
}

// Build the 4-coefficient central second-difference stencil for grid d_x
// into d_dxx ([x_size][4], row-major). Boundary rows are zeroed.
__global__ void d_initOperator( REAL* d_x, unsigned int x_size, REAL* d_dxx){
    const unsigned long gid = blockIdx.x*blockDim.x + threadIdx.x;
    if(gid < x_size) {
        REAL dxl, dxu;
        if(gid == 0){ // lower boundary: one-sided, stencil is all zeros
            dxl = 0.0;
            dxu = d_x[1] - d_x[0];
            d_dxx[0] = 0.0;
            d_dxx[1] = 0.0;
            d_dxx[2] = 0.0;
            d_dxx[3] = 0.0;
        }else if(gid == x_size-1){ // upper boundary: stencil is all zeros
            dxl = d_x[x_size-1] - d_x[x_size-2];
            dxu = 0.0;
            d_dxx[(x_size-1)*4+0] = 0.0;
            d_dxx[(x_size-1)*4+1] = 0.0;
            d_dxx[(x_size-1)*4+2] = 0.0;
            d_dxx[(x_size-1)*4+3] = 0.0;
        }else{ // interior: non-uniform-grid second-derivative weights
            dxl = d_x[gid] - d_x[gid-1];
            dxu = d_x[gid+1] - d_x[gid];
            d_dxx[gid*4+0] =  2.0/dxl/(dxl+dxu);
            d_dxx[gid*4+1] = -2.0*(1.0/dxl + 1.0/dxu)/(dxl+dxu);
            d_dxx[gid*4+2] =  2.0/dxu/(dxl+dxu);
            d_dxx[gid*4+3] =  0.0;
        }
    }
}

// Initialise the payoff: result[z][y][x] = max(myX[y] - 0.001*z, 0).
// Launched as (x=numY, y=numX, z=outer); d_x here is the myX grid, indexed
// by the y coordinate (see the call site in run_OrigCPU).
__global__ void d_setPayoff(REAL* d_result, REAL* d_x, unsigned int x_size,
                            unsigned int y_size, unsigned int z_size){
    unsigned int x = blockDim.x*blockIdx.x + threadIdx.x;
    unsigned int y = blockDim.y*blockIdx.y + threadIdx.y;
    unsigned int z = blockDim.z*blockIdx.z + threadIdx.z;
    if(x < x_size && y < y_size && z < z_size){
        d_result[z*y_size*x_size + y*x_size + x] = max(d_x[y]-(0.001*z), (REAL)0.0);
    }
}

// Recompute the local volatility surfaces varX/varY ([numX][numY]) for
// time step g. Thread (x=i over numX, y=j over numY).
__global__ void d_updateParams(REAL* d_varX, REAL* d_varY, REAL* d_x, REAL* d_y,
                               REAL* d_timeline, int g, REAL alpha, REAL beta, REAL nu,
                               unsigned int numX, unsigned int numY){
    unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
    unsigned int j = blockDim.y*blockIdx.y + threadIdx.y;
    if(i >= numX || j >= numY) return;

    d_varX[i*numY+j] = exp(2.0*( beta*log(d_x[i]) + d_y[j] - 0.5*nu*nu*d_timeline[g]));
    d_varY[i*numY+j] = exp(2.0*( alpha*log(d_x[i]) + d_y[j] - 0.5*nu*nu*d_timeline[g]));
}

// Same computation as d_updateParams with the thread axes swapped
// (x over numY, y over numX) for coalescing experiments.
__global__ void d_updateParams_interchange(REAL* d_varX, REAL* d_varY, REAL* d_x, REAL* d_y,
                                           REAL* d_timeline, int g, REAL alpha, REAL beta, REAL nu,
                                           unsigned int numX, unsigned int numY){
    unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
    unsigned int j = blockDim.y*blockIdx.y + threadIdx.y;
    if(j >= numX || i >= numY) return;

    d_varX[j*numY+i] = exp(2.0*( beta*log(d_x[j]) + d_y[i] - 0.5*nu*nu*d_timeline[g]));
    d_varY[j*numY+i] = exp(2.0*( alpha*log(d_x[j]) + d_y[i] - 0.5*nu*nu*d_timeline[g]));
}

// Shared-memory variant of d_updateParams (experimental; call site is
// commented out in run_OrigCPU).
// BUG FIXED: the declarations of sh_x/sh_y were commented out while the
// body still used them, so this kernel did not compile. The initial loads
// of sh_varX/sh_varY from global memory were also removed: they read
// uninitialized memory and were immediately overwritten after the barrier.
// NOTE(review): the early return before __syncthreads() is a divergent
// barrier when the block straddles the grid edge; safe only if numX and
// numY are multiples of T -- TODO confirm at call sites.
__global__ void d_updateParams_sh(REAL* d_varX, REAL* d_varY, REAL* d_x, REAL* d_y,
                                  REAL* d_timeline, unsigned int g,
                                  REAL alpha, REAL beta, REAL nu,
                                  unsigned int numX, unsigned int numY){
    __shared__ REAL sh_varX[T][T+1], sh_varY[T][T+1]; // +1 pad avoids bank conflicts
    __shared__ REAL sh_x[T], sh_y[T];

    unsigned int i = blockDim.x*blockIdx.x + threadIdx.x; // numY
    unsigned int j = blockDim.y*blockIdx.y + threadIdx.y; // numX
    int tidy = threadIdx.y;
    int tidx = threadIdx.x;
    if(j >= numX || i >= numY) return;

    // Stage the grid values once per block.
    sh_x[tidy] = d_x[j];
    sh_y[tidx] = d_y[i];
    __syncthreads();

    sh_varX[tidy][tidx] = exp(2.0*( beta*log(sh_x[tidy])  + sh_y[tidx] - 0.5*nu*nu*d_timeline[g]));
    sh_varY[tidy][tidx] = exp(2.0*( alpha*log(sh_x[tidy]) + sh_y[tidx] - 0.5*nu*nu*d_timeline[g]));

    d_varX[j*numY+i] = sh_varX[tidy][tidx];
    d_varY[j*numY+i] = sh_varY[tidy][tidx];
}

// Fused rollback step: explicit sweep in x, explicit sweep in y, and the
// tridiagonal coefficients (a,b,c) for the implicit x solve.
// Layouts: u is [outer][numY][numX], v/result are [outer][numX][numY],
// a/b/c are [outer][numZ][numZ].
__global__ void d_explicit_xy_implicit_x(REAL* u, REAL* v, REAL* a, REAL* b, REAL* c,
                                         REAL* varX, REAL* varY, REAL* timeline,
                                         REAL* dxx, REAL* dyy, REAL* result,
                                         unsigned int g, unsigned numX, unsigned numY,
                                         unsigned outer, unsigned numZ){
    unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; // outer
    unsigned int j = blockDim.y * blockIdx.y + threadIdx.y; // numY
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; // numX
    if(k >= outer || j >= numY || i >= numX) return;

    // explicit x
    u[YX(k,j,i)] = (1.0/(timeline[g+1]-timeline[g])) * result[XY(k,i,j)];
    if(i > 0) {
        u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,0)] ) * result[XY(k,i-1,j)];
    }
    u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,1)] ) * result[XY(k,i,j)];
    if(i < numX-1) {
        u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,2)] ) * result[XY(k,i+1,j)];
    }

    // explicit y ; RAW on v, then accumulated into u
    v[XY(k,i,j)] = 0.0;
    if(j > 0) {
        v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[D4ID(j,0)] ) * result[XY(k,i,j-1)];
    }
    v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[D4ID(j,1)] ) * result[XY(k,i,j)];
    if(j < numY-1) {
        v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[D4ID(j,2)] ) * result[XY(k,i,j+1)];
    }
    u[YX(k,j,i)] += v[XY(k,i,j)];

    // implicit x: tridiagonal coefficients
    a[ZZ(k,j,i)] = -0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,0)]);
    b[ZZ(k,j,i)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,1)]);
    c[ZZ(k,j,i)] = -0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,2)]);
}

// Loop-interchanged variant of d_explicit_xy_implicit_x. Operates on
// transposed u (u_tr, [outer][numX][numY]), transposed dyy (dyy_tr), and
// transposed a/b/c; call site is commented out in run_OrigCPU.
__global__ void d_explicit_xy_implicit_x_interchange(REAL* u_tr, REAL* v, REAL* a, REAL* b, REAL* c,
                                                     REAL* varX, REAL* varY, REAL* timeline,
                                                     REAL* dxx, REAL* dyy_tr, REAL* result,
                                                     unsigned int g, unsigned numX, unsigned numY,
                                                     unsigned outer, unsigned numZ){
    unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; // outer
    unsigned int i = blockDim.y * blockIdx.y + threadIdx.y; // numX
    unsigned int j = blockDim.x * blockIdx.x + threadIdx.x; // numY
    if(k >= outer || j >= numY || i >= numX) return;

    // explicit x
    u_tr[XY(k,i,j)] = (1.0/(timeline[g+1]-timeline[g])) * result[XY(k,i,j)];
    if(i > 0) {
        u_tr[XY(k,i,j)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,0)] ) * result[XY(k,i-1,j)];
    }
    u_tr[XY(k,i,j)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,1)] ) * result[XY(k,i,j)];
    if(i < numX-1) {
        u_tr[XY(k,i,j)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,2)] ) * result[XY(k,i+1,j)];
    }

    // explicit y ; RAW on v, then accumulated into u_tr
    v[XY(k,i,j)] = 0.0;
    if(j > 0) {
        v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy_tr[Y4(0,j)] ) * result[XY(k,i,j-1)];
    }
    v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy_tr[Y4(1,j)] ) * result[XY(k,i,j)];
    if(j < numY-1) {
        v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy_tr[Y4(2,j)] ) * result[XY(k,i,j+1)];
    }
    u_tr[XY(k,i,j)] += v[XY(k,i,j)];

    // implicit x: tridiagonal coefficients (transposed layout)
    a[ZZ(k,i,j)] = -0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,0)]);
    b[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,1)]);
    c[ZZ(k,i,j)] = -0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,2)]);
}

// Result/right-hand-side indexing for the tridiagonal solvers; middle and n
// are kernel parameters.
#define UI(k,j,i) ((k)*(middle)*(n)+(j)*(n)+(i))

// Thomas algorithm along the y axis: one thread per (outer k, row j over
// `middle`), sequential over the n unknowns. r is indexed with ZZ, the
// solution u with UI.
__global__ void d_tridag_implicit_y( REAL* a, REAL* b, REAL* c, REAL* r, int n,
                                     REAL* u, REAL* uu,
                                     unsigned numX, unsigned numY, unsigned outer,
                                     unsigned numZ, unsigned middle){
    unsigned int j = blockDim.x*blockIdx.x + threadIdx.x; // numX
    unsigned int k = blockDim.y*blockIdx.y + threadIdx.y; // outer
    if(k >= outer || j >= middle) return;

    REAL beta;
    // forward sweep
    u[UI(k,j,0)]  = r[ZZ(k,j,0)];
    uu[ZZ(k,j,0)] = b[ZZ(k,j,0)];
    for(int i=1; i< n; i++) {
        beta = a[ZZ(k,j,i)] / uu[ZZ(k,j,i-1)];
        uu[ZZ(k,j,i)] = b[ZZ(k,j,i)] - beta*c[ZZ(k,j,i-1)];
        u[UI(k,j,i)]  = r[ZZ(k,j,i)] - beta*u[UI(k,j,i-1)];
    }
    // backward substitution
    u[UI(k,j,n-1)] = u[UI(k,j,n-1)] / uu[ZZ(k,j,n-1)];
    for(int i=n-2; i>=0; i--) {
        u[UI(k,j,i)] = (u[UI(k,j,i)] - c[ZZ(k,j,i)]*u[UI(k,j,i+1)] ) / uu[ZZ(k,j,i)];
    }
}

// Thomas algorithm along the x axis; identical to d_tridag_implicit_y
// except that the right-hand side r is indexed with UI (it aliases u at
// the call site, solving in place).
__global__ void d_tridag_implicit_x( REAL* a, REAL* b, REAL* c, REAL* r, int n,
                                     REAL* u, REAL* uu,
                                     unsigned numX, unsigned numY, unsigned outer,
                                     unsigned numZ, unsigned middle){
    unsigned int j = blockDim.x*blockIdx.x + threadIdx.x; // numY
    unsigned int k = blockDim.y*blockIdx.y + threadIdx.y; // outer
    if(k >= outer || j >= middle) return;

    REAL beta;
    u[UI(k,j,0)]  = r[UI(k,j,0)];
    uu[ZZ(k,j,0)] = b[ZZ(k,j,0)]; // uu scratch is [numZ][numZ] per outer slice
    for(int i=1; i< n; i++) {
        beta = a[ZZ(k,j,i)] / uu[ZZ(k,j,i-1)];
        uu[ZZ(k,j,i)] = b[ZZ(k,j,i)] - beta*c[ZZ(k,j,i-1)];
        u[UI(k,j,i)]  = r[UI(k,j,i)] - beta*u[UI(k,j,i-1)];
    }
    u[UI(k,j,n-1)] = u[UI(k,j,n-1)] / uu[ZZ(k,j,n-1)];
    for(int i=n-2; i>=0; i--) {
        u[UI(k,j,i)] = (u[UI(k,j,i)] - c[ZZ(k,j,i)]*u[UI(k,j,i+1)] ) / uu[ZZ(k,j,i)];
    }
}

// Build the implicit-y tridiagonal system: coefficients a/b/c and the
// right-hand side y, from u ([outer][numY][numX]) and v ([outer][numX][numY]).
__global__ void d_implicit_y(REAL* u, REAL* v, REAL* a, REAL* b, REAL* c, REAL* y,
                             REAL* varY, REAL* timeline, REAL* dyy,
                             unsigned int g, unsigned numX, unsigned numY,
                             unsigned outer, unsigned numZ){
    unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; // outer
    unsigned int i = blockDim.y * blockIdx.y + threadIdx.y; // numX
    unsigned int j = blockDim.x * blockIdx.x + threadIdx.x; // numY
    if(k >= outer || j >= numY || i >= numX) return;

    a[ZZ(k,i,j)] = -0.5*(0.5*varY[XY(0,i,j)]*dyy[D4ID(j,0)]);
    b[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varY[XY(0,i,j)]*dyy[D4ID(j,1)]);
    c[ZZ(k,i,j)] = -0.5*(0.5*varY[XY(0,i,j)]*dyy[D4ID(j,2)]);
    y[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) * u[YX(k,j,i)] - 0.5*v[XY(k,i,j)];
}

// Transposed-input variant of d_implicit_y (u_tr and dyy_tr pre-transposed);
// call site is commented out in run_OrigCPU.
__global__ void d_implicit_y_trans(REAL* u_tr, REAL* v, REAL* a, REAL* b, REAL* c, REAL* y,
                                   REAL* varY, REAL* timeline, REAL* dyy_tr,
                                   unsigned int g, unsigned numX, unsigned numY,
                                   unsigned outer, unsigned numZ){
    unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; // outer
    unsigned int i = blockDim.y * blockIdx.y + threadIdx.y; // numX
    unsigned int j = blockDim.x * blockIdx.x + threadIdx.x; // numY
    if(k >= outer || j >= numY || i >= numX) return;

    a[ZZ(k,i,j)] = -0.5*(0.5*varY[XY(0,i,j)]*dyy_tr[Y4(0,j)]);
    b[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varY[XY(0,i,j)]*dyy_tr[Y4(1,j)]);
    c[ZZ(k,i,j)] = -0.5*(0.5*varY[XY(0,i,j)]*dyy_tr[Y4(2,j)]);
    y[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) * u_tr[XY(k,i,j)] - 0.5*v[XY(k,i,j)];
}

// Shared-memory variant of d_implicit_y_trans (experimental, not launched).
// BUG FIXED: the write-back stored sh_a, sh_b and sh_c all into a[...],
// leaving b and c unwritten; it now writes a, b and c as intended.
// NOTE(review): the early return before __syncthreads() is a divergent
// barrier for edge blocks, as in d_updateParams_sh -- TODO confirm.
__global__ void sh_implicit_y(REAL* u_tr, REAL* v, REAL* a, REAL* b, REAL* c, REAL* y,
                              REAL* varY, REAL* timeline, REAL* dyy_tr,
                              int g, unsigned numX, unsigned numY,
                              unsigned outer, unsigned numZ){
    unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; // outer
    unsigned int i = blockDim.y * blockIdx.y + threadIdx.y; // numX
    unsigned int j = blockDim.x * blockIdx.x + threadIdx.x; // numY
    if(k >= outer || j >= numY || i >= numX) return;

    __shared__ REAL sh_varY[T][T+1],
                    sh_a[T][T+1], sh_b[T][T+1], sh_c[T][T+1], sh_y[T][T+1];
    int tidy = threadIdx.y;
    int tidx = threadIdx.x;

    // Stage inputs in shared memory.
    sh_a[tidy][tidx]    = a[ZZ(k,i,j)];
    sh_b[tidy][tidx]    = b[ZZ(k,i,j)];
    sh_c[tidy][tidx]    = c[ZZ(k,i,j)];
    sh_varY[tidy][tidx] = varY[i*numY +j];
    sh_y[tidy][tidx]    = y[ZZ(k,i,j)];
    __syncthreads();

    sh_a[tidy][tidx] = -0.5*(0.5* sh_varY[tidy][tidx] * dyy_tr[Y4(0,j)]);
    sh_b[tidy][tidx] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*sh_varY[tidy][tidx]*dyy_tr[Y4(1,j)]);
    sh_c[tidy][tidx] = -0.5*(0.5*sh_varY[tidy][tidx]*dyy_tr[Y4(2,j)]);
    sh_y[tidy][tidx] = ( 1.0/(timeline[g+1]-timeline[g])) * u_tr[XY(k,i,j)] - 0.5*v[XY(k,i,j)];

    a[ZZ(k,i,j)] = sh_a[tidy][tidx];
    b[ZZ(k,i,j)] = sh_b[tidy][tidx]; // was erroneously written to a
    c[ZZ(k,i,j)] = sh_c[tidy][tidx]; // was erroneously written to a
    y[ZZ(k,i,j)] = sh_y[tidy][tidx];
}

// Batched (segmented) 2-D transpose: for each z-slice k, trA = A^T, using a
// padded shared-memory tile to coalesce both the read and the write.
__global__ void sgmMatTranspose( REAL* A, REAL* trA, int rowsA, int colsA ){
    __shared__ REAL tile[T][T+1];
    int tidx = threadIdx.x;
    int tidy = threadIdx.y;
    unsigned int k = blockDim.z * blockIdx.z + threadIdx.z;
    unsigned int i = blockDim.y * blockIdx.y + threadIdx.y;
    unsigned int j = blockDim.x * blockIdx.x + threadIdx.x;

    A   += k*rowsA*colsA; // advance to slice k
    trA += k*rowsA*colsA;

    if( j < colsA && i < rowsA )
        tile[tidy][tidx] = A[i*colsA + j];
    __syncthreads();

    // Swapped coordinates for the coalesced write of the transpose.
    i = blockIdx.y*blockDim.y + tidx;
    j = blockIdx.x*blockDim.x + tidy;
    if( j < colsA && i < rowsA )
        trA[j*rowsA+i] = tile[tidx][tidy];
}

// Plain 2-D matrix transpose via a padded shared-memory tile.
__global__ void matTranspose2D(REAL* A, REAL* trA, int rowsA, int colsA){
    __shared__ REAL tile[T][T+1];
    int tidx = threadIdx.x;
    int tidy = threadIdx.y;
    int j = blockIdx.x*T + tidx;
    int i = blockIdx.y*T + tidy;

    if( j < colsA && i < rowsA )
        tile[tidy][tidx] = A[i*colsA+j];
    __syncthreads();

    i = blockIdx.y*T + threadIdx.x;
    j = blockIdx.x*T + threadIdx.y;
    if( j < colsA && i < rowsA )
        trA[j*rowsA+i] = tile[tidx][tidy];
}

//{{{ wrappers

// Initialise the timeline and the x/y grids on the device.
void initGrid_GPU( const REAL s0, const REAL alpha, const REAL nu, const REAL t,
                   const unsigned numX, const unsigned numY, const unsigned numT,
                   REAL* d_myX, REAL* d_myY, REAL* d_myTimeline,
                   unsigned myXindex, unsigned myYindex) {
    const unsigned int BLOCK_SIZE = 256;
    unsigned int NUM_BLOCKS = ceil(numT / (float)BLOCK_SIZE);
    d_initTimeline<<<NUM_BLOCKS,BLOCK_SIZE>>>(d_myTimeline, numT, t);

    NUM_BLOCKS = ceil(numX / (float)BLOCK_SIZE);
    const REAL stdX = 20.0*alpha*s0*sqrt(t);
    const REAL dx = stdX/numX;
    d_initNUM<<<NUM_BLOCKS,BLOCK_SIZE>>>(d_myX, numX, dx, myXindex, s0);

    const REAL stdY = 10.0*nu*sqrt(t);
    const REAL dy = stdY/numY;
    const REAL logAlpha = log(alpha);
    NUM_BLOCKS = ceil(numY / (float)BLOCK_SIZE);
    d_initNUM<<<NUM_BLOCKS,BLOCK_SIZE>>>(d_myY, numY, dy, myYindex, logAlpha);
}

// Build a second-difference operator ([x_size][4]) on the device.
void initOperator_GPU(REAL* d_x, unsigned int x_size, REAL* d_dxx){
    const unsigned int BLOCK_SIZE = 256;
    unsigned int NUM_BLOCKS = ceil(x_size / (float)BLOCK_SIZE);
    d_initOperator<<<NUM_BLOCKS,BLOCK_SIZE>>>(d_x, x_size, d_dxx);
}

// GPU driver: ADI (alternating-direction implicit) rollback over numT time
// steps for `outer` independent pricing problems; writes one REAL per outer
// iteration into res.
void run_OrigCPU( const unsigned int&   outer,
                  const unsigned int&   numX,
                  const unsigned int&   numY,
                  const unsigned int&   numT,
                  const REAL&           s0,
                  const REAL&           t,
                  const REAL&           alpha,
                  const REAL&           nu,
                  const REAL&           beta,
                  REAL*                 res   // [outer] RESULT
) {
    //// ---------- GPU version -------------------- ////
    REAL *h_result; // host copy of the final result
    // device buffers
    REAL *d_x, *d_y, *d_timeline, *d_dxx, *d_dyy;
    REAL *d_result, *d_varX, *d_varY;
    REAL *d_a, *d_b, *d_c, *d_yy, *d_yyy, *d_u, *d_v;

    // myXindex / myYindex are scalars locating s0 on the grids.
    const REAL stdX = 20.0*alpha*s0*sqrt(t);
    const REAL dx = stdX/numX;
    unsigned myXindex = static_cast<unsigned>(s0/dx) % numX;
    unsigned myYindex = static_cast<unsigned>(numY/2.0);
    unsigned numZ = max(numX,numY);

    int memsize_X   = numX * sizeof(REAL);
    int memsize_Y   = numY * sizeof(REAL);
    int memsize_T   = numT * sizeof(REAL);
    int memsize_XY  = numX * numY * sizeof(REAL);
    int memsize_OXY = outer * numX * numY * sizeof (REAL);
    int memsize_OZZ = outer * numZ * numZ * sizeof (REAL);

    h_result = (REAL*) malloc (memsize_OXY);

    cudaMalloc((void**)&d_result, memsize_OXY); //[outer][numX][numY]
    cudaMalloc((void**)&d_varX,   memsize_XY);  //[numX][numY]
    cudaMalloc((void**)&d_varY,   memsize_XY);  //[numX][numY]
    cudaMalloc((void**)&d_x,      memsize_X);   //[numX]
    cudaMalloc((void**)&d_y,      memsize_Y);   //[numY]
    cudaMalloc((void**)&d_timeline, memsize_T); //[numT]
    cudaMalloc((void**)&d_dxx, 4 * memsize_X);  //[numX][4]
    cudaMalloc((void**)&d_dyy, 4 * memsize_Y);  //[numY][4]
    // a b c yy yyy: [outer][numZ][numZ]
    cudaMalloc((void**)&d_a  , memsize_OZZ);
    cudaMalloc((void**)&d_b  , memsize_OZZ);
    cudaMalloc((void**)&d_c  , memsize_OZZ);
    cudaMalloc((void**)&d_yy , memsize_OZZ); // y in the sequential code
    cudaMalloc((void**)&d_yyy, memsize_OZZ); // yy in the sequential code
    cudaMalloc((void**)&d_u  , memsize_OXY); // d_u : [outer][numY][numX]
    cudaMalloc((void**)&d_v  , memsize_OXY); // d_v : [outer][numX][numY]

    // buffers for the (currently disabled) transposition experiments
    REAL * d_u_tr;
    REAL * d_dyy_tr;
    REAL * d_a_tr;
    REAL * d_b_tr;
    REAL * d_c_tr;
    cudaMalloc((void**)&d_u_tr , memsize_OXY);
    cudaMalloc((void**)&d_dyy_tr, memsize_Y *4);
    cudaMalloc((void**)&d_a_tr, memsize_OZZ);
    cudaMalloc((void**)&d_b_tr, memsize_OZZ);
    cudaMalloc((void**)&d_c_tr, memsize_OZZ);

    dim3 block_2D(T,T);
    dim3 grid_2D_4Y(1, ceil(((float)numY)/T));
    dim3 grid_2D_XY(ceil(((float)numY)/ T ),ceil(((float)numX)/ T ));
    dim3 grid_2D_OX(ceil(((float)numX)/T), ceil((float)outer/T));
    dim3 grid_2D_OY(ceil(((float)numY)/T), ceil((float)outer/T));
    dim3 grid_2D_YX(ceil(((float)numX)/T), ceil(((float)numY)/T));

    dim3 block_3D(T, T, 1);
    dim3 grid_3D_OXY(ceil(((float)numY)/T), ceil(((float)numX)/T), ceil(((float)outer)/1.0));
    dim3 grid_3D_OZZ(ceil(((float)numZ)/T), ceil(((float)numZ)/T), ceil(((float)outer)/1.0));
    dim3 grid_3D_OYX(ceil(((float)numX)/T), ceil(((float)numY)/T), ceil(((float)outer)/1.0) );

    // GPU init
    initGrid_GPU(s0, alpha, nu,t, numX,numY, numT, d_x, d_y, d_timeline, myXindex, myYindex);
    initOperator_GPU( d_x, numX, d_dxx);
    initOperator_GPU( d_y, numY, d_dyy);

    // GPU setPayoff
    d_setPayoff<<<grid_3D_OXY, block_3D>>>(d_result, d_x, numY, numX, outer);

    // timeline loop, backwards in time
    for(int g = numT-2; g>=0; --g) {
        //---- GPU updateParams
        d_updateParams<<< grid_2D_YX, block_2D >>>(d_varX, d_varY, d_x, d_y, d_timeline,g,
                                                   alpha, beta, nu, numX, numY);
        // d_updateParams_sh<<< grid_2D_XY, block_2D >>>(d_varX, d_varY, d_x, d_y, d_timeline,g,
        //                                               alpha, beta, nu, numX, numY);
        // d_updateParams_interchange<<< grid_2D_XY, block_2D >>>(d_varX, d_varY, d_x, d_y, d_timeline,g,
        //                                               alpha, beta, nu, numX, numY);

        //---- GPU rollback Part 1
        // matTranspose2D<<< grid_2D_4Y, block_2D >>>(d_dyy, d_dyy_tr, numY, 4); // transpose d_dyy
        d_explicit_xy_implicit_x<<<grid_3D_OYX, block_3D>>>(d_u,d_v,d_a,d_b,d_c,
            d_varX,d_varY,d_timeline,d_dxx,d_dyy,d_result, g, numX, numY, outer, numZ);
        // d_explicit_xy_implicit_x_interchange<<<grid_3D_OXY, block_3D>>>(d_u_tr,d_v,d_a_tr,d_b_tr,d_c_tr,
        //     d_varX,d_varY,d_timeline,d_dxx,d_dyy_tr,d_result, g, numX, numY, outer, numZ);
        // transpose back the variables
        // sgmMatTranspose <<< grid_3D_OXY, block_3D>>>( d_u_tr, d_u, numX, numY );
        // sgmMatTranspose <<< grid_3D_OZZ, block_3D>>>( d_a_tr, d_a, numZ, numZ );
        // sgmMatTranspose <<< grid_3D_OZZ, block_3D>>>( d_b_tr, d_b, numZ, numZ );
        // sgmMatTranspose <<< grid_3D_OZZ, block_3D>>>( d_c_tr, d_c, numZ, numZ );

        //---- GPU rollback Part 2 (implicit x solve, in place in d_u)
        d_tridag_implicit_x <<< grid_2D_OY, block_2D >>> (d_a,d_b,d_c, d_u, numX,
                                                          d_u,d_yyy,numX,numY,outer,numZ,numY);

        //---- GPU rollback Part 3 (build implicit-y system)
        d_implicit_y<<< grid_3D_OXY, block_3D >>>(d_u,d_v,d_a,d_b,d_c, d_yy,
            d_varY,d_timeline, d_dyy, g, numX, numY, outer, numZ);
        // sgmMatTranspose <<< grid_3D_OYX, block_3D>>>( d_u, d_u_tr, numY, numX ); // transpose u
        // d_implicit_y_trans<<< grid_3D_OXY, block_3D >>>(d_u_tr,d_v,d_a,d_b,d_c, d_yy,
        //     d_varY,d_timeline, d_dyy_tr, g, numX, numY, outer, numZ);

        //---- GPU rollback Part 4 (implicit y solve into d_result)
        d_tridag_implicit_y <<< grid_2D_OX, block_2D >>> (d_a,d_b,d_c,d_yy,numY,
                                                          d_result,d_yyy,numX,numY,outer,numZ,numX);
    } // timeline loop end

    // read the final result (blocking copy also synchronizes the device)
    cudaMemcpy( h_result , d_result , memsize_OXY , cudaMemcpyDeviceToHost);

    #pragma omp parallel for default(shared) schedule(static)
    for( unsigned k = 0; k < outer; ++ k ) // outermost loop k
        res[k] = h_result[XY(k,myXindex,myYindex)];

    cudaFree(d_x); cudaFree(d_y); cudaFree(d_dxx); cudaFree(d_dyy);
    cudaFree(d_timeline); cudaFree(d_result); cudaFree(d_varX); cudaFree(d_varY);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFree(d_yy); cudaFree(d_yyy);
    cudaFree(d_u); cudaFree(d_v);
    cudaFree(d_u_tr); cudaFree(d_dyy_tr); cudaFree(d_a_tr); cudaFree(d_b_tr); cudaFree(d_c_tr);
    free(h_result);
    // #endif
}
bfc3b813874d7636e1a32a45ec0e3f579d164e4c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * @brief Implementation of different distances
 *
 * @file distance_utils.cu
 * @author David Chan
 * @date 2018-04-04
 * Copyright (c) 2018, Regents of the University of California
 */

#include "include/util/distance_utils.h"
#include <chrono>
#include <future>

// This really does a simultaneous row/col matrix vector broadcast
// to compute ||x^2|| + ||y^2|| - 2 x^Ty.
// Added fabs to deal with numerical instabilities. I think this is a
// reasonable solution
// One thread per (i, j) entry of the num_points x num_points matrix;
// overwrites d_dot_products in place with the squared pairwise distances.
__global__ void tsnecuda::util::AssembleDistances(
    const float *__restrict__ d_squared_norms,
    float *__restrict__ d_dot_products,
    const int num_points)
{
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    const int j = threadIdx.y + blockIdx.y * blockDim.y;
    if ((i < num_points) && (j < num_points))
        d_dot_products[i * num_points + j] = fabs(d_squared_norms[j] +
            d_squared_norms[i] - 2 * d_dot_products[i * num_points + j]);
}

// Code from https://github.com/OrangeOwlSolutions/cuBLAS/blob/master/All_pairs_distances.cu
// Expects num_points x num_dims matrix in points
// Squared norms taken from diagnoal of dot product which should be faster
// and result in actually zeroing out the diagonal in assemble_final_result
// Computes all squared pairwise distances into d_distances:
//   1. GEMM builds the Gram matrix X * X^T (dot products of all point pairs).
//   2. Its diagonal (stride num_points+1) holds the squared norms ||x_i||^2.
//   3. AssembleDistances combines the two into ||x_i||^2 + ||x_j||^2 - 2 x_i.x_j.
void tsnecuda::util::SquaredPairwiseDistance(hipblasHandle_t &handle,
        thrust::device_vector<float> &d_distances,
        const thrust::device_vector<float> &d_points,
        const int num_points,
        const int num_dims)
{
    const int kBlockSize = 16;
    float kAlpha = 1.f;
    float kBeta = 0.f;

    // TODO(Roshan): Could replace this with hipblasSsyrk, might be faster?
    // NOTE: hipBLAS is column-major, so N/T here operate on the transposed view
    // of the row-major d_points buffer.
    CublasSafeCall(hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T,
            num_points, num_points, num_dims, &kAlpha,
            thrust::raw_pointer_cast(d_points.data()),
            num_points, thrust::raw_pointer_cast(d_points.data()),
            num_points, &kBeta,
            thrust::raw_pointer_cast(d_distances.data()), num_points));

    // Extract the diagonal (stride num_points + 1) -> squared norms.
    typedef thrust::device_vector<float>::iterator Iterator;
    tsnecuda::util::StridedRange<Iterator> diagonalized(d_distances.begin(),
            d_distances.end(), num_points + 1);
    thrust::device_vector<float> squared_norms(num_points);
    thrust::copy(diagonalized.begin(), diagonalized.end(),
            squared_norms.begin());

    dim3 kBlockDimensions(kBlockSize, kBlockSize);
    dim3 kGridDimensions(iDivUp(num_points, kBlockSize),
            iDivUp(num_points, kBlockSize));
    hipLaunchKernelGGL(( tsnecuda::util::AssembleDistances), dim3(kGridDimensions), dim3(kBlockDimensions), 0, 0, 
            thrust::raw_pointer_cast(squared_norms.data()),
            thrust::raw_pointer_cast(d_distances.data()), num_points);
}

// Euclidean (non-squared) pairwise distances: squared distances followed by
// an element-wise square root.
void tsnecuda::util::PairwiseDistance(hipblasHandle_t &handle,
        thrust::device_vector<float> &d_distances,
        const thrust::device_vector<float> &d_points,
        const int num_points,
        const int num_dims)
{
    tsnecuda::util::SquaredPairwiseDistance(handle, d_distances, d_points,
            num_points, num_dims);
    tsnecuda::util::SqrtDeviceVector(d_distances, d_distances);
}

// Approximate k-nearest-neighbour search via FAISS IVF-Flat.
// Writes num_points*num_near_neighbors entries into distances/indices.
// The GPU path is used only below FAISS's per-query GPU limit of 1024
// neighbours; otherwise the query runs on the CPU index.
void tsnecuda::util::KNearestNeighbors(tsnecuda::GpuOptions &gpu_opt,
        tsnecuda::Options &base_options,
        int64_t *indices, float *distances, const float *const points,
        const int num_dims, const int num_points, const int num_near_neighbors)
{
    // sqrt(N) cells is a common IVF heuristic; probe 20 of them per query.
    const int32_t kNumCells = static_cast<int32_t>(
            std::sqrt(static_cast<float>(num_points)));
    const int32_t kNumCellsToProbe = 20;

    // Construct the CPU version of the index
    faiss::IndexFlatL2 quantizer(num_dims);
    faiss::IndexIVFFlat cpu_index(&quantizer, num_dims, kNumCells,
            faiss::METRIC_L2);
    cpu_index.nprobe = kNumCellsToProbe;

    if (num_near_neighbors < 1024) {
        // One GpuResources object per visible device for a multi-GPU index.
        int ngpus = faiss::gpu::getNumDevices();
        std::vector<faiss::gpu::GpuResourcesProvider *> res;
        std::vector<int> devs;
        for (int i = 0; i < ngpus; i++) {
            res.push_back(new faiss::gpu::StandardGpuResources);
            devs.push_back(i);
        }

        // Convert the CPU index to GPU index
        faiss::Index *search_index =
                faiss::gpu::index_cpu_to_gpu_multiple(res, devs, &cpu_index);

        search_index->train(num_points, points);
        search_index->add(num_points, points);

        search_index->search(num_points, points, num_near_neighbors,
                distances, indices);

        delete search_index;
        for (int i = 0; i < ngpus; i++) {
            delete res[i];
        }
    } else {
        // Construct the index table on the CPU (since the GPU
        // can only handle 1023 neighbors)
        cpu_index.train(num_points, points);
        cpu_index.add(num_points, points);

        // Perform the KNN query
        cpu_index.search(num_points, points, num_near_neighbors,
                distances, indices);
    }
}

// TODO: Add -1 notification here... and how to deal with it if it happens
// TODO: Maybe think about getting FAISS to return integers (long-term todo)
// Narrows FAISS's int64 neighbour indices to int, one thread per entry.
__global__ void tsnecuda::util::PostprocessNeighborIndicesKernel(
        volatile int *__restrict__ indices,
        const long *__restrict__ long_indices,
        const int num_points,
        const int num_neighbors)
{
    register int TID = threadIdx.x + blockIdx.x * blockDim.x;
    if (TID >= num_points * num_neighbors) return;
    // Set pij to 0 for each of the broken values - Note: this should be handled in the ComputePijKernel now
    // if (matrix[TID] == 1.0f) matrix[TID] = 0.0f;
    indices[TID] = (int)long_indices[TID];
}

// Host wrapper for PostprocessNeighborIndicesKernel; blocks until the
// conversion finishes.
void tsnecuda::util::PostprocessNeighborIndices(
        tsnecuda::GpuOptions &gpu_opt,
        thrust::device_vector<int> &indices,
        thrust::device_vector<int64_t> &long_indices,
        const int num_points,
        const int num_neighbors)
{
    const int num_threads = 128;
    const int num_blocks = iDivUp(num_points * num_neighbors, num_threads);
    hipLaunchKernelGGL(( tsnecuda::util::PostprocessNeighborIndicesKernel), dim3(num_blocks), dim3(num_threads), 0, 0, 
            thrust::raw_pointer_cast(indices.data()),
            thrust::raw_pointer_cast(long_indices.data()),
            num_points, num_neighbors);
    GpuErrorCheck(hipDeviceSynchronize());
}
bfc3b813874d7636e1a32a45ec0e3f579d164e4c.cu
/**
 * @brief Implementation of different distances
 *
 * @file distance_utils.cu
 * @author David Chan
 * @date 2018-04-04
 * Copyright (c) 2018, Regents of the University of California
 */

#include "include/util/distance_utils.h"
#include <chrono>
#include <future>

// This really does a simultaneous row/col matrix vector broadcast
// to compute ||x^2|| + ||y^2|| - 2 x^Ty.
// Added fabs to deal with numerical instabilities. I think this is a
// reasonable solution
// One thread per (i, j) entry of the num_points x num_points matrix;
// overwrites d_dot_products in place with the squared pairwise distances.
__global__ void tsnecuda::util::AssembleDistances(
    const float *__restrict__ d_squared_norms,
    float *__restrict__ d_dot_products,
    const int num_points)
{
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    const int j = threadIdx.y + blockIdx.y * blockDim.y;
    if ((i < num_points) && (j < num_points))
        d_dot_products[i * num_points + j] = fabs(d_squared_norms[j] +
            d_squared_norms[i] - 2 * d_dot_products[i * num_points + j]);
}

// Code from https://github.com/OrangeOwlSolutions/cuBLAS/blob/master/All_pairs_distances.cu
// Expects num_points x num_dims matrix in points
// Squared norms taken from diagnoal of dot product which should be faster
// and result in actually zeroing out the diagonal in assemble_final_result
// Computes all squared pairwise distances into d_distances:
//   1. GEMM builds the Gram matrix X * X^T (dot products of all point pairs).
//   2. Its diagonal (stride num_points+1) holds the squared norms ||x_i||^2.
//   3. AssembleDistances combines the two into ||x_i||^2 + ||x_j||^2 - 2 x_i.x_j.
void tsnecuda::util::SquaredPairwiseDistance(cublasHandle_t &handle,
        thrust::device_vector<float> &d_distances,
        const thrust::device_vector<float> &d_points,
        const int num_points,
        const int num_dims)
{
    const int kBlockSize = 16;
    float kAlpha = 1.f;
    float kBeta = 0.f;

    // TODO(Roshan): Could replace this with cublasSsyrk, might be faster?
    // NOTE: cuBLAS is column-major, so N/T here operate on the transposed view
    // of the row-major d_points buffer.
    CublasSafeCall(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T,
            num_points, num_points, num_dims, &kAlpha,
            thrust::raw_pointer_cast(d_points.data()),
            num_points, thrust::raw_pointer_cast(d_points.data()),
            num_points, &kBeta,
            thrust::raw_pointer_cast(d_distances.data()), num_points));

    // Extract the diagonal (stride num_points + 1) -> squared norms.
    typedef thrust::device_vector<float>::iterator Iterator;
    tsnecuda::util::StridedRange<Iterator> diagonalized(d_distances.begin(),
            d_distances.end(), num_points + 1);
    thrust::device_vector<float> squared_norms(num_points);
    thrust::copy(diagonalized.begin(), diagonalized.end(),
            squared_norms.begin());

    dim3 kBlockDimensions(kBlockSize, kBlockSize);
    dim3 kGridDimensions(iDivUp(num_points, kBlockSize),
            iDivUp(num_points, kBlockSize));
    tsnecuda::util::AssembleDistances<<<kGridDimensions, kBlockDimensions>>>(
            thrust::raw_pointer_cast(squared_norms.data()),
            thrust::raw_pointer_cast(d_distances.data()), num_points);
}

// Euclidean (non-squared) pairwise distances: squared distances followed by
// an element-wise square root.
void tsnecuda::util::PairwiseDistance(cublasHandle_t &handle,
        thrust::device_vector<float> &d_distances,
        const thrust::device_vector<float> &d_points,
        const int num_points,
        const int num_dims)
{
    tsnecuda::util::SquaredPairwiseDistance(handle, d_distances, d_points,
            num_points, num_dims);
    tsnecuda::util::SqrtDeviceVector(d_distances, d_distances);
}

// Approximate k-nearest-neighbour search via FAISS IVF-Flat.
// Writes num_points*num_near_neighbors entries into distances/indices.
// The GPU path is used only below FAISS's per-query GPU limit of 1024
// neighbours; otherwise the query runs on the CPU index.
void tsnecuda::util::KNearestNeighbors(tsnecuda::GpuOptions &gpu_opt,
        tsnecuda::Options &base_options,
        int64_t *indices, float *distances, const float *const points,
        const int num_dims, const int num_points, const int num_near_neighbors)
{
    // sqrt(N) cells is a common IVF heuristic; probe 20 of them per query.
    const int32_t kNumCells = static_cast<int32_t>(
            std::sqrt(static_cast<float>(num_points)));
    const int32_t kNumCellsToProbe = 20;

    // Construct the CPU version of the index
    faiss::IndexFlatL2 quantizer(num_dims);
    faiss::IndexIVFFlat cpu_index(&quantizer, num_dims, kNumCells,
            faiss::METRIC_L2);
    cpu_index.nprobe = kNumCellsToProbe;

    if (num_near_neighbors < 1024) {
        // One GpuResources object per visible device for a multi-GPU index.
        int ngpus = faiss::gpu::getNumDevices();
        std::vector<faiss::gpu::GpuResourcesProvider *> res;
        std::vector<int> devs;
        for (int i = 0; i < ngpus; i++) {
            res.push_back(new faiss::gpu::StandardGpuResources);
            devs.push_back(i);
        }

        // Convert the CPU index to GPU index
        faiss::Index *search_index =
                faiss::gpu::index_cpu_to_gpu_multiple(res, devs, &cpu_index);

        search_index->train(num_points, points);
        search_index->add(num_points, points);

        search_index->search(num_points, points, num_near_neighbors,
                distances, indices);

        delete search_index;
        for (int i = 0; i < ngpus; i++) {
            delete res[i];
        }
    } else {
        // Construct the index table on the CPU (since the GPU
        // can only handle 1023 neighbors)
        cpu_index.train(num_points, points);
        cpu_index.add(num_points, points);

        // Perform the KNN query
        cpu_index.search(num_points, points, num_near_neighbors,
                distances, indices);
    }
}

// TODO: Add -1 notification here... and how to deal with it if it happens
// TODO: Maybe think about getting FAISS to return integers (long-term todo)
// Narrows FAISS's int64 neighbour indices to int, one thread per entry.
__global__ void tsnecuda::util::PostprocessNeighborIndicesKernel(
        volatile int *__restrict__ indices,
        const long *__restrict__ long_indices,
        const int num_points,
        const int num_neighbors)
{
    register int TID = threadIdx.x + blockIdx.x * blockDim.x;
    if (TID >= num_points * num_neighbors) return;
    // Set pij to 0 for each of the broken values - Note: this should be handled in the ComputePijKernel now
    // if (matrix[TID] == 1.0f) matrix[TID] = 0.0f;
    indices[TID] = (int)long_indices[TID];
}

// Host wrapper for PostprocessNeighborIndicesKernel; blocks until the
// conversion finishes.
void tsnecuda::util::PostprocessNeighborIndices(
        tsnecuda::GpuOptions &gpu_opt,
        thrust::device_vector<int> &indices,
        thrust::device_vector<int64_t> &long_indices,
        const int num_points,
        const int num_neighbors)
{
    const int num_threads = 128;
    const int num_blocks = iDivUp(num_points * num_neighbors, num_threads);
    tsnecuda::util::PostprocessNeighborIndicesKernel<<<num_blocks, num_threads>>>(
            thrust::raw_pointer_cast(indices.data()),
            thrust::raw_pointer_cast(long_indices.data()),
            num_points, num_neighbors);
    GpuErrorCheck(cudaDeviceSynchronize());
}
8731a60aeee9493a55cb171df7c9868874ec776f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "ShortestPath3.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *Arr1 = NULL; hipMalloc(&Arr1, XSIZE*YSIZE); float *Arr2 = NULL; hipMalloc(&Arr2, XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( ShortestPath3), dim3(gridBlock),dim3(threadBlock), 0, 0, Arr1,Arr2,N); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( ShortestPath3), dim3(gridBlock),dim3(threadBlock), 0, 0, Arr1,Arr2,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( ShortestPath3), dim3(gridBlock),dim3(threadBlock), 0, 0, Arr1,Arr2,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
8731a60aeee9493a55cb171df7c9868874ec776f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "ShortestPath3.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *Arr1 = NULL; cudaMalloc(&Arr1, XSIZE*YSIZE); float *Arr2 = NULL; cudaMalloc(&Arr2, XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); ShortestPath3<<<gridBlock,threadBlock>>>(Arr1,Arr2,N); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { ShortestPath3<<<gridBlock,threadBlock>>>(Arr1,Arr2,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { ShortestPath3<<<gridBlock,threadBlock>>>(Arr1,Arr2,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
78b7b6d956dfcdcd6d6e0bcd3c26c1a7969986c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 csymv_upper.cu is nearly identical to chemv_upper.cu, just change names and drop MAGMA_C_CONJ. chemv_kernel_U (upper) in chemv_upper.cu is very similar to chemv_kernel_L (lower) in chemv.cu; diff the two files to compare. @generated from magmablas/zhemv_upper.cu, normal z -> c, Thu Oct 8 23:05:33 2020 @author Mark Gates */ #include "magma_internal.h" #include "commonblas_c.h" #define PRECISION_c #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /***************************************************************************//** Upper case, compute block multiply, work = A*x, for any size n: [ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ] The order is different from the lower case, because the upper case processes a block row from the diagonal to the right, whereas the lower case processes a block row from the diagonal to the left. Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. 
*******************************************************************************/ __global__ void chemv_kernel_U( int n, magmaFloatComplex const * __restrict__ A, int lda, magmaFloatComplex const * __restrict__ x, int incx, magmaFloatComplex * __restrict__ work) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); magmaFloatComplex psum, psum_t; magmaFloatComplex total = MAGMA_C_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ] __shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag magmaFloatComplex rA[4]; magmaFloatComplex psums_t[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx_blk x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_blk[tx] = x[0]; } else { sx_blk[tx] = MAGMA_C_ZERO; } } // -------------------- // move to block row work += blk*lda; // work is work(0, blk) A += blk_ind; // A is A(blk_ind, 0) A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2) // move to 32x32 diag block A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_C_ZERO; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2) } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for (int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_C_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; 
__syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_C_ZERO; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle #pragma unroll for (int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_C_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to off-diag 32x32 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - (tx2) + (partial - 1); } #pragma 
unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_C_ZERO; } } if ( tx2 >= partial ) { A = A + (tx2) - (partial - 1); } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_C_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += MAGMA_C_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum_t = MAGMA_C_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum_t; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 64x64 block right of diag in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 ) A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X ) A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty) // Unlike lower case, don't adjust A here for partial # of rows. // Since block is right of diagonal, it must have all NB rows, // but can have < NB columns, dealt with when loading below. 
x -= blk_ind*incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; // cycle over blocks jj right of diagonal, in block row blk for (int jj=blk+1; jj < gridDim.x; ++jj) { partial = (jj == gridDim.x - 1 ? (n % NB_X) : 0); // load 64x1 block x(jj_ind + 0:63) into sx_jj if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_jj[tx] = x[jj*NB_X*incx]; } else { sx_jj[tx] = MAGMA_C_ZERO; } } __syncthreads(); for (int k=0; k < 4; k++) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 if ( partial ) { #pragma unroll for (int j=0; j < 4; j++) { if ( 4*ty + j + k*quarter_NB_X < partial ) { rA[j] = A[j*lda]; } else { rA[j] = MAGMA_C_ZERO; } } } else { #pragma unroll for (int j=0; j < 4; j++) { rA[j] = A[j*lda]; } } // 1) multiply 64x16 block A_{blk,jj} * x_jj // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply 16x64 block A_{blk,jj} * x_blk, // storing each product Aji*xi to sA(j,i) #pragma unroll for (int j=0; j < 4; j++) { total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj sA16(ty*4 + j, tx) = MAGMA_C_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum_t = MAGMA_C_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum_t += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums of transposed result, y_jj (locally) psums_t[k] = psum_t; // move right to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty) } // already at next 64x64 block // A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty) // store partial row sums of transposed result, 
y_jj #pragma unroll for (int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k]; } __syncthreads(); // sum up partial row sums of transposed result, y_jj, and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) { int ty4_nb4 = ty4*quarter_NB_X; psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4) + sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4) + sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4) + sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4) + sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4) + sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4) + sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4) + sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4); work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } // store row sums sA16(ty, tx) = total; __syncthreads(); partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); // sum up final total, y_blk, for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } // end chemv_kernel_U /***************************************************************************//** Upper case, sum up final results Each block sums one block row; each thread sums one row. 
On input (for 3 blocks): [ (A11*x1 + A12*x2 + A13*x3) --- --- ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] On output: [ (A11*x1 + A12*x2 + A13*x3) ] y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y [ (A13^H*x1) + (A23^H*x2) + (A33*x3) ] *******************************************************************************/ __global__ void chemv_kernel_U_sum( int n, magmaFloatComplex alpha, int lda, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy, magmaFloatComplex const * __restrict__ work ) { int tx = threadIdx.x; int blk = blockIdx.x; int blk_ind = blk * NB_X; int ind = blk_ind + tx; // Don't write outside [0, ..., n) if ( ind < n ) { work += ind; magmaFloatComplex Ax = MAGMA_C_ZERO; for (int j = 0; j <= blk; ++j) { Ax += work[0]; work += lda; } y[ind * incy] = beta*y[ind * incy] + alpha*Ax; } }
78b7b6d956dfcdcd6d6e0bcd3c26c1a7969986c4.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 csymv_upper.cu is nearly identical to chemv_upper.cu, just change names and drop MAGMA_C_CONJ. chemv_kernel_U (upper) in chemv_upper.cu is very similar to chemv_kernel_L (lower) in chemv.cu; diff the two files to compare. @generated from magmablas/zhemv_upper.cu, normal z -> c, Thu Oct 8 23:05:33 2020 @author Mark Gates */ #include "magma_internal.h" #include "commonblas_c.h" #define PRECISION_c #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /***************************************************************************//** Upper case, compute block multiply, work = A*x, for any size n: [ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ] The order is different from the lower case, because the upper case processes a block row from the diagonal to the right, whereas the lower case processes a block row from the diagonal to the left. Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. 
*******************************************************************************/ __global__ void chemv_kernel_U( int n, magmaFloatComplex const * __restrict__ A, int lda, magmaFloatComplex const * __restrict__ x, int incx, magmaFloatComplex * __restrict__ work) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); magmaFloatComplex psum, psum_t; magmaFloatComplex total = MAGMA_C_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ] __shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag magmaFloatComplex rA[4]; magmaFloatComplex psums_t[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx_blk x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_blk[tx] = x[0]; } else { sx_blk[tx] = MAGMA_C_ZERO; } } // -------------------- // move to block row work += blk*lda; // work is work(0, blk) A += blk_ind; // A is A(blk_ind, 0) A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2) // move to 32x32 diag block A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_C_ZERO; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2) } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for (int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_C_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; 
__syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_C_ZERO; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle #pragma unroll for (int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_C_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to off-diag 32x32 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - (tx2) + (partial - 1); } #pragma 
unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_C_ZERO; } } if ( tx2 >= partial ) { A = A + (tx2) - (partial - 1); } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_C_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += MAGMA_C_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum_t = MAGMA_C_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum_t; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 64x64 block right of diag in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 ) A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X ) A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty) // Unlike lower case, don't adjust A here for partial # of rows. // Since block is right of diagonal, it must have all NB rows, // but can have < NB columns, dealt with when loading below. 
x -= blk_ind*incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; // cycle over blocks jj right of diagonal, in block row blk for (int jj=blk+1; jj < gridDim.x; ++jj) { partial = (jj == gridDim.x - 1 ? (n % NB_X) : 0); // load 64x1 block x(jj_ind + 0:63) into sx_jj if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_jj[tx] = x[jj*NB_X*incx]; } else { sx_jj[tx] = MAGMA_C_ZERO; } } __syncthreads(); for (int k=0; k < 4; k++) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 if ( partial ) { #pragma unroll for (int j=0; j < 4; j++) { if ( 4*ty + j + k*quarter_NB_X < partial ) { rA[j] = A[j*lda]; } else { rA[j] = MAGMA_C_ZERO; } } } else { #pragma unroll for (int j=0; j < 4; j++) { rA[j] = A[j*lda]; } } // 1) multiply 64x16 block A_{blk,jj} * x_jj // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply 16x64 block A_{blk,jj} * x_blk, // storing each product Aji*xi to sA(j,i) #pragma unroll for (int j=0; j < 4; j++) { total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj sA16(ty*4 + j, tx) = MAGMA_C_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum_t = MAGMA_C_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum_t += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums of transposed result, y_jj (locally) psums_t[k] = psum_t; // move right to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty) } // already at next 64x64 block // A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty) // store partial row sums of transposed result, 
y_jj #pragma unroll for (int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k]; } __syncthreads(); // sum up partial row sums of transposed result, y_jj, and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) { int ty4_nb4 = ty4*quarter_NB_X; psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4) + sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4) + sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4) + sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4) + sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4) + sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4) + sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4) + sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4); work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } // store row sums sA16(ty, tx) = total; __syncthreads(); partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); // sum up final total, y_blk, for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } // end chemv_kernel_U /***************************************************************************//** Upper case, sum up final results Each block sums one block row; each thread sums one row. 
On input (for 3 blocks): [ (A11*x1 + A12*x2 + A13*x3) --- --- ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] On output: [ (A11*x1 + A12*x2 + A13*x3) ] y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y [ (A13^H*x1) + (A23^H*x2) + (A33*x3) ] *******************************************************************************/ __global__ void chemv_kernel_U_sum( int n, magmaFloatComplex alpha, int lda, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy, magmaFloatComplex const * __restrict__ work ) { int tx = threadIdx.x; int blk = blockIdx.x; int blk_ind = blk * NB_X; int ind = blk_ind + tx; // Don't write outside [0, ..., n) if ( ind < n ) { work += ind; magmaFloatComplex Ax = MAGMA_C_ZERO; for (int j = 0; j <= blk; ++j) { Ax += work[0]; work += lda; } y[ind * incy] = beta*y[ind * incy] + alpha*Ax; } }
d3185539913fd49fa45d52706c2e6a81bd103a34.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************** * Copyright (c) 2017-2019 by the mfmg authors * * All rights reserved. * * * * This file is part of the mfmg library. mfmg is distributed under a BSD * * 3-clause license. For the licensing terms see the LICENSE file in the * * top-level directory * * * * SPDX-License-Identifier: BSD-3-Clause * *************************************************************************/ #define BOOST_TEST_MODULE eigenvectors_device #include <mfmg/cuda/amge_device.cuh> #include <mfmg/cuda/cuda_mesh_evaluator.cuh> #include <mfmg/cuda/sparse_matrix_device.cuh> #include <mfmg/cuda/utils.cuh> #include <deal.II/distributed/tria.h> #include <deal.II/dofs/dof_accessor.h> #include <deal.II/fe/fe_q.h> #include <deal.II/grid/grid_generator.h> #include <deal.II/lac/la_parallel_vector.h> #include "main.cc" template <int dim> class DiagonalTestMeshEvaluator : public mfmg::CudaMeshEvaluator<dim> { public: DiagonalTestMeshEvaluator(mfmg::CudaHandle const &cuda_handle, dealii::DoFHandler<dim> &dof_handler, dealii::AffineConstraints<double> &constraints) : mfmg::CudaMeshEvaluator<dim>(cuda_handle, dof_handler, constraints) { } virtual ~DiagonalTestMeshEvaluator() override = default; void evaluate_agglomerate( dealii::DoFHandler<2> &dof_handler, dealii::AffineConstraints<double> &constraint_matrix, mfmg::SparseMatrixDevice<double> &system_matrix_dev) const override final { // Build the matrix on the host dealii::FE_Q<2> fe(1); dof_handler.distribute_dofs(fe); constraint_matrix.clear(); dealii::SparsityPattern system_sparsity_pattern; dealii::SparseMatrix<double> system_matrix; unsigned int const size = 30; std::vector<std::vector<unsigned int>> column_indices( size, std::vector<unsigned int>(1)); for (unsigned int i = 0; i < size; ++i) column_indices[i][0] = i; system_sparsity_pattern.copy_from(size, size, column_indices.begin(), column_indices.end()); 
system_matrix.reinit(system_sparsity_pattern); for (unsigned int i = 0; i < size; ++i) { system_matrix.diag_element(i) = static_cast<double>(i + 1); } // Move the matrices to the device system_matrix_dev = std::move(mfmg::convert_matrix(system_matrix)); } void evaluate_global( dealii::DoFHandler<2> &dof_handler, dealii::AffineConstraints<double> &constraint_matrix, mfmg::SparseMatrixDevice<double> &system_matrix_dev) const override final { } }; BOOST_AUTO_TEST_CASE(diagonal) { int constexpr dim = 2; using Vector = dealii::LinearAlgebra::distributed::Vector<double>; using DummyMeshEvaluator = mfmg::CudaMeshEvaluator<dim>; dealii::parallel::distributed::Triangulation<2> triangulation(MPI_COMM_WORLD); dealii::GridGenerator::hyper_cube(triangulation); triangulation.refine_global(3); dealii::FE_Q<dim> fe(1); dealii::DoFHandler<dim> dof_handler(triangulation); dof_handler.distribute_dofs(fe); // Initialize the CUDA libraries mfmg::CudaHandle cuda_handle; mfmg::AMGe_device<dim, DummyMeshEvaluator, Vector> amge( MPI_COMM_WORLD, dof_handler, cuda_handle); unsigned int const n_eigenvectors = 5; std::map<typename dealii::Triangulation<dim>::active_cell_iterator, typename dealii::DoFHandler<dim>::active_cell_iterator> patch_to_global_map; for (auto cell : dof_handler.active_cell_iterators()) patch_to_global_map[cell] = cell; dealii::AffineConstraints<double> constraints; DiagonalTestMeshEvaluator<dim> evaluator(cuda_handle, dof_handler, constraints); double *eigenvalues_dev; double *eigenvectors_dev; double *diag_elements_dev; std::vector<dealii::types::global_dof_index> dof_indices_map; std::tie(eigenvalues_dev, eigenvectors_dev, diag_elements_dev, dof_indices_map) = amge.compute_local_eigenvectors(n_eigenvectors, triangulation, patch_to_global_map, evaluator); unsigned int const n_dofs = dof_handler.n_dofs(); std::vector<dealii::types::global_dof_index> ref_dof_indices_map(n_dofs); std::iota(ref_dof_indices_map.begin(), ref_dof_indices_map.end(), 0); // We cannot use 
BOOST_TEST because it uses variadic template and there is // bug in CUDA 7.0 and CUDA 8.0 with variadic templates // See http://www.boost.org/doc/libs/1_66_0/boost/config/compiler/nvcc.hpp for (unsigned int i = 0; i < n_dofs; ++i) BOOST_CHECK_EQUAL(dof_indices_map[i], ref_dof_indices_map[i]); unsigned int const eigenvector_size = 30; std::vector<double> ref_eigenvalues(n_eigenvectors); std::vector<dealii::Vector<double>> ref_eigenvectors( n_eigenvectors, dealii::Vector<double>(eigenvector_size)); for (unsigned int i = 0; i < n_eigenvectors; ++i) { ref_eigenvalues[i] = static_cast<double>(i + 1); ref_eigenvectors[i][i] = 1.; } hipError_t cuda_error_code; for (unsigned int i = 0; i < n_eigenvectors; ++i) { std::vector<double> eigenvalues(n_eigenvectors); cuda_error_code = hipMemcpy(&eigenvalues[0], eigenvalues_dev, n_eigenvectors * sizeof(double), hipMemcpyDeviceToHost); mfmg::ASSERT_CUDA(cuda_error_code); BOOST_CHECK_CLOSE(eigenvalues[i], ref_eigenvalues[i], 1e-12); std::vector<double> eigenvectors(n_eigenvectors * eigenvector_size); cuda_error_code = hipMemcpy(&eigenvectors[0], eigenvectors_dev, n_eigenvectors * eigenvector_size * sizeof(double), hipMemcpyDeviceToHost); mfmg::ASSERT_CUDA(cuda_error_code); for (unsigned int j = 0; j < eigenvector_size; ++j) BOOST_CHECK_CLOSE(std::abs(eigenvectors[i * eigenvector_size + j]), ref_eigenvectors[i][j], 1e-12); } // Free memory allocated on device mfmg::cuda_free(eigenvalues_dev); mfmg::cuda_free(eigenvectors_dev); mfmg::cuda_free(diag_elements_dev); }
d3185539913fd49fa45d52706c2e6a81bd103a34.cu
/************************************************************************** * Copyright (c) 2017-2019 by the mfmg authors * * All rights reserved. * * * * This file is part of the mfmg library. mfmg is distributed under a BSD * * 3-clause license. For the licensing terms see the LICENSE file in the * * top-level directory * * * * SPDX-License-Identifier: BSD-3-Clause * *************************************************************************/ #define BOOST_TEST_MODULE eigenvectors_device #include <mfmg/cuda/amge_device.cuh> #include <mfmg/cuda/cuda_mesh_evaluator.cuh> #include <mfmg/cuda/sparse_matrix_device.cuh> #include <mfmg/cuda/utils.cuh> #include <deal.II/distributed/tria.h> #include <deal.II/dofs/dof_accessor.h> #include <deal.II/fe/fe_q.h> #include <deal.II/grid/grid_generator.h> #include <deal.II/lac/la_parallel_vector.h> #include "main.cc" template <int dim> class DiagonalTestMeshEvaluator : public mfmg::CudaMeshEvaluator<dim> { public: DiagonalTestMeshEvaluator(mfmg::CudaHandle const &cuda_handle, dealii::DoFHandler<dim> &dof_handler, dealii::AffineConstraints<double> &constraints) : mfmg::CudaMeshEvaluator<dim>(cuda_handle, dof_handler, constraints) { } virtual ~DiagonalTestMeshEvaluator() override = default; void evaluate_agglomerate( dealii::DoFHandler<2> &dof_handler, dealii::AffineConstraints<double> &constraint_matrix, mfmg::SparseMatrixDevice<double> &system_matrix_dev) const override final { // Build the matrix on the host dealii::FE_Q<2> fe(1); dof_handler.distribute_dofs(fe); constraint_matrix.clear(); dealii::SparsityPattern system_sparsity_pattern; dealii::SparseMatrix<double> system_matrix; unsigned int const size = 30; std::vector<std::vector<unsigned int>> column_indices( size, std::vector<unsigned int>(1)); for (unsigned int i = 0; i < size; ++i) column_indices[i][0] = i; system_sparsity_pattern.copy_from(size, size, column_indices.begin(), column_indices.end()); system_matrix.reinit(system_sparsity_pattern); for (unsigned int i = 0; i < 
size; ++i) { system_matrix.diag_element(i) = static_cast<double>(i + 1); } // Move the matrices to the device system_matrix_dev = std::move(mfmg::convert_matrix(system_matrix)); } void evaluate_global( dealii::DoFHandler<2> &dof_handler, dealii::AffineConstraints<double> &constraint_matrix, mfmg::SparseMatrixDevice<double> &system_matrix_dev) const override final { } }; BOOST_AUTO_TEST_CASE(diagonal) { int constexpr dim = 2; using Vector = dealii::LinearAlgebra::distributed::Vector<double>; using DummyMeshEvaluator = mfmg::CudaMeshEvaluator<dim>; dealii::parallel::distributed::Triangulation<2> triangulation(MPI_COMM_WORLD); dealii::GridGenerator::hyper_cube(triangulation); triangulation.refine_global(3); dealii::FE_Q<dim> fe(1); dealii::DoFHandler<dim> dof_handler(triangulation); dof_handler.distribute_dofs(fe); // Initialize the CUDA libraries mfmg::CudaHandle cuda_handle; mfmg::AMGe_device<dim, DummyMeshEvaluator, Vector> amge( MPI_COMM_WORLD, dof_handler, cuda_handle); unsigned int const n_eigenvectors = 5; std::map<typename dealii::Triangulation<dim>::active_cell_iterator, typename dealii::DoFHandler<dim>::active_cell_iterator> patch_to_global_map; for (auto cell : dof_handler.active_cell_iterators()) patch_to_global_map[cell] = cell; dealii::AffineConstraints<double> constraints; DiagonalTestMeshEvaluator<dim> evaluator(cuda_handle, dof_handler, constraints); double *eigenvalues_dev; double *eigenvectors_dev; double *diag_elements_dev; std::vector<dealii::types::global_dof_index> dof_indices_map; std::tie(eigenvalues_dev, eigenvectors_dev, diag_elements_dev, dof_indices_map) = amge.compute_local_eigenvectors(n_eigenvectors, triangulation, patch_to_global_map, evaluator); unsigned int const n_dofs = dof_handler.n_dofs(); std::vector<dealii::types::global_dof_index> ref_dof_indices_map(n_dofs); std::iota(ref_dof_indices_map.begin(), ref_dof_indices_map.end(), 0); // We cannot use BOOST_TEST because it uses variadic template and there is // bug in CUDA 7.0 and 
CUDA 8.0 with variadic templates // See http://www.boost.org/doc/libs/1_66_0/boost/config/compiler/nvcc.hpp for (unsigned int i = 0; i < n_dofs; ++i) BOOST_CHECK_EQUAL(dof_indices_map[i], ref_dof_indices_map[i]); unsigned int const eigenvector_size = 30; std::vector<double> ref_eigenvalues(n_eigenvectors); std::vector<dealii::Vector<double>> ref_eigenvectors( n_eigenvectors, dealii::Vector<double>(eigenvector_size)); for (unsigned int i = 0; i < n_eigenvectors; ++i) { ref_eigenvalues[i] = static_cast<double>(i + 1); ref_eigenvectors[i][i] = 1.; } cudaError_t cuda_error_code; for (unsigned int i = 0; i < n_eigenvectors; ++i) { std::vector<double> eigenvalues(n_eigenvectors); cuda_error_code = cudaMemcpy(&eigenvalues[0], eigenvalues_dev, n_eigenvectors * sizeof(double), cudaMemcpyDeviceToHost); mfmg::ASSERT_CUDA(cuda_error_code); BOOST_CHECK_CLOSE(eigenvalues[i], ref_eigenvalues[i], 1e-12); std::vector<double> eigenvectors(n_eigenvectors * eigenvector_size); cuda_error_code = cudaMemcpy(&eigenvectors[0], eigenvectors_dev, n_eigenvectors * eigenvector_size * sizeof(double), cudaMemcpyDeviceToHost); mfmg::ASSERT_CUDA(cuda_error_code); for (unsigned int j = 0; j < eigenvector_size; ++j) BOOST_CHECK_CLOSE(std::abs(eigenvectors[i * eigenvector_size + j]), ref_eigenvectors[i][j], 1e-12); } // Free memory allocated on device mfmg::cuda_free(eigenvalues_dev); mfmg::cuda_free(eigenvectors_dev); mfmg::cuda_free(diag_elements_dev); }
e90ac3b2b82009fbe14f31c6ef6f90015af761e3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "MWDepthConcatenationLayerImpl.hpp" #include "MWDepthConcatenationLayer.hpp" #include "MWTargetNetworkImpl.hpp" #include <stdarg.h> #include <cassert> MWDepthConcatenationLayerImpl::MWDepthConcatenationLayerImpl(MWCNNLayer* layer, MWTargetNetworkImpl* ntwk_impl, int outbufIdx) : MWCNNLayerImpl(layer, ntwk_impl) { createDepthConcatenationLayer(outbufIdx); } MWDepthConcatenationLayerImpl::~MWDepthConcatenationLayerImpl() { } void MWDepthConcatenationLayerImpl::createDepthConcatenationLayer(int outbufIdx) { MWTensor* opTensor = getLayer()->getOutputTensor(0); if (outbufIdx < 0) { CUDA_CALL(hipMalloc((void**)&PtkeOkuClHzhOfpmBevf, sizeof(float)*opTensor->getHeight()*opTensor->getWidth()*opTensor->getChannels()*opTensor->getBatchSize())); } else { setData(enPbWLzEmxYCBmzGJutZ->memBuffer[outbufIdx]); opTensor->setopBufIndex(outbufIdx); } CUDNN_CALL(cudnnCreateTensorDescriptor(getOutputDescriptor(0))); CUDNN_CALL(cudnnSetTensor4dDescriptor(*getOutputDescriptor(0), CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, opTensor->getBatchSize(), opTensor->getChannels(), opTensor->getHeight(), opTensor->getWidth())); } void __global__ __launch_bounds__(1024) concatImpl(float* in, float* out, size_t numElems, size_t batchSize, size_t outStride, size_t startOffset) { size_t i = blockDim.x * blockIdx.x + threadIdx.x; size_t maxElems = numElems*batchSize; for (; i < maxElems; i += size_t(blockDim.x*gridDim.x)) { size_t batchOffset = i/numElems; size_t elemOffset = i - (batchOffset*numElems); int outOffset = startOffset + batchOffset*outStride; out[elemOffset + outOffset] = in[i]; } } void MWDepthConcatenationLayerImpl::predict() { int outputOffset = 0; MWTensor* opTensor = getLayer()->getOutputTensor(0); int outputStridePerBatch = opTensor->getHeight()*opTensor->getWidth()*opTensor->getChannels(); for (int k = 0; k < getLayer()->getNumInputs(); k++) { MWTensor* ipTensor = 
getLayer()->getInputTensor(k); int fSKMHAqIghbYYgyIpNDw = ipTensor->getBatchSize()* ipTensor->getHeight()* ipTensor->getWidth()* ipTensor->getChannels(); int tnTPxeDjBsqLAPkJcPJX = ::ceil(static_cast<float>(fSKMHAqIghbYYgyIpNDw)/static_cast<float>(32))*32; tnTPxeDjBsqLAPkJcPJX = (tnTPxeDjBsqLAPkJcPJX < 1024) ? tnTPxeDjBsqLAPkJcPJX : 1024; int MNuwXDSoGEYeABeVTwOh = (fSKMHAqIghbYYgyIpNDw + tnTPxeDjBsqLAPkJcPJX - 1)/tnTPxeDjBsqLAPkJcPJX; int numElemsPerBatch = ipTensor->getHeight()*ipTensor->getWidth()*ipTensor->getChannels(); hipLaunchKernelGGL(( concatImpl), dim3(MNuwXDSoGEYeABeVTwOh), dim3(tnTPxeDjBsqLAPkJcPJX), 0, 0, ipTensor->getData(), getData(), numElemsPerBatch, ipTensor->getBatchSize(), outputStridePerBatch, outputOffset); outputOffset += numElemsPerBatch; } } void MWDepthConcatenationLayerImpl::cleanup() { CUDNN_CALL(cudnnDestroyTensorDescriptor(*getOutputDescriptor(0))); if(PtkeOkuClHzhOfpmBevf) { if(getLayer()->getOutputTensor(0)->getopBufIndex() < 0) CUDA_FREE_CALL(PtkeOkuClHzhOfpmBevf); } }
e90ac3b2b82009fbe14f31c6ef6f90015af761e3.cu
#include "MWDepthConcatenationLayerImpl.hpp" #include "MWDepthConcatenationLayer.hpp" #include "MWTargetNetworkImpl.hpp" #include <stdarg.h> #include <cassert> MWDepthConcatenationLayerImpl::MWDepthConcatenationLayerImpl(MWCNNLayer* layer, MWTargetNetworkImpl* ntwk_impl, int outbufIdx) : MWCNNLayerImpl(layer, ntwk_impl) { createDepthConcatenationLayer(outbufIdx); } MWDepthConcatenationLayerImpl::~MWDepthConcatenationLayerImpl() { } void MWDepthConcatenationLayerImpl::createDepthConcatenationLayer(int outbufIdx) { MWTensor* opTensor = getLayer()->getOutputTensor(0); if (outbufIdx < 0) { CUDA_CALL(cudaMalloc((void**)&PtkeOkuClHzhOfpmBevf, sizeof(float)*opTensor->getHeight()*opTensor->getWidth()*opTensor->getChannels()*opTensor->getBatchSize())); } else { setData(enPbWLzEmxYCBmzGJutZ->memBuffer[outbufIdx]); opTensor->setopBufIndex(outbufIdx); } CUDNN_CALL(cudnnCreateTensorDescriptor(getOutputDescriptor(0))); CUDNN_CALL(cudnnSetTensor4dDescriptor(*getOutputDescriptor(0), CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, opTensor->getBatchSize(), opTensor->getChannels(), opTensor->getHeight(), opTensor->getWidth())); } void __global__ __launch_bounds__(1024) concatImpl(float* in, float* out, size_t numElems, size_t batchSize, size_t outStride, size_t startOffset) { size_t i = blockDim.x * blockIdx.x + threadIdx.x; size_t maxElems = numElems*batchSize; for (; i < maxElems; i += size_t(blockDim.x*gridDim.x)) { size_t batchOffset = i/numElems; size_t elemOffset = i - (batchOffset*numElems); int outOffset = startOffset + batchOffset*outStride; out[elemOffset + outOffset] = in[i]; } } void MWDepthConcatenationLayerImpl::predict() { int outputOffset = 0; MWTensor* opTensor = getLayer()->getOutputTensor(0); int outputStridePerBatch = opTensor->getHeight()*opTensor->getWidth()*opTensor->getChannels(); for (int k = 0; k < getLayer()->getNumInputs(); k++) { MWTensor* ipTensor = getLayer()->getInputTensor(k); int fSKMHAqIghbYYgyIpNDw = ipTensor->getBatchSize()* ipTensor->getHeight()* 
ipTensor->getWidth()* ipTensor->getChannels(); int tnTPxeDjBsqLAPkJcPJX = std::ceil(static_cast<float>(fSKMHAqIghbYYgyIpNDw)/static_cast<float>(32))*32; tnTPxeDjBsqLAPkJcPJX = (tnTPxeDjBsqLAPkJcPJX < 1024) ? tnTPxeDjBsqLAPkJcPJX : 1024; int MNuwXDSoGEYeABeVTwOh = (fSKMHAqIghbYYgyIpNDw + tnTPxeDjBsqLAPkJcPJX - 1)/tnTPxeDjBsqLAPkJcPJX; int numElemsPerBatch = ipTensor->getHeight()*ipTensor->getWidth()*ipTensor->getChannels(); concatImpl<<<MNuwXDSoGEYeABeVTwOh, tnTPxeDjBsqLAPkJcPJX>>>(ipTensor->getData(), getData(), numElemsPerBatch, ipTensor->getBatchSize(), outputStridePerBatch, outputOffset); outputOffset += numElemsPerBatch; } } void MWDepthConcatenationLayerImpl::cleanup() { CUDNN_CALL(cudnnDestroyTensorDescriptor(*getOutputDescriptor(0))); if(PtkeOkuClHzhOfpmBevf) { if(getLayer()->getOutputTensor(0)->getopBufIndex() < 0) CUDA_FREE_CALL(PtkeOkuClHzhOfpmBevf); } }
861e9e7e9972b424147ca29b0da9011da6d921d5.hip
// !!! This is a file automatically generated by hipify!!! /* * ============================================================================ * * Authors: Prashant Pandey <ppandey@cs.stonybrook.edu> * Rob Johnson <robj@vmware.com> * Hunter McCoy <hjmccoy@lbl.gov> * * ============================================================================ */ #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <stdlib.h> #include <assert.h> #include <string.h> #include <inttypes.h> #include <stdio.h> #include <unistd.h> #include <math.h> #include <time.h> #include <sys/mman.h> #include <sys/stat.h> #include <fcntl.h> //timing stuff #include <chrono> #include <iostream> #include <cmath> //how fast is a thrust sort? #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/fill.h> #include <thrust/memory.h> #include <thrust/device_malloc.h> #include <thrust/device_free.h> #include "hashutil.cuh" #include "gqf.cuh" #include "gqf_int.cuh" #include <stdexcept> #include <hip/hip_runtime_api.h> /****************************************************************** * Code for managing the metadata bits and slots w/o interpreting * * the content of the slots. ******************************************************************/ #define MAX_VALUE(nbits) ((1ULL << (nbits)) - 1) #define BITMASK(nbits) \ ((nbits) == 64 ? 
0xffffffffffffffff : MAX_VALUE(nbits)) #define NUM_SLOTS_TO_LOCK (1ULL<<13) #define LOCK_DIST 64 #define EXP_BEFORE_FAILURE -15 #define CLUSTER_SIZE (1ULL<<14) #define METADATA_WORD(qf,field,slot_index) \ (get_block((qf), (slot_index) / QF_SLOTS_PER_BLOCK)->field[((slot_index) % QF_SLOTS_PER_BLOCK) / 64]) #define GET_NO_LOCK(flag) (flag & QF_NO_LOCK) #define GET_TRY_ONCE_LOCK(flag) (flag & QF_TRY_ONCE_LOCK) #define GET_WAIT_FOR_LOCK(flag) (flag & QF_WAIT_FOR_LOCK) #define GET_KEY_HASH(flag) (flag & QF_KEY_IS_HASH) #define NUM_BUFFERS 10 #define MAX_BUFFER_SIZE 100 #define CYCLES_PER_SECOND 1601000000 #define MAX_DEPTH 16 #define SELECT_BOUND 32 #define DEBUG_ASSERTS 0 #define DROP_ON_RUNEND 0 #define RUNEND_CUTOFF 15 #define DROP_ON_BIG_CLUSTER 0 #define BIG_CLUSTER_DROPOFF 4096 #define DISTANCE_FROM_HOME_SLOT_CUTOFF 1000 #define BILLION 1000000000L #define CUDA_CHECK(ans) \ gpuAssert((ans), __FILE__, __LINE__); inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true) { if (code != hipSuccess) { printf("GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __constant__ char kmer_vals[6] = {'F', 'A', 'C', 'T', 'G', '0'}; #ifdef DEBUG #define PRINT_DEBUG 1 #else #define PRINT_DEBUG 0 #endif #define DEBUG_CQF(fmt, ...) 
\ do { if (PRINT_DEBUG) printf( fmt, __VA_ARGS__); } while (0) #define DEBUG_DUMP(qf) \ do { if (PRINT_DEBUG) qf_dump_metadata(qf); } while (0) #if QF_BITS_PER_SLOT > 0 __host__ __device__ static inline qfblock* get_block(const QF* qf, uint64_t block_index) { return &qf->blocks[block_index]; } #else __host__ __device__ static inline qfblock* get_block(const QF* qf, uint64_t block_index) { return (qfblock*)(((char*)qf->blocks) + block_index * (sizeof(qfblock) + QF_SLOTS_PER_BLOCK * qf->metadata->bits_per_slot / 8)); } #endif /* __device__ static __inline__ unsigned long long rdtsc(void) { unsigned hi, lo; __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi)); return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 ); } */ /* __host__ __device__ static void modify_metadata(pc_t *metadata, int cnt) { pc_add(metadata, cnt); return; } */ /*changing sizes of register based on https://docs.nvidia.com/cuda/inline-ptx-assembly/index.html l is for "l" = .u64 reg */ __host__ __device__ static inline int popcnt(uint64_t val) { #ifdef __CUDA_ARCH__ val = __popcll(val); #else #ifndef __x86_64 val = __builtin_popcount(val); #else asm("popcnt %[val], %[val]" : [val] "+r" (val) : : "cc"); #endif #endif return val; } // __device__ static inline int64_t bitscanreverse(uint64_t val) // { // if (val == 0) { // return -1; // } else { // asm("bsr %[val], %[val]" // : [val] "+l" (val) // : // : ); // return val; // } // } __host__ __device__ static inline int popcntv(const uint64_t val, int ignore) { if (ignore % 64) return popcnt (val & ~BITMASK(ignore % 64)); else return popcnt(val); } // Returns the number of 1s up to (and including) the pos'th bit // Bits are numbered from 0 __host__ __device__ static inline int bitrank(uint64_t val, int pos) { val = val & ((2ULL << pos) - 1); #ifdef __CUDA_ARCH__ val = __popcll(val); #else //quick fix for summit #ifndef __x86_64 val = __builtin_popcount(val); #else asm("popcnt %[val], %[val]" : [val] "+r" (val) : : "cc"); #endif #endif return 
val; } //moved dump functions __host__ __device__ static inline void qf_dump_block(const QF *qf, uint64_t i) { uint64_t j; printf("Block %llu Runs from %llu to %llu\n",i, i*QF_SLOTS_PER_BLOCK, (i+1)*QF_SLOTS_PER_BLOCK); printf("Offset: %-192d", get_block(qf, i)->offset); printf("\n"); for (j = 0; j < QF_SLOTS_PER_BLOCK; j++) printf("%02lx ", j); printf("\n"); for (j = 0; j < QF_SLOTS_PER_BLOCK; j++) printf(" %d ", (get_block(qf, i)->occupieds[j/64] & (1ULL << (j%64))) ? 1 : 0); printf("\n"); for (j = 0; j < QF_SLOTS_PER_BLOCK; j++) printf(" %d ", (get_block(qf, i)->runends[j/64] & (1ULL << (j%64))) ? 1 : 0); printf("\n"); #if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 for (j = 0; j < QF_SLOTS_PER_BLOCK; j++) printf("%02x ", get_block(qf, i)->slots[j]); #elif QF_BITS_PER_SLOT == 64 for (j = 0; j < QF_SLOTS_PER_BLOCK; j++) printf("%02lx ", get_block(qf, i)->slots[j]); #else for (j = 0; j < QF_SLOTS_PER_BLOCK * qf->metadata->bits_per_slot / 8; j++) printf("%02x ", get_block(qf, i)->slots[j]); #endif printf("\n"); printf("\n"); } __host__ __device__ void qf_dump_metadata(const QF *qf) { printf("Slots: %lu Occupied: %lu Elements: %lu Distinct: %lu\n", qf->metadata->nslots, qf->metadata->noccupied_slots, qf->metadata->nelts, qf->metadata->ndistinct_elts); printf("Key_bits: %lu Value_bits: %lu Remainder_bits: %lu Bits_per_slot: %lu\n", qf->metadata->key_bits, qf->metadata->value_bits, qf->metadata->key_remainder_bits, qf->metadata->bits_per_slot); } __host__ __device__ void qf_dump(const QF *qf) { uint64_t i; printf("%lu %lu %lu\n", qf->metadata->nblocks, qf->metadata->ndistinct_elts, qf->metadata->nelts); for (i = 0; i < qf->metadata->nblocks; i++) { qf_dump_block(qf, i); } } /** * Returns the position of the k-th 1 in the 64-bit word x. * k is 0-based, so k=0 returns the position of the first 1. * * Uses the broadword selection algorithm by Vigna [1], improved by Gog * and Petri [2] and Vigna [3]. * * [1] Sebastiano Vigna. 
Broadword Implementation of Rank/Select * Queries. WEA, 2008 * * [2] Simon Gog, Matthias Petri. Optimized succinct data * structures for massive data. Softw. Pract. Exper., 2014 * * [3] Sebastiano Vigna. MG4J 5.2.1. http://mg4j.di.unimi.it/ * The following code is taken from * https://github.com/facebook/folly/blob/b28186247104f8b90cfbe094d289c91f9e413317/folly/experimental/Select64.h */ __device__ __constant__ uint8_t gpukSelectInByte[2048] = { 8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 8, 8, 8, 1, 8, 2, 2, 1, 8, 3, 3, 1, 3, 2, 2, 1, 8, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 7, 7, 1, 7, 2, 2, 1, 7, 3, 3, 1, 3, 2, 2, 1, 7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 8, 8, 8, 
8, 8, 8, 2, 8, 8, 8, 3, 8, 3, 3, 2, 8, 8, 8, 4, 8, 4, 4, 2, 8, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 5, 8, 5, 5, 2, 8, 5, 5, 3, 5, 3, 3, 2, 8, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 6, 8, 6, 6, 2, 8, 6, 6, 3, 6, 3, 3, 2, 8, 6, 6, 4, 6, 4, 4, 2, 6, 4, 4, 3, 4, 3, 3, 2, 8, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, 6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 7, 8, 7, 7, 2, 8, 7, 7, 3, 7, 3, 3, 2, 8, 7, 7, 4, 7, 4, 4, 2, 7, 4, 4, 3, 4, 3, 3, 2, 8, 7, 7, 5, 7, 5, 5, 2, 7, 5, 5, 3, 5, 3, 3, 2, 7, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 7, 7, 6, 7, 6, 6, 2, 7, 6, 6, 3, 6, 3, 3, 2, 7, 6, 6, 4, 6, 4, 4, 2, 6, 4, 4, 3, 4, 3, 3, 2, 7, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, 6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 3, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 4, 8, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 5, 8, 5, 5, 3, 8, 8, 8, 5, 8, 5, 5, 4, 8, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 3, 8, 8, 8, 6, 8, 6, 6, 4, 8, 6, 6, 4, 6, 4, 4, 3, 8, 8, 8, 6, 8, 6, 6, 5, 8, 6, 6, 5, 6, 5, 5, 3, 8, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 3, 8, 8, 8, 7, 8, 7, 7, 4, 8, 7, 7, 4, 7, 4, 4, 3, 8, 8, 8, 7, 8, 7, 7, 5, 8, 7, 7, 5, 7, 5, 5, 3, 8, 7, 7, 5, 7, 5, 5, 4, 7, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 3, 8, 7, 7, 6, 7, 6, 6, 4, 7, 6, 6, 4, 6, 4, 4, 3, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 3, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 5, 8, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 4, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 5, 8, 8, 8, 6, 8, 6, 6, 5, 8, 6, 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 
7, 4, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 5, 8, 8, 8, 7, 8, 7, 7, 5, 8, 7, 7, 5, 7, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 4, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 5, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7 }; // const uint8_t hostkSelectInByte[2048] = { // 8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, // 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, // 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, // 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, // 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 7, 0, // 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, // 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, // 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, // 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, // 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 8, 8, 8, 1, // 8, 2, 2, 1, 8, 3, 3, 1, 3, 2, 2, 1, 8, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, // 2, 1, 8, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, // 4, 3, 3, 1, 3, 2, 2, 1, 8, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, // 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, // 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 7, 7, 1, 7, 2, // 2, 1, 7, 3, 3, 1, 3, 2, 2, 1, 7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, // 7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 
4, 1, 4, 2, 2, 1, 4, 3, // 3, 1, 3, 2, 2, 1, 7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, 4, 1, // 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, // 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 8, 8, 8, 8, 8, 8, 2, // 8, 8, 8, 3, 8, 3, 3, 2, 8, 8, 8, 4, 8, 4, 4, 2, 8, 4, 4, 3, 4, 3, 3, 2, 8, 8, // 8, 5, 8, 5, 5, 2, 8, 5, 5, 3, 5, 3, 3, 2, 8, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, // 4, 3, 3, 2, 8, 8, 8, 6, 8, 6, 6, 2, 8, 6, 6, 3, 6, 3, 3, 2, 8, 6, 6, 4, 6, 4, // 4, 2, 6, 4, 4, 3, 4, 3, 3, 2, 8, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, // 6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 7, 8, 7, 7, 2, 8, 7, // 7, 3, 7, 3, 3, 2, 8, 7, 7, 4, 7, 4, 4, 2, 7, 4, 4, 3, 4, 3, 3, 2, 8, 7, 7, 5, // 7, 5, 5, 2, 7, 5, 5, 3, 5, 3, 3, 2, 7, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, // 3, 2, 8, 7, 7, 6, 7, 6, 6, 2, 7, 6, 6, 3, 6, 3, 3, 2, 7, 6, 6, 4, 6, 4, 4, 2, // 6, 4, 4, 3, 4, 3, 3, 2, 7, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, 6, 5, // 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 3, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 4, 8, 4, 4, 3, 8, 8, 8, 8, 8, 8, // 8, 5, 8, 8, 8, 5, 8, 5, 5, 3, 8, 8, 8, 5, 8, 5, 5, 4, 8, 5, 5, 4, 5, 4, 4, 3, // 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 3, 8, 8, 8, 6, 8, 6, 6, 4, 8, 6, // 6, 4, 6, 4, 4, 3, 8, 8, 8, 6, 8, 6, 6, 5, 8, 6, 6, 5, 6, 5, 5, 3, 8, 6, 6, 5, // 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, // 7, 3, 8, 8, 8, 7, 8, 7, 7, 4, 8, 7, 7, 4, 7, 4, 4, 3, 8, 8, 8, 7, 8, 7, 7, 5, // 8, 7, 7, 5, 7, 5, 5, 3, 8, 7, 7, 5, 7, 5, 5, 4, 7, 5, 5, 4, 5, 4, 4, 3, 8, 8, // 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 3, 8, 7, 7, 6, 7, 6, 6, 4, 7, 6, 6, 4, // 6, 4, 4, 3, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 3, 7, 6, 6, 5, 6, 5, // 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 5, 8, 8, 8, 
8, 8, 8, 8, 5, 8, 8, 8, 5, 8, 5, 5, 4, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, // 6, 4, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 5, 8, 8, 8, 6, 8, 6, 6, 5, // 8, 6, 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, // 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 4, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, // 8, 7, 7, 5, 8, 8, 8, 7, 8, 7, 7, 5, 8, 7, 7, 5, 7, 5, 5, 4, 8, 8, 8, 8, 8, 8, // 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 4, // 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 5, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, // 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, // 8, 6, 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, // 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 8, // 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, // 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7 // }; __host__ __device__ static inline uint64_t _select64(uint64_t x, int k) { if (k >= popcnt(x)) { return 64; } const uint64_t kOnesStep4 = 0x1111111111111111ULL; const uint64_t kOnesStep8 = 0x0101010101010101ULL; const uint64_t kMSBsStep8 = 0x80ULL * kOnesStep8; uint64_t s = x; s = s - ((s & 0xA * kOnesStep4) >> 1); s = (s & 0x3 * kOnesStep4) + ((s >> 2) & 0x3 * kOnesStep4); s = (s + (s >> 4)) & 0xF * kOnesStep8; uint64_t byteSums = s * kOnesStep8; uint64_t kStep8 = k * kOnesStep8; uint64_t geqKStep8 = (((kStep8 | kMSBsStep8) - byteSums) & kMSBsStep8); uint64_t place = popcnt(geqKStep8) * 8; uint64_t byteRank = k - (((byteSums << 8) >> place) & (uint64_t)(0xFF)); #ifdef __CUDA_ARCH__ return place + gpukSelectInByte[((x >> place) & 0xFF) | (byteRank << 8)]; #else abort(); return 0; //return place + hostkSelectInByte[((x >> place) & 0xFF) | (byteRank << 8)]; #endif // __CUDA_ARCH__ } // Returns the position of the rank'th 1. 
// (rank = 0 returns the 1st 1)
// Returns 64 if there are fewer than rank+1 1s.
// NOTE(review): "__SSE4_2_" is likely a typo for "__SSE4_2__", which would mean
// the pdep/tzcnt fast path below never compiles — confirm against the build flags.
__host__ __device__ static inline uint64_t bitselect(uint64_t val, int rank) {
#ifdef __SSE4_2_
  uint64_t i = 1ULL << rank;
  asm("pdep %[val], %[mask], %[val]"
      : [val] "+r" (val)
      : [mask] "r" (i));
  asm("tzcnt %[bit], %[index]"
      : [index] "=r" (i)
      : [bit] "g" (val)
      : "cc");
  return i;
#endif
  return _select64(val, rank);
}

// bitselect() over `val` with its low (ignore % 64) bits masked off first.
__host__ __device__ static inline uint64_t bitselectv(const uint64_t val, int ignore, int rank) {
  return bitselect(val & ~BITMASK(ignore % 64), rank);
}

// Nonzero iff the runends metadata bit for slot `index` is set
// (the slot is the last slot of some run).
__host__ __device__ static inline int is_runend(const QF *qf, uint64_t index) {
  return (METADATA_WORD(qf, runends, index) >> ((index % QF_SLOTS_PER_BLOCK) % 64)) & 1ULL;
}

// Nonzero iff the occupieds metadata bit for slot `index` is set
// (some hash maps to this canonical slot).
__host__ __device__ static inline int is_occupied(const QF *qf, uint64_t index) {
  return (METADATA_WORD(qf, occupieds, index) >> ((index % QF_SLOTS_PER_BLOCK) % 64)) & 1ULL;
}

#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64

// Fixed-width slot variant: each slot is exactly one element of the block's
// slots array, so reads/writes are direct array accesses.
__host__ __device__ static inline uint64_t get_slot(const QF *qf, uint64_t index) {
  //ERR: Index passed in is incorrect
  //printf("slots %lu, index %lu\n", qf->metadata->nslots, index);
#if DEBUG_ASSERTS
  assert(index < qf->metadata->xnslots);
#endif
  return get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[index % QF_SLOTS_PER_BLOCK];
}

// Store `value` (truncated to bits_per_slot bits) into slot `index`.
__host__ __device__ static inline void set_slot(const QF *qf, uint64_t index, uint64_t value) {
#if DEBUG_ASSERTS
  assert(index < qf->metadata->xnslots);
#endif
  get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[index % QF_SLOTS_PER_BLOCK] = value & BITMASK(qf->metadata->bits_per_slot);
}

#elif QF_BITS_PER_SLOT > 0

/* Little-endian code ....  Big-endian is TODO */

// Compile-time bit-packed slot variant: slots are QF_BITS_PER_SLOT bits wide
// and packed into the byte array; an unaligned 64-bit word is read and the
// slot's bits extracted by shift/mask.
__host__ __device__ static inline uint64_t get_slot(const QF *qf, uint64_t index) {
  /* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
   * to generate buggy code.  :/  */
  //printf("Other get slot: slots %lu, index %lu\n", qf->metadata->nslots, index);
#if DEBUG_ASSERTS
  assert(index < qf->metadata->xnslots);
#endif
  uint64_t *p = (uint64_t *)&get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[(index % QF_SLOTS_PER_BLOCK) * QF_BITS_PER_SLOT / 8];
  return (uint64_t)(((*p) >> (((index % QF_SLOTS_PER_BLOCK) * QF_BITS_PER_SLOT) % 8)) & BITMASK(QF_BITS_PER_SLOT));
}

// Read-modify-write of the packed slot: clear the slot's bit field in the
// containing word, then OR in the shifted value.
__host__ __device__ static inline void set_slot(const QF *qf, uint64_t index, uint64_t value) {
  /* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
   * to generate buggy code.  :/  */
#if DEBUG_ASSERTS
  assert(index < qf->metadata->xnslots);
#endif
  uint64_t *p = (uint64_t *)&get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[(index % QF_SLOTS_PER_BLOCK) * QF_BITS_PER_SLOT / 8];
  uint64_t t = *p;
  uint64_t mask = BITMASK(QF_BITS_PER_SLOT);
  uint64_t v = value;
  int shift = ((index % QF_SLOTS_PER_BLOCK) * QF_BITS_PER_SLOT) % 8;
  mask <<= shift;
  v <<= shift;
  t &= ~mask;
  t |= v;
  *p = t;
}

#else

/* Little-endian code ....  Big-endian is TODO */

// Runtime bit-packed slot variant: same scheme as above, but the slot width
// comes from qf->metadata->bits_per_slot instead of a compile-time constant.
__host__ __device__ static inline uint64_t get_slot(const QF *qf, uint64_t index) {
  //rintf("Third get slot?!? slots %lu, index %lu\n", qf->metadata->nslots, index);
#if DEBUG_ASSERTS
  assert(index < qf->metadata->xnslots);
#endif
  /* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
   * to generate buggy code.  :/  */
  uint64_t *p = (uint64_t *)&get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[(index %QF_SLOTS_PER_BLOCK)* qf->metadata->bits_per_slot / 8];
  return (uint64_t)(((*p) >> (((index % QF_SLOTS_PER_BLOCK) *qf->metadata->bits_per_slot) % 8)) & BITMASK(qf->metadata->bits_per_slot));
}

__host__ __device__ static inline void set_slot(const QF *qf, uint64_t index, uint64_t value) {
#if DEBUG_ASSERTS
  assert(index < qf->metadata->xnslots);
#endif
  /* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
   * to generate buggy code.  :/  */
  uint64_t *p = (uint64_t *)&get_block(qf, index /QF_SLOTS_PER_BLOCK)->slots[(index %QF_SLOTS_PER_BLOCK)* qf->metadata->bits_per_slot / 8];
  uint64_t t = *p;
  uint64_t mask = BITMASK(qf->metadata->bits_per_slot);
  uint64_t v = value;
  int shift = ((index % QF_SLOTS_PER_BLOCK) * qf->metadata->bits_per_slot) % 8;
  mask <<= shift;
  v <<= shift;
  t &= ~mask;
  t |= v;
  *p = t;
}

#endif

__host__ __device__ static inline uint64_t run_end(const QF *qf, uint64_t hash_bucket_index);

// Offset of block `blockidx`: how far the runs of earlier buckets spill into
// this block.  Falls back to run_end() when the stored offset field has
// saturated (only possible with a 1-byte offset field).
__host__ __device__ static inline uint64_t block_offset(const QF *qf, uint64_t blockidx) {
  /* If we have extended counters and a 16-bit (or larger) offset field, then
   * we can safely ignore the possibility of overflowing that field. */
  if (sizeof(qf->blocks[0].offset) > 1 ||
      get_block(qf, blockidx)->offset < BITMASK(8*sizeof(qf->blocks[0].offset)))
    return get_block(qf, blockidx)->offset;

  return run_end(qf, QF_SLOTS_PER_BLOCK * blockidx - 1) - QF_SLOTS_PER_BLOCK * blockidx + 1;
}

// Index of the last slot of the run belonging to `hash_bucket_index`
// (returns hash_bucket_index itself when the bucket has no run extending
// past it).  Uses occupieds rank + runends select, walking forward across
// blocks when the matching runend lies beyond the bucket's block.
__host__ __device__ static inline uint64_t run_end(const QF *qf, uint64_t hash_bucket_index) {
  uint64_t bucket_block_index = hash_bucket_index / QF_SLOTS_PER_BLOCK;
  uint64_t bucket_intrablock_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
  uint64_t bucket_blocks_offset = block_offset(qf, bucket_block_index);

  // Rank of this bucket among the occupied buckets of its block.
  uint64_t bucket_intrablock_rank = bitrank(get_block(qf, bucket_block_index)->occupieds[0], bucket_intrablock_offset);

  if (bucket_intrablock_rank == 0) {
    if (bucket_blocks_offset <= bucket_intrablock_offset)
      return hash_bucket_index;
    else
      return QF_SLOTS_PER_BLOCK * bucket_block_index + bucket_blocks_offset - 1;
  }

  uint64_t runend_block_index = bucket_block_index + bucket_blocks_offset / QF_SLOTS_PER_BLOCK;
  uint64_t runend_ignore_bits = bucket_blocks_offset % QF_SLOTS_PER_BLOCK;
  uint64_t runend_rank = bucket_intrablock_rank - 1;
  uint64_t runend_block_offset = bitselectv(get_block(qf, runend_block_index)->runends[0], runend_ignore_bits, runend_rank);

  if (runend_block_offset == QF_SLOTS_PER_BLOCK) {
    if (bucket_blocks_offset == 0 && bucket_intrablock_rank == 0) {
      /* The block begins in empty space, and this bucket is in that region of
       * empty space */
      return hash_bucket_index;
    } else {
      // The runend we want is in a later block: consume this block's runends
      // from the rank and keep scanning forward.
      do {
        runend_rank -= popcntv(get_block(qf, runend_block_index)->runends[0], runend_ignore_bits);
        runend_block_index++;
        runend_ignore_bits = 0;
        runend_block_offset = bitselectv(get_block(qf, runend_block_index)->runends[0], runend_ignore_bits, runend_rank);
      } while (runend_block_offset == QF_SLOTS_PER_BLOCK);
    }
  }

  uint64_t runend_index = QF_SLOTS_PER_BLOCK * runend_block_index + runend_block_offset;
  if (runend_index < hash_bucket_index)
    return hash_bucket_index;
  else
    return runend_index;
}

// Lower bound on the number of slots at/after `slot_index` still claimed by
// runs (0 means the slot is definitely free).  Requires 64 slots per block
// because it works on whole occupieds/runends words.
__host__ __device__ static inline int offset_lower_bound(const QF *qf, uint64_t slot_index) {
  const qfblock * b = get_block(qf, slot_index / QF_SLOTS_PER_BLOCK);
  const uint64_t slot_offset = slot_index % QF_SLOTS_PER_BLOCK;
  const uint64_t boffset = b->offset;
  const uint64_t occupieds = b->occupieds[0] & BITMASK(slot_offset+1);
  //printf("slot %llu, slot_offset %02lx, block offset %llu, occupieds: %d ", slot_index, slot_offset, boffset, popcnt(occupieds));
#if DEBUG_ASSERTS
  assert(QF_SLOTS_PER_BLOCK == 64);
#endif
  //if (boffset < slot_offset) {
  if (boffset <= slot_offset) {
    const uint64_t runends = (b->runends[0] & BITMASK(slot_offset)) >> boffset;
    //printf(" runends %d\n", popcnt(runends));
    //printf("boffset < slot_offset, runends %llu, popcnt(occupieds) %d, popcnt(runends) %d\n", runends, popcnt(occupieds), popcnt(runends));
    //printf("returning %d\n", popcnt(occupieds)-popcnt(runends));
    return popcnt(occupieds) - popcnt(runends);
  }
  //printf("\n");
  //printf("boffset > slot_offset, boffset-slotoffset %llu, popcnt(occupieds) %d\n", boffset-slot_offset, popcnt(occupieds));
  //printf("returning %d\n", boffset-slot_offset+popcnt(occupieds));
  return boffset - slot_offset + popcnt(occupieds);
}

// A slot is provably empty when no run can reach it.
__host__ __device__ static inline int is_empty(const QF *qf, uint64_t slot_index) {
  return offset_lower_bound(qf, slot_index) == 0;
}
// A slot might be empty only if it neither carries an occupied bit nor
// terminates a run; this is a cheap necessary (not sufficient) test.
__host__ __device__ static inline int might_be_empty(const QF *qf, uint64_t slot_index) {
  if (is_occupied(qf, slot_index))
    return 0;
  return !is_runend(qf, slot_index);
}

// __device__ static inline int probably_is_empty(const QF *qf, uint64_t slot_index)
// {
// 	return get_slot(qf, slot_index) == 0
// 		&& !is_occupied(qf, slot_index)
// 		&& !is_runend(qf, slot_index);
// }

// Scan forward from `from` to the first slot whose offset_lower_bound() is
// zero, i.e. the first slot no run can reach.  offset_lower_bound() never
// under-reports, so jumping ahead by its value skips only claimed slots.
__host__ __device__ uint64_t static inline find_first_empty_slot(QF *qf, uint64_t from) {
  uint64_t scan_origin = from;

  for (;;) {
    int claimed = offset_lower_bound(qf, from);
    // A negative bound indicates probing past the final slot or filter
    // corruption; device code cannot throw, so this is debug-only.  The
    // host-only sibling below reports the same condition via an exception.
#if DEBUG_ASSERTS
    assert(claimed >= 0);
#endif
    if (claimed == 0)
      break;
    from += (uint64_t)claimed;
  }

  // Lock-region bookkeeping retained from the original; the diagnostic gate
  // that compared these values is currently disabled.
  uint64_t origin_lock_region = scan_origin / NUM_SLOTS_TO_LOCK;
  uint64_t final_lock_region = from / NUM_SLOTS_TO_LOCK;
  (void)origin_lock_region;
  (void)final_lock_region;

  return from;
}

// Externally linkable wrapper around the inline scan above.
__host__ __device__ uint64_t first_empty_slot_wrapper(QF * qf, uint64_t from) {
  return find_first_empty_slot(qf, from);
}

//exact same function as above, but forced to be host exclusive so that a try_catch statement in cluster counting will succeed.
// Host-only twin of find_first_empty_slot(): identical forward scan, but a
// negative offset_lower_bound() is reported by throwing std::out_of_range
// (instead of assert()ing) so host-side cluster-counting tests can catch it
// and finish cleanly.
__host__ uint64_t host_debug_find_first_empty_slot(QF *qf, uint64_t from) {
  uint64_t start_from = from;

  do {
    int t = offset_lower_bound(qf, from); //get block of from
    // if (t < 0){
    // 	//this implies a failure in the code - you are going to
    // 	find_first_empty_slot_verbose(qf, start_from);
    // }
    //this assert breaks testing as we can't query the last slot for the next slot
    //this throws an assertion, instead we want to throw an out of range exception
    //that can be captured to finalize the test instead.
    //assert(t>=0);
    //assert must happen, checks cannot happen in device code
    //alternate version must exist that is host exclusive.
    if (t < 0) throw std::out_of_range("next free slot is before current slot, either final slot or gqf corruption.\n");
    if (t == 0)
      break;
    from = from + t;
  } while(1);

  uint64_t bucket_start_from = start_from/NUM_SLOTS_TO_LOCK;
  uint64_t end_start_from = from/NUM_SLOTS_TO_LOCK;
  //testing without this gate to check if we see speed improvements
  if (end_start_from>bucket_start_from+1){
    printf("Find first empty ran over a bucket: %llu\n", end_start_from-bucket_start_from);
  }

  return from;
}

// Shift the bit range [bstart, bend) of word `b` left by `amount`, keeping
// bits outside that range untouched.  When bstart == 0 the vacated low bits
// are filled from the top `amount` bits of the preceding word `a`.
__host__ __device__ static inline uint64_t shift_into_b(const uint64_t a, const uint64_t b,
                                                        const int bstart, const int bend,
                                                        const int amount) {
  const uint64_t a_component = bstart == 0 ? (a >> (64 - amount)) : 0;
  const uint64_t b_shifted_mask = BITMASK(bend - bstart) << bstart;
  const uint64_t b_shifted = ((b_shifted_mask & b) << amount) & b_shifted_mask;
  const uint64_t b_mask = ~b_shifted_mask;
  return a_component | b_shifted | (b & b_mask);
}

// __device__ void* gpu_memmove(void* dst, const void* src, size_t n)
// {
// 	//printf("Launching memmove\n");
// 	//todo: allocate space per thread for this buffer before launching the kernel
// 	void* temp_buffer = malloc(n);
// 	//maybe stack allocation?
// 	//void* temp_buffer = void* char[n];
// 	// cudaMemcpyAsync(temp_buffer, src, n, cudaMemcpyDeviceToDevice);
// 	// cudaMemcpyAsync(dst, temp_buffer, n, cudaMemcpyDeviceToDevice);
// 	// //cudaFree(temp_buffer);
// 	// return dst;
// 	memcpy(temp_buffer, src, n);
// 	memcpy(dst, temp_buffer, n);
// 	free(temp_buffer);
// }

//a variant of memmove that compares the two pointers
// Overlap-safe byte copy for device code (there is no device-side memmove).
// FIX: the previous test `char_src + n > char_dst` chose the backward copy
// even when dst < src; for overlapping downward moves that overwrites source
// bytes before they are read.  Copy backward only when dst lies inside
// [src, src+n) — the canonical memmove rule — which is byte-identical to the
// old behavior in every case the old code handled correctly (including the
// upward shifts performed by shift_remainders below).  Loop indices are also
// widened so n > INT_MAX cannot overflow an int counter.
__device__ void gpu_memmove(void* dst, const void* src, size_t n)
{
  char * char_dst = (char *) dst;
  char * char_src = (char *) src;

  if (char_dst > char_src && char_dst < char_src + n){
    // destination overlaps the tail of the source: copy high-to-low
    for (size_t i = n; i > 0; i--){
      char_dst[i-1] = char_src[i-1];
    }
  } else {
    // disjoint regions, or destination below source: forward copy is safe
    for (size_t i = 0; i < n; i++){
      char_dst[i] = char_src[i];
    }
  }
}

//a variant of memmove that compares the two pointers
// Warp-cooperative version: the 32 lanes stripe the byte range, lane `warpID`
// handling indices congruent to warpID mod 32, in the same direction rules as
// gpu_memmove above.  (Same direction fix applied as in gpu_memmove.)
__device__ void gpu_memmove_cooperative(void* dst, const void* src, size_t n, int warpID)
{
  char * char_dst = (char *) dst;
  char * char_src = (char *) src;

  if (char_dst > char_src && char_dst < char_src + n){
    // copy backwards so later bytes are written before earlier ones are needed
    for (int64_t i = (int64_t)n - 1 - warpID; i >= 0; i -= 32){
      char_dst[i] = char_src[i];
    }
  } else {
    // copy regular
    for (size_t i = (size_t)warpID; i < n; i += 32){
      char_dst[i] = char_src[i];
    }
  }
}

#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64

// Shift the remainders in [start_index, empty_index) up by one slot, working
// block by block from the empty slot back toward start_index.
__host__ __device__ static inline void shift_remainders(QF *qf, uint64_t start_index, uint64_t empty_index)
{
  uint64_t start_block = start_index / QF_SLOTS_PER_BLOCK;
  uint64_t start_offset = start_index % QF_SLOTS_PER_BLOCK;
  uint64_t empty_block = empty_index / QF_SLOTS_PER_BLOCK;
  uint64_t empty_offset = empty_index % QF_SLOTS_PER_BLOCK;

#if DEBUG_ASSERTS
  assert (start_index <=
empty_index);
  assert (empty_index < qf->metadata->xnslots);
#endif

  // Walk backwards one block at a time: shift the partial block up by one
  // slot, then pull the last slot of the previous block into slot 0.
  while (start_block < empty_block) {
#ifdef __CUDA_ARCH__
    gpu_memmove(&get_block(qf, empty_block)->slots[1], &get_block(qf, empty_block)->slots[0], empty_offset * sizeof(qf->blocks[0].slots[0]));
#else
    memmove(&get_block(qf, empty_block)->slots[1], &get_block(qf, empty_block)->slots[0], empty_offset * sizeof(qf->blocks[0].slots[0]));
#endif
    get_block(qf, empty_block)->slots[0] = get_block(qf, empty_block-1)->slots[QF_SLOTS_PER_BLOCK-1];
    empty_block--;
    empty_offset = QF_SLOTS_PER_BLOCK-1;
  }

#ifdef __CUDA_ARCH__
  gpu_memmove(&get_block(qf, empty_block)->slots[start_offset + 1], &get_block(qf, empty_block)->slots[start_offset], (empty_offset - start_offset) * sizeof(qf->blocks[0].slots[0]));
#else
  memmove(&get_block(qf, empty_block)->slots[start_offset+1], &get_block(qf, empty_block)->slots[start_offset], (empty_offset - start_offset) * sizeof(qf->blocks[0].slots[0]));
#endif
}

// Warp-cooperative variant of shift_remainders(); `warpID` is the lane id
// passed through to gpu_memmove_cooperative.
__device__ static inline void shift_remainders_cooperative(QF *qf, uint64_t start_index, uint64_t empty_index, int warpID)
{
  uint64_t start_block = start_index / QF_SLOTS_PER_BLOCK;
  uint64_t start_offset = start_index % QF_SLOTS_PER_BLOCK;
  uint64_t empty_block = empty_index / QF_SLOTS_PER_BLOCK;
  uint64_t empty_offset = empty_index % QF_SLOTS_PER_BLOCK;

#if DEBUG_ASSERTS
  assert (start_index <= empty_index);
  assert (empty_index < qf->metadata->xnslots);
#endif

  while (start_block < empty_block) {
    gpu_memmove_cooperative(&get_block(qf, empty_block)->slots[1], &get_block(qf, empty_block)->slots[0], empty_offset * sizeof(qf->blocks[0].slots[0]), warpID);
    get_block(qf, empty_block)->slots[0] = get_block(qf, empty_block-1)->slots[QF_SLOTS_PER_BLOCK-1];
    empty_block--;
    empty_offset = QF_SLOTS_PER_BLOCK-1;
  }

  gpu_memmove_cooperative(&get_block(qf, empty_block)->slots[start_offset + 1], &get_block(qf, empty_block)->slots[start_offset], (empty_offset - start_offset) * sizeof(qf->blocks[0].slots[0]), warpID);
}

#else

// Address of the 64-bit word holding bit-packed slot word index `i`.
#define REMAINDER_WORD(qf, i) ((uint64_t *)&(get_block(qf, (i)/qf->metadata->bits_per_slot)->slots[8 * ((i) % qf->metadata->bits_per_slot)]))

// Bit-packed variant: shift the packed remainder bits of
// [start_index, empty_index] up by one slot width, word by word from the top.
__host__ __device__ static inline void shift_remainders(QF *qf, const uint64_t start_index, const uint64_t empty_index)
{
  uint64_t last_word = (empty_index + 1) * qf->metadata->bits_per_slot / 64;
  const uint64_t first_word = start_index * qf->metadata->bits_per_slot / 64;
  int bend = ((empty_index + 1) * qf->metadata->bits_per_slot) % 64;
  const int bstart = (start_index * qf->metadata->bits_per_slot) % 64;

  while (last_word != first_word) {
    *REMAINDER_WORD(qf, last_word) = shift_into_b(*REMAINDER_WORD(qf, last_word-1), *REMAINDER_WORD(qf, last_word), 0, bend, qf->metadata->bits_per_slot);
    last_word--;
    bend = 64;
  }
  *REMAINDER_WORD(qf, last_word) = shift_into_b(0, *REMAINDER_WORD(qf, last_word), bstart, bend, qf->metadata->bits_per_slot);
}

#endif

// Fill `indices` with the next `n` empty slots at/after `from`; indices[0]
// receives the furthest (n'th) empty slot found, indices[n-1] the nearest.
__host__ __device__ static inline void find_next_n_empty_slots(QF *qf, uint64_t from, uint64_t n,
                                                               uint64_t *indices)
{
  while (n) {
    indices[--n] = find_first_empty_slot(qf, from);
    from = indices[n] + 1;
  }
}

// Move the remainders in [first, last] up by `distance` slots; uses the bulk
// shift when the distance is exactly one, otherwise copies slot by slot from
// the top down so sources are never clobbered before being read.
__host__ __device__ static inline void shift_slots(QF *qf, int64_t first, uint64_t last, uint64_t distance)
{
  int64_t i;
  if (distance == 1)
    shift_remainders(qf, first, last+1);
  else
    for (i = last; i >= first; i--)
      set_slot(qf, i + distance, get_slot(qf, i));
}

// Shift the runends metadata bits of [first, last] up by `distance` (< 64)
// positions, word by word from the top, carrying bits across word boundaries
// via shift_into_b().
__host__ __device__ static inline void shift_runends(QF *qf, int64_t first, uint64_t last, uint64_t distance)
{
#if DEBUG_ASSERTS
  assert(last < qf->metadata->xnslots && distance < 64);
#endif
  uint64_t first_word = first / 64;
  uint64_t bstart = first % 64;
  uint64_t last_word = (last + distance + 1) / 64;
  uint64_t bend = (last + distance + 1) % 64;

  if (last_word != first_word) {
    METADATA_WORD(qf, runends, 64*last_word) = shift_into_b(METADATA_WORD(qf, runends, 64*(last_word-1)), METADATA_WORD(qf, runends, 64*last_word), 0, bend, distance);
    bend = 64;
    last_word--;
    while (last_word != first_word) {
      METADATA_WORD(qf, runends, 64*last_word) =
shift_into_b(METADATA_WORD(qf, runends, 64*(last_word-1)), METADATA_WORD(qf, runends, 64*last_word), 0, bend, distance);
      last_word--;
    }
  }
  METADATA_WORD(qf, runends, 64*last_word) = shift_into_b(0, METADATA_WORD(qf, runends, 64*last_word), bstart, bend, distance);
}

// Replace `noverwrites` existing slots at `overwrite_index` and insert the
// remaining (total_remainders - noverwrites) new slots, shifting later slots,
// runends bits, and block offsets up to make room.  `operation` selects the
// runend fixups: 0 = insert into empty bucket, 1 = append to an existing run,
// 2 = insert into the middle of a run.  Returns false if the filter is out of
// space.
__host__ __device__ static inline bool insert_replace_slots_and_shift_remainders_and_runends_and_offsets(QF *qf,
                                                                                                         int operation,
                                                                                                         uint64_t bucket_index,
                                                                                                         uint64_t overwrite_index,
                                                                                                         const uint64_t *remainders,
                                                                                                         uint64_t total_remainders,
                                                                                                         uint64_t noverwrites)
{
  uint64_t empties[67];
  uint64_t i;
  int64_t j;
  int64_t ninserts = total_remainders - noverwrites;
  uint64_t insert_index = overwrite_index + noverwrites;

  if (ninserts > 0) {
    /* First, shift things to create n empty spaces where we need them. */
    find_next_n_empty_slots(qf, insert_index, ninserts, empties);
    if (empties[0] >= qf->metadata->xnslots) {
      return false;
    }
    // Shift each gap between consecutive empty slots up by a growing distance
    // so all ninserts empties collapse onto insert_index.
    for (j = 0; j < ninserts - 1; j++)
      shift_slots(qf, empties[j+1] + 1, empties[j] - 1, j + 1);
    shift_slots(qf, insert_index, empties[ninserts - 1] - 1, ninserts);
    for (j = 0; j < ninserts - 1; j++)
      shift_runends(qf, empties[j+1] + 1, empties[j] - 1, j + 1);
    shift_runends(qf, insert_index, empties[ninserts - 1] - 1, ninserts);

    // Clear runend bits inside the newly written span; the switch below sets
    // the single correct runend for the chosen operation.
    for (i = noverwrites; i < total_remainders - 1; i++)
      METADATA_WORD(qf, runends, overwrite_index + i) &= ~(1ULL << (((overwrite_index + i) % QF_SLOTS_PER_BLOCK) % 64));

    switch (operation) {
      case 0: /* insert into empty bucket */
#if DEBUG_ASSERTS
        assert (noverwrites == 0);
#endif
        METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) |= 1ULL << (((overwrite_index + total_remainders - 1) % QF_SLOTS_PER_BLOCK) % 64);
        break;
      case 1: /* append to bucket */
        METADATA_WORD(qf, runends, overwrite_index + noverwrites - 1) &= ~(1ULL << (((overwrite_index + noverwrites - 1) % QF_SLOTS_PER_BLOCK) % 64));
        METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) |= 1ULL << (((overwrite_index + total_remainders - 1) % QF_SLOTS_PER_BLOCK) % 64);
        break;
      case 2: /* insert into bucket */
        METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) &= ~(1ULL << (((overwrite_index + total_remainders - 1) % QF_SLOTS_PER_BLOCK) % 64));
        break;
      default:
        printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
        __threadfence();         // ensure store issued before trap
        asm("trap;");
#else
        abort();
#endif
    }

    // Bump the offset of every block between the bucket and the furthest
    // empty slot by the number of inserts that landed before it (saturating
    // at the offset field's maximum).
    uint64_t npreceding_empties = 0;
    for (i = bucket_index / QF_SLOTS_PER_BLOCK + 1; i <= empties[0]/QF_SLOTS_PER_BLOCK; i++) {
      while ((int64_t)npreceding_empties < ninserts &&
             empties[ninserts - 1 - npreceding_empties]  / QF_SLOTS_PER_BLOCK < i)
        npreceding_empties++;

      if (get_block(qf, i)->offset + ninserts - npreceding_empties < BITMASK(8*sizeof(qf->blocks[0].offset)))
        get_block(qf, i)->offset += ninserts - npreceding_empties;
      else
        get_block(qf, i)->offset = (uint8_t) BITMASK(8*sizeof(qf->blocks[0].offset));
    }
  }

  for (i = 0; i < total_remainders; i++)
    set_slot(qf, overwrite_index + i, remainders[i]);

  //modify_metadata(&qf->runtimedata->pc_noccupied_slots, ninserts);

  return true;
}

// Inverse of the insert above: write back `total_remainders` slots at
// `overwrite_index` (the surviving encoding) and reclaim the
// (old_length - total_remainders) freed slots by sliding later runs down,
// fixing runends, occupieds, and block offsets.  Returns the number of slots
// freed.
__host__ __device__ static inline int remove_replace_slots_and_shift_remainders_and_runends_and_offsets(QF *qf,
                                                                                                        int operation,
                                                                                                        uint64_t bucket_index,
                                                                                                        uint64_t overwrite_index,
                                                                                                        const uint64_t *remainders,
                                                                                                        uint64_t total_remainders,
                                                                                                        uint64_t old_length)
{
  uint64_t i;

  // Update the slots
  for (i = 0; i < total_remainders; i++)
    set_slot(qf, overwrite_index + i, remainders[i]);

  // If this is the last thing in its run, then we may need to set a new runend bit
  if (is_runend(qf, overwrite_index + old_length - 1)) {
    if (total_remainders > 0) {
      // If we're not deleting this entry entirely, then it will still the last entry in this run
      METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) |= 1ULL << ((overwrite_index + total_remainders - 1) % 64);
    } else if (overwrite_index > bucket_index &&
               !is_runend(qf, overwrite_index - 1)) {
      // If we're deleting this entry entirely, but it is not the first entry in this run,
      // then set the preceding entry to be the runend
      METADATA_WORD(qf, runends, overwrite_index - 1) |= 1ULL << ((overwrite_index - 1) % 64);
    }
  }

  // shift slots back one run at a time
  uint64_t original_bucket = bucket_index;
  uint64_t current_bucket = bucket_index;
  uint64_t current_slot = overwrite_index + total_remainders;
  uint64_t current_distance = old_length - total_remainders;
  int ret_current_distance = current_distance;

  while (current_distance > 0) {
    if (is_runend(qf, current_slot + current_distance - 1)) {
      // Advance to the next occupied bucket whose run must slide down.
      do {
        current_bucket++;
      } while (current_bucket < current_slot + current_distance &&
               !is_occupied(qf, current_bucket));
    }

    if (current_bucket <= current_slot) {
      // The run can move down: copy the slot and keep runends consistent.
      set_slot(qf, current_slot, get_slot(qf, current_slot + current_distance));
      if (is_runend(qf, current_slot) !=
          is_runend(qf, current_slot + current_distance))
        METADATA_WORD(qf, runends, current_slot) ^= 1ULL << (current_slot % 64);
      current_slot++;

    } else if (current_bucket <= current_slot + current_distance) {
      // The gap reaches the next bucket's canonical slot: zero the leftover
      // gap and restart the slide from that bucket.
      uint64_t i;
      for (i = current_slot; i < current_slot + current_distance; i++) {
        set_slot(qf, i, 0);
        METADATA_WORD(qf, runends, i) &= ~(1ULL << (i % 64));
      }

      current_distance = current_slot + current_distance - current_bucket;
      current_slot = current_bucket;
    } else {
      current_distance = 0;
    }
  }

  // reset the occupied bit of the hash bucket index if the hash is the
  // only item in the run and is removed completely.
  if (operation && !total_remainders)
    METADATA_WORD(qf, occupieds, bucket_index) &= ~(1ULL << (bucket_index % 64));

  // update the offset bits.
  // find the number of occupied slots in the original_bucket block.
  // Then find the runend slot corresponding to the last run in the
  // original_bucket block.
  // Update the offset of the block to which it belongs.
  uint64_t original_block = original_bucket / QF_SLOTS_PER_BLOCK;
  if (old_length > total_remainders) {	// we only update offsets if we shift/delete anything
    while (1) {
      uint64_t last_occupieds_hash_index = QF_SLOTS_PER_BLOCK * original_block + (QF_SLOTS_PER_BLOCK - 1);
      uint64_t runend_index = run_end(qf, last_occupieds_hash_index);
      // runend spans across the block
      // update the offset of the next block
      if (runend_index / QF_SLOTS_PER_BLOCK == original_block) { // if the run ends in the same block
        if (get_block(qf, original_block + 1)->offset == 0)
          break;
        get_block(qf, original_block + 1)->offset = 0;
      } else { // if the last run spans across the block
        if (get_block(qf, original_block + 1)->offset == (runend_index - last_occupieds_hash_index))
          break;
        get_block(qf, original_block + 1)->offset = (runend_index - last_occupieds_hash_index);
      }
      original_block++;
    }
  }

  //int num_slots_freed = old_length - total_remainders;
  //modify_metadata(&qf->runtimedata->pc_noccupied_slots, -num_slots_freed);
  /*qf->metadata->noccupied_slots -= (old_length - total_remainders);*/
  if (!total_remainders) {
    //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, -1);
    /*qf->metadata->ndistinct_elts--;*/
  }

  return ret_current_distance;
}

/*****************************************************************************
 * Code that uses the above to implement a QF with keys and inline counters. *
 *****************************************************************************/

/* Counter format:
 0 xs:    <empty string>
 1 x:     x
 2 xs:    xx
 3 0s:    000
 >2 xs:   xbc...cx  for x != 0, b < x, c != 0, x
 >3 0s:   0c...c00  for c != 0
 */

// Encode (remainder, counter) into slots, writing backwards from `slots`
// (which points one past the end of the buffer); returns a pointer to the
// first written slot.  Counts 0-3 use the short forms above; larger counts
// are written as base-(2^r - 1 or - 2) digits that avoid the sentinel values.
__host__ __device__ static inline uint64_t *encode_counter(QF *qf, uint64_t remainder, uint64_t counter,
                                                           uint64_t *slots)
{
  uint64_t digit = remainder;
  uint64_t base = (1ULL << qf->metadata->bits_per_slot) - 1;
  uint64_t *p = slots;

  if (counter == 0)
    return p;

  *--p = remainder;

  if (counter == 1)
    return p;

  if (counter == 2) {
    *--p = remainder;
    return p;
  }

  if (counter == 3 && remainder == 0) {
    *--p = remainder;
    *--p = remainder;
    return p;
  }

  if (counter == 3 && remainder > 0) {
    *--p = 0;
    *--p = remainder;
    return p;
  }

  if (remainder == 0)
    *--p = remainder;
  else
    base--;

  if (remainder)
    counter -= 3;
  else
    counter -= 4;
  do {
    digit = counter % base;
    digit++; /* Zero not allowed */
    if (remainder && digit >= remainder)
      digit++; /* Cannot overflow since digit is mod 2^r-2 */
    *--p = digit;
    counter /= base;
  } while (counter);

  if (remainder && digit >= remainder)
    *--p = 0;

  *--p = remainder;

  return p;
}

/* Returns the length of the encoding.
REQUIRES: index points to first slot of a counter. */
// Decode the counter starting at `index`, storing the remainder and count
// through the out-parameters and returning the index of the encoding's last
// slot.
__host__ __device__ static inline uint64_t decode_counter(const QF *qf, uint64_t index, uint64_t *remainder,
                                                          uint64_t *count)
{
  uint64_t base;
  uint64_t rem;
  uint64_t cnt;
  uint64_t digit;
  uint64_t end;

  *remainder = rem = get_slot(qf, index);

  if (is_runend(qf, index)) { /* Entire run is "0" */
    *count = 1;
    return index;
  }

  digit = get_slot(qf, index + 1);

  if (is_runend(qf, index + 1)) {
    *count = digit == rem ? 2 : 1;
    return index + (digit == rem ? 1 : 0);
  }

  if (rem > 0 && digit >= rem) {
    *count = digit == rem ? 2 : 1;
    return index + (digit == rem ?
1 : 0); } if (rem > 0 && digit == 0 && get_slot(qf, index + 2) == rem) { *count = 3; return index + 2; } if (rem == 0 && digit == 0) { if (get_slot(qf, index + 2) == 0) { *count = 3; return index + 2; } else { *count = 2; return index + 1; } } cnt = 0; base = (1ULL << qf->metadata->bits_per_slot) - (rem ? 2 : 1); end = index + 1; while (digit != rem && !is_runend(qf, end)) { if (digit > rem) digit--; if (digit && rem) digit--; cnt = cnt * base + digit; end++; digit = get_slot(qf, end); } if (rem) { *count = cnt + 3; return end; } if (is_runend(qf, end) || get_slot(qf, end + 1) != 0) { *count = 1; return index; } *count = cnt + 4; return end + 1; } /* return the next slot which corresponds to a * different element * */ // __device__ static inline uint64_t next_slot(QF *qf, uint64_t current) // { // uint64_t rem = get_slot(qf, current); // current++; // while (get_slot(qf, current) == rem && current <= qf->metadata->nslots) { // current++; // } // return current; // } //code for approx inserts __host__ __device__ static inline qf_returns insert1_if_not_exists(QF *qf, __uint64_t hash, uint8_t * value) { uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot); uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot; uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK; uint64_t compare_remainder = hash_remainder >> qf->metadata->value_bits; /* if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) { if (!qf_lock(qf, hash_bucket_index, true, runtime_lock)) return QF_COULDNT_LOCK; } */ //printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder); //approx filter has estimate of only one insert per item // #ifdef __CUDA_ARCH__ // atomicAdd((unsigned long long *)&qf->metadata->noccupied_slots, 1ULL); // #else // abort(); // #endif if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) { 
METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); set_slot(qf, hash_bucket_index, hash_remainder); METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); //modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1); //modify_metadata(&qf->runtimedata->pc_nelts, 1); } else { uint64_t runend_index = run_end(qf, hash_bucket_index); int operation = 0; /* Insert into empty bucket */ uint64_t insert_index = runend_index + 1; uint64_t new_value = hash_remainder; /* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */ uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1; if (is_occupied(qf, hash_bucket_index)) { /* Find the counter for this remainder if it exists. */ uint64_t current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits; //uint64_t zero_terminator = runstart_index; /* Skip over counters for other remainders. */ while (current_remainder < compare_remainder && runstart_index <= runend_index) { runstart_index++; current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits; } /* If this is the first time we've inserted the new remainder, and it is larger than any remainder in the run. */ if (runstart_index > runend_index) { operation = 1; insert_index = runstart_index; new_value = hash_remainder; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); /* This is the first time we're inserting this remainder, but there are larger remainders already in the run. */ } else if (current_remainder != compare_remainder) { operation = 2; /* Inserting */ insert_index = runstart_index; new_value = hash_remainder; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); /* Cases below here: we're incrementing the (simple or extended) counter for this remainder. */ /* If there's exactly one instance of this remainder. 
*/ } else { //get remainder *value = get_slot(qf, runstart_index) && BITMASK(qf->metadata->value_bits); return QF_ITEM_FOUND; } } //else { //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); //} if (operation >= 0) { uint64_t empty_slot_index = find_first_empty_slot(qf, runend_index+1); #if DROP_ON_BIG_CLUSTER // if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){ // return QF_FULL; // } if (qf->metadata->qf_full){ return QF_FULL; } if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){ qf->metadata->qf_full = true; return QF_FULL; } #endif if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){ return QF_FULL; } if (empty_slot_index >= qf->metadata->xnslots) { printf("Ran out of space. Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index); return QF_FULL; } shift_remainders(qf, insert_index, empty_slot_index); set_slot(qf, insert_index, new_value); //ret_distance = insert_index - hash_bucket_index; shift_runends(qf, insert_index, empty_slot_index-1, 1); switch (operation) { case 0: METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64); break; case 1: METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64)); METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64); break; case 2: METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64)); break; default: printf("Invalid operation %d\n", operation); #ifdef __CUDA_ARCH__ __threadfence(); // ensure store issued before trap asm("trap;"); #else abort(); #endif } /* * Increment the offset for each block between the hash bucket index * and block of the empty slot * */ uint64_t i; for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <= empty_slot_index/QF_SLOTS_PER_BLOCK; i++) { if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset))) get_block(qf, i)->offset++; 
#if DEBUG_ASSERTS assert(get_block(qf, i)->offset != 0); #endif } //modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1); } //modify_metadata(&qf->runtimedata->pc_nelts, 1); METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); } /* if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) { qf_unlock(qf, hash_bucket_index, true); } */ return QF_ITEM_INSERTED; } __device__ static inline qf_returns insert1_if_not_exists_cooperative(QF *qf, __uint64_t hash, uint8_t * value, int warpID) { uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot); uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot; uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK; uint64_t compare_remainder = hash_remainder >> qf->metadata->value_bits; /* if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) { if (!qf_lock(qf, hash_bucket_index, true, runtime_lock)) return QF_COULDNT_LOCK; } */ //printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder); //approx filter has estimate of only one insert per item // #ifdef __CUDA_ARCH__ // atomicAdd((unsigned long long *)&qf->metadata->noccupied_slots, 1ULL); // #else // abort(); // #endif //this step can't be improved, minimum one mem check if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) { METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); set_slot(qf, hash_bucket_index, hash_remainder); METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); //modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1); //modify_metadata(&qf->runtimedata->pc_nelts, 1); } else { //maybe improve run_end, come back later and check uint64_t runend_index = run_end(qf, hash_bucket_index); int operation = 0; /* Insert into 
empty bucket */ uint64_t insert_index = runend_index + 1; uint64_t new_value = hash_remainder; /* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */ uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1; if (is_occupied(qf, hash_bucket_index)) { /* Find the counter for this remainder if it exists. */ uint64_t current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits; //uint64_t zero_terminator = runstart_index; /* Skip over counters for other remainders. */ //we look for runstart_index <= runend and current_remainder >= compare_remainder uint64_t my_runstart_index = runstart_index + warpID; uint64_t my_current_remainder = get_slot(qf, my_runstart_index) >> qf->metadata->value_bits; while(true){ //generate ballot bool ballot = !((my_runstart_index <= runend_index) && (my_current_remainder < compare_remainder)); int warp_to_query = __ffs(__ballot_sync(0xffffffff, ballot))-1; if (warp_to_query != -1){ //match kinda found! runstart_index = __shfl_sync(0xffffffff, my_runstart_index, warp_to_query); //exit successfully break; } //if all fail retry at the next iteration my_runstart_index+=32; } // while (current_remainder < compare_remainder && runstart_index <= // runend_index) { // runstart_index++; // current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits; // } //reset current remainder to be correct current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits; /* If this is the first time we've inserted the new remainder, and it is larger than any remainder in the run. */ if (runstart_index > runend_index) { operation = 1; insert_index = runstart_index; new_value = hash_remainder; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); /* This is the first time we're inserting this remainder, but there are larger remainders already in the run. 
*/ } else if (current_remainder != compare_remainder) { operation = 2; /* Inserting */ insert_index = runstart_index; new_value = hash_remainder; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); /* Cases below here: we're incrementing the (simple or extended) counter for this remainder. */ /* If there's exactly one instance of this remainder. */ } else { //get remainder *value = get_slot(qf, runstart_index) && BITMASK(qf->metadata->value_bits); return QF_ITEM_FOUND; } } //else { //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); //} if (operation >= 0) { uint64_t empty_slot_index; if (warpID == 0) empty_slot_index = find_first_empty_slot(qf, runend_index+1); #if DROP_ON_BIG_CLUSTER // if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){ // return QF_FULL; // } if (qf->metadata->qf_full){ return QF_FULL; } if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){ qf->metadata->qf_full = true; return QF_FULL; } #endif if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){ return QF_FULL; } empty_slot_index = __shfl_sync(0xffffffff, empty_slot_index, 0); if (empty_slot_index >= qf->metadata->xnslots) { printf("Ran out of space. 
Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index); return QF_FULL; } // if (warpID == 0){ // } //shift remainders changes - atm, none shift_remainders_cooperative(qf, insert_index, empty_slot_index, warpID); //set slot changes, atm, none if (warpID == 0){ set_slot(qf, insert_index, new_value); //ret_distance = insert_index - hash_bucket_index; shift_runends(qf, insert_index, empty_slot_index-1, 1); switch (operation) { case 0: METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64); break; case 1: METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64)); METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64); break; case 2: METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64)); break; default: printf("Invalid operation %d\n", operation); #ifdef __CUDA_ARCH__ __threadfence(); // ensure store issued before trap asm("trap;"); #else abort(); #endif } /* * Increment the offset for each block between the hash bucket index * and block of the empty slot * */ uint64_t i; for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <= empty_slot_index/QF_SLOTS_PER_BLOCK; i++) { if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset))) get_block(qf, i)->offset++; #if DEBUG_ASSERTS assert(get_block(qf, i)->offset != 0); #endif } //modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1); } // end of single threaded brace } //modify_metadata(&qf->runtimedata->pc_nelts, 1); METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); //closing barce for warpID == 0 } /* if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) { qf_unlock(qf, hash_bucket_index, true); } */ return QF_ITEM_INSERTED; } __host__ __device__ static inline qf_returns insert1(QF *qf, __uint64_t hash, uint8_t runtime_lock) { int ret_distance = 0; uint64_t hash_remainder = hash & 
BITMASK(qf->metadata->bits_per_slot); uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot; uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK; /* if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) { if (!qf_lock(qf, hash_bucket_index, true, runtime_lock)) return QF_COULDNT_LOCK; } */ //printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder); if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) { METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); set_slot(qf, hash_bucket_index, hash_remainder); METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); ret_distance = 0; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); //modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1); //modify_metadata(&qf->runtimedata->pc_nelts, 1); } else { uint64_t runend_index = run_end(qf, hash_bucket_index); #if DROP_ON_RUNEND if (runend_index - hash_bucket_index >= RUNEND_CUTOFF){ //printf("Dropping\n"); return QF_FULL; } #endif int operation = 0; /* Insert into empty bucket */ uint64_t insert_index = runend_index + 1; uint64_t new_value = hash_remainder; /* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */ uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1; if (is_occupied(qf, hash_bucket_index)) { /* Find the counter for this remainder if it exists. */ uint64_t current_remainder = get_slot(qf, runstart_index); uint64_t zero_terminator = runstart_index; /* The counter for 0 is special. 
*/ if (current_remainder == 0) { uint64_t t = runstart_index + 1; while (t < runend_index && get_slot(qf, t) != 0) t++; if (t < runend_index && get_slot(qf, t+1) == 0) zero_terminator = t+1; /* Three or more 0s */ else if (runstart_index < runend_index && get_slot(qf, runstart_index + 1) == 0) zero_terminator = runstart_index + 1; /* Exactly two 0s */ /* Otherwise, exactly one 0 (i.e. zero_terminator == runstart_index) */ /* May read past end of run, but that's OK because loop below can handle that */ if (hash_remainder != 0) { runstart_index = zero_terminator + 1; current_remainder = get_slot(qf, runstart_index); } } /* Skip over counters for other remainders. */ while (current_remainder < hash_remainder && runstart_index <= runend_index) { /* If this remainder has an extended counter, skip over it. */ if (runstart_index < runend_index && get_slot(qf, runstart_index + 1) < current_remainder) { runstart_index = runstart_index + 2; while (runstart_index < runend_index && get_slot(qf, runstart_index) != current_remainder) runstart_index++; runstart_index++; /* This remainder has a simple counter. */ } else { runstart_index++; } /* This may read past the end of the run, but the while loop condition will prevent us from using the invalid result in that case. */ current_remainder = get_slot(qf, runstart_index); } /* If this is the first time we've inserted the new remainder, and it is larger than any remainder in the run. */ if (runstart_index > runend_index) { operation = 1; insert_index = runstart_index; new_value = hash_remainder; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); /* This is the first time we're inserting this remainder, but there are larger remainders already in the run. 
*/ } else if (current_remainder != hash_remainder) { operation = 2; /* Inserting */ insert_index = runstart_index; new_value = hash_remainder; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); /* Cases below here: we're incrementing the (simple or extended) counter for this remainder. */ /* If there's exactly one instance of this remainder. */ } else if (runstart_index == runend_index || (hash_remainder > 0 && get_slot(qf, runstart_index + 1) > hash_remainder) || (hash_remainder == 0 && zero_terminator == runstart_index)) { operation = 2; /* Insert */ insert_index = runstart_index; new_value = hash_remainder; /* If there are exactly two instances of this remainder. */ } else if ((hash_remainder > 0 && get_slot(qf, runstart_index + 1) == hash_remainder) || (hash_remainder == 0 && zero_terminator == runstart_index + 1)) { operation = 2; /* Insert */ insert_index = runstart_index + 1; new_value = 0; /* Special case for three 0s */ } else if (hash_remainder == 0 && zero_terminator == runstart_index + 2) { operation = 2; /* Insert */ insert_index = runstart_index + 1; new_value = 1; /* There is an extended counter for this remainder. */ } else { /* Move to the LSD of the counter. */ insert_index = runstart_index + 1; while (get_slot(qf, insert_index+1) != hash_remainder) insert_index++; /* Increment the counter. */ uint64_t digit, carry; do { carry = 0; digit = get_slot(qf, insert_index); // Convert a leading 0 (which is special) to a normal encoded digit if (digit == 0) { digit++; if (digit == current_remainder) digit++; } // Increment the digit digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot); // Ensure digit meets our encoding requirements if (digit == 0) { digit++; carry = 1; } if (digit == current_remainder) digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot); if (digit == 0) { digit++; carry = 1; } set_slot(qf, insert_index, digit); insert_index--; } while(insert_index > runstart_index && carry); /* If the counter needs to be expanded. 
*/ if (insert_index == runstart_index && (carry > 0 || (current_remainder != 0 && digit >= current_remainder))) { operation = 2; /* insert */ insert_index = runstart_index + 1; if (!carry) /* To prepend a 0 before the counter if the MSD is greater than the rem */ new_value = 0; else if (carry) { /* Increment the new value because we don't use 0 to encode counters */ new_value = 2; /* If the rem is greater than or equal to the new_value then fail*/ #if DEBUG_ASSERTS if (current_remainder > 0) assert(new_value < current_remainder); #endif } } else { operation = -1; } } } //else { //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); //} if (operation >= 0) { uint64_t empty_slot_index = find_first_empty_slot(qf, runend_index+1); #if DROP_ON_BIG_CLUSTER // if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){ // return QF_FULL; // } if (qf->metadata->qf_full){ return QF_FULL; } if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){ qf->metadata->qf_full = true; return QF_FULL; } #endif if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){ return QF_FULL; } if (empty_slot_index >= qf->metadata->xnslots) { printf("Ran out of space. 
Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index); return QF_FULL; } shift_remainders(qf, insert_index, empty_slot_index); set_slot(qf, insert_index, new_value); ret_distance = insert_index - hash_bucket_index; shift_runends(qf, insert_index, empty_slot_index-1, 1); switch (operation) { case 0: METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64); break; case 1: METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64)); METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64); break; case 2: METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64)); break; default: printf("Invalid operation %d\n", operation); #ifdef __CUDA_ARCH__ __threadfence(); // ensure store issued before trap asm("trap;"); #else abort(); #endif } /* * Increment the offset for each block between the hash bucket index * and block of the empty slot * */ uint64_t i; for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <= empty_slot_index/QF_SLOTS_PER_BLOCK; i++) { if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset))) get_block(qf, i)->offset++; #if DEBUG_ASSERTS assert(get_block(qf, i)->offset != 0); #endif } //modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1); } //modify_metadata(&qf->runtimedata->pc_nelts, 1); METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); } /* if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) { qf_unlock(qf, hash_bucket_index, true); } */ //return ret_distance; return QF_ITEM_INSERTED; } __device__ static inline int insert1_cooperative(QF *qf, __uint64_t hash, uint8_t runtime_lock, int warpID) { int ret_distance = 0; uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot); uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot; uint64_t hash_bucket_block_offset = hash_bucket_index % 
QF_SLOTS_PER_BLOCK; /* if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) { if (!qf_lock(qf, hash_bucket_index, true, runtime_lock)) return QF_COULDNT_LOCK; } */ //printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder); //this is checking if the slot is empty, i.e. direct insert //no memmove required, no warp fancyness //no space for optimization on a warp level if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) { METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); set_slot(qf, hash_bucket_index, hash_remainder); METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); ret_distance = 0; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); //modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1); //modify_metadata(&qf->runtimedata->pc_nelts, 1); } else { //slot was occupied //I believe this can be optimized? not super certain about the performance reqs uint64_t runend_index = run_end(qf, hash_bucket_index); int operation = 0; /* Insert into empty bucket */ uint64_t insert_index = runend_index + 1; uint64_t new_value = hash_remainder; /* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */ uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1; if (is_occupied(qf, hash_bucket_index)) { /* Find the counter for this remainder if it exists. */ uint64_t current_remainder = get_slot(qf, runstart_index); uint64_t zero_terminator = runstart_index; /* The counter for 0 is special. 
*/ //this logic can't be optimized if (current_remainder == 0) { uint64_t t = runstart_index + 1; while (t < runend_index && get_slot(qf, t) != 0) t++; if (t < runend_index && get_slot(qf, t+1) == 0) zero_terminator = t+1; /* Three or more 0s */ else if (runstart_index < runend_index && get_slot(qf, runstart_index + 1) == 0) zero_terminator = runstart_index + 1; /* Exactly two 0s */ /* Otherwise, exactly one 0 (i.e. zero_terminator == runstart_index) */ /* May read past end of run, but that's OK because loop below can handle that */ if (hash_remainder != 0) { runstart_index = zero_terminator + 1; current_remainder = get_slot(qf, runstart_index); } } //THIS CAN BE OPTIMIZED //rewrite //needs to be loopy boy and handle special counters //I'm thinking if you are weird then step back once? uint64_t my_runstart_index = runstart_index+warpID; uint64_t my_current_remainder = get_slot(qf, my_runstart_index); //everyone has one of 32 partitions //get slot - feeds the remainder //each remainder is either < us - good // = us - great! // > us - bad // => only occur before the specified points iff //on correct use there should be a dividing line? if (my_runstart_index <= runend_index){ } /* Skip over counters for other remainders. */ while (current_remainder < hash_remainder && runstart_index <= runend_index) { /* If this remainder has an extended counter, skip over it. */ if (runstart_index < runend_index && get_slot(qf, runstart_index + 1) < current_remainder) { //if the current slot < current remainder //a runstart_index = runstart_index + 2; while (runstart_index < runend_index && get_slot(qf, runstart_index) != current_remainder) runstart_index++; runstart_index++; /* This remainder has a simple counter. */ } else { runstart_index++; } /* This may read past the end of the run, but the while loop condition will prevent us from using the invalid result in that case. 
*/ current_remainder = get_slot(qf, runstart_index); } /* If this is the first time we've inserted the new remainder, and it is larger than any remainder in the run. */ if (runstart_index > runend_index) { operation = 1; insert_index = runstart_index; new_value = hash_remainder; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); /* This is the first time we're inserting this remainder, but there are larger remainders already in the run. */ } else if (current_remainder != hash_remainder) { operation = 2; /* Inserting */ insert_index = runstart_index; new_value = hash_remainder; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); /* Cases below here: we're incrementing the (simple or extended) counter for this remainder. */ /* If there's exactly one instance of this remainder. */ } else if (runstart_index == runend_index || (hash_remainder > 0 && get_slot(qf, runstart_index + 1) > hash_remainder) || (hash_remainder == 0 && zero_terminator == runstart_index)) { operation = 2; /* Insert */ insert_index = runstart_index; new_value = hash_remainder; /* If there are exactly two instances of this remainder. */ } else if ((hash_remainder > 0 && get_slot(qf, runstart_index + 1) == hash_remainder) || (hash_remainder == 0 && zero_terminator == runstart_index + 1)) { operation = 2; /* Insert */ insert_index = runstart_index + 1; new_value = 0; /* Special case for three 0s */ } else if (hash_remainder == 0 && zero_terminator == runstart_index + 2) { operation = 2; /* Insert */ insert_index = runstart_index + 1; new_value = 1; /* There is an extended counter for this remainder. */ } else { /* Move to the LSD of the counter. */ insert_index = runstart_index + 1; while (get_slot(qf, insert_index+1) != hash_remainder) insert_index++; /* Increment the counter. 
*/ uint64_t digit, carry; do { carry = 0; digit = get_slot(qf, insert_index); // Convert a leading 0 (which is special) to a normal encoded digit if (digit == 0) { digit++; if (digit == current_remainder) digit++; } // Increment the digit digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot); // Ensure digit meets our encoding requirements if (digit == 0) { digit++; carry = 1; } if (digit == current_remainder) digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot); if (digit == 0) { digit++; carry = 1; } set_slot(qf, insert_index, digit); insert_index--; } while(insert_index > runstart_index && carry); /* If the counter needs to be expanded. */ if (insert_index == runstart_index && (carry > 0 || (current_remainder != 0 && digit >= current_remainder))) { operation = 2; /* insert */ insert_index = runstart_index + 1; if (!carry) /* To prepend a 0 before the counter if the MSD is greater than the rem */ new_value = 0; else if (carry) { /* Increment the new value because we don't use 0 to encode counters */ new_value = 2; /* If the rem is greater than or equal to the new_value then fail*/ #if DEBUG_ASSERTS if (current_remainder > 0) assert(new_value < current_remainder); #endif } } else { operation = -1; } } } //else { //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); //} if (operation >= 0) { uint64_t empty_slot_index = find_first_empty_slot(qf, runend_index+1); #if DROP_ON_BIG_CLUSTER // if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){ // return QF_FULL; // } if (qf->metadata->qf_full){ return QF_FULL; } if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){ qf->metadata->qf_full = true; return QF_FULL; } #endif if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){ return QF_FULL; } if (empty_slot_index >= qf->metadata->xnslots) { printf("Ran out of space. 
Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index); return QF_FULL; } shift_remainders(qf, insert_index, empty_slot_index); set_slot(qf, insert_index, new_value); ret_distance = insert_index - hash_bucket_index; shift_runends(qf, insert_index, empty_slot_index-1, 1); switch (operation) { case 0: METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64); break; case 1: METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64)); METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64); break; case 2: METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64)); break; default: printf("Invalid operation %d\n", operation); #ifdef __CUDA_ARCH__ __threadfence(); // ensure store issued before trap asm("trap;"); #else abort(); #endif } /* * Increment the offset for each block between the hash bucket index * and block of the empty slot * */ uint64_t i; for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <= empty_slot_index/QF_SLOTS_PER_BLOCK; i++) { if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset))) get_block(qf, i)->offset++; #if DEBUG_ASSERTS assert(get_block(qf, i)->offset != 0); #endif } //modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1); } //modify_metadata(&qf->runtimedata->pc_nelts, 1); METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); } /* if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) { qf_unlock(qf, hash_bucket_index, true); } */ return ret_distance; } __host__ __device__ static inline qf_returns insert(QF *qf, __uint64_t hash, uint64_t count, uint8_t runtime_lock) { int ret_distance = 0; uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot); uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot; uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK; /*uint64_t 
hash_bucket_lock_offset = hash_bucket_index % NUM_SLOTS_TO_LOCK;*/ /* if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) { if (!qf_lock(qf, hash_bucket_index, false, runtime_lock)) return QF_COULDNT_LOCK; } */ uint64_t runend_index = run_end(qf, hash_bucket_index); /* Empty slot */ if (might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index) { METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); set_slot(qf, hash_bucket_index, hash_remainder); METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); //ERIC TODO: see if this metadata is needed--probably isn't compatible with GPU //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); //modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1); //modify_metadata(&qf->runtimedata->pc_nelts, 1); /* This trick will, I hope, keep the fast case fast. */ if (count > 1) { insert(qf, hash, count - 1, QF_NO_LOCK); } } else { /* Non-empty slot */ uint64_t new_values[67]; int64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf,hash_bucket_index- 1) + 1; bool ret; if (!is_occupied(qf, hash_bucket_index)) { /* Empty bucket, but its slot is occupied. */ uint64_t *p = encode_counter(qf, hash_remainder, count, &new_values[67]); ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf, 0, hash_bucket_index, runstart_index, p, &new_values[67] - p, 0); if (!ret) return QF_FULL; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); ret_distance = runstart_index - hash_bucket_index; } else { /* Non-empty bucket */ uint64_t current_remainder, current_count, current_end; /* Find the counter for this remainder, if one exists. 
*/ current_end = decode_counter(qf, runstart_index, &current_remainder,&current_count); while (current_remainder < hash_remainder && !is_runend(qf, current_end)) { runstart_index = current_end + 1; current_end = decode_counter(qf, runstart_index, &current_remainder, &current_count); } /* If we reached the end of the run w/o finding a counter for this remainder, then append a counter for this remainder to the run. */ if (current_remainder < hash_remainder) { uint64_t *p = encode_counter(qf, hash_remainder, count, &new_values[67]); ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf, 1, /* Append to bucket */hash_bucket_index, current_end + 1, p, &new_values[67] - p, 0); if (!ret) return QF_FULL; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); ret_distance = (current_end + 1) - hash_bucket_index; /* Found a counter for this remainder. Add in the new count. */ } else if (current_remainder == hash_remainder) { uint64_t *p = encode_counter(qf, hash_remainder, current_count + count, &new_values[67]); ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf, is_runend(qf, current_end) ? 1 : 2, hash_bucket_index, runstart_index, p, &new_values[67] - p, current_end - runstart_index + 1); if (!ret) return QF_FULL; ret_distance = runstart_index - hash_bucket_index; /* No counter for this remainder, but there are larger remainders, so we're not appending to the bucket. 
*/ } else { uint64_t *p = encode_counter(qf, hash_remainder, count, &new_values[67]); ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf, 2, /* Insert to bucket */ hash_bucket_index, runstart_index, p, &new_values[67] - p, 0); if (!ret) return QF_FULL; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); ret_distance = runstart_index - hash_bucket_index; } } METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); //modify_metadata(&qf->runtimedata->pc_nelts, count); } /* if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) { qf_unlock(qf, hash_bucket_index, false); } */ //return ret_distance; return QF_ITEM_INSERTED; } __host__ __device__ inline static int _remove(QF *qf, __uint64_t hash, uint64_t count, uint8_t runtime_lock) { int ret_numfreedslots = 0; uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot); uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot; uint64_t current_remainder, current_count, current_end; uint64_t new_values[67]; /* if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) { if (!qf_lock(qf, hash_bucket_index, false, runtime_lock)) return -2; } */ /* Empty bucket */ if (!is_occupied(qf, hash_bucket_index)) return -1; uint64_t runstart_index = hash_bucket_index == 0 ? 
0 : run_end(qf, hash_bucket_index - 1) + 1; uint64_t original_runstart_index = runstart_index; int only_item_in_the_run = 0; /*Find the counter for this remainder, if one exists.*/ current_end = decode_counter(qf, runstart_index, &current_remainder, &current_count); while (current_remainder < hash_remainder && !is_runend(qf, current_end)) { runstart_index = current_end + 1; current_end = decode_counter(qf, runstart_index, &current_remainder, &current_count); } /* remainder not found in the given run */ if (current_remainder != hash_remainder) return -1; if (original_runstart_index == runstart_index && is_runend(qf, current_end)) only_item_in_the_run = 1; /* endode the new counter */ uint64_t *p = encode_counter(qf, hash_remainder, count > current_count ? 0 : current_count - count, &new_values[67]); ret_numfreedslots = remove_replace_slots_and_shift_remainders_and_runends_and_offsets(qf, only_item_in_the_run, hash_bucket_index, runstart_index, p, &new_values[67] - p, current_end - runstart_index + 1); // update the nelements. //modify_metadata(&qf->runtimedata->pc_nelts, -count); /*qf->metadata->nelts -= count;*/ /* if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) { qf_unlock(qf, hash_bucket_index, false); } */ return ret_numfreedslots; } /*********************************************************************** * Code that uses the above to implement key-value-counter operations. 
* ***********************************************************************/ __host__ uint64_t qf_init(QF *qf, uint64_t nslots, uint64_t key_bits, uint64_t value_bits, enum qf_hashmode hash, uint32_t seed, void* buffer, uint64_t buffer_len) { uint64_t num_slots, xnslots, nblocks; uint64_t key_remainder_bits, bits_per_slot; uint64_t size; uint64_t total_num_bytes; assert(popcnt(nslots) == 1); /* nslots must be a power of 2 */ num_slots = nslots; xnslots = nslots + 10*sqrt((double)nslots); nblocks = (xnslots + QF_SLOTS_PER_BLOCK - 1) / QF_SLOTS_PER_BLOCK; key_remainder_bits = key_bits; while (nslots > 1 && key_remainder_bits > 0) { key_remainder_bits--; nslots >>= 1; } assert(key_remainder_bits >= 2); bits_per_slot = key_remainder_bits + value_bits; assert (QF_BITS_PER_SLOT == 0 || QF_BITS_PER_SLOT == bits_per_slot); assert(bits_per_slot > 1); #if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64 size = nblocks * sizeof(qfblock); #else size = nblocks * (sizeof(qfblock) + QF_SLOTS_PER_BLOCK * bits_per_slot / 8); #endif total_num_bytes = sizeof(qfmetadata) + size; if (buffer == NULL || total_num_bytes > buffer_len) return total_num_bytes; // memset(buffer, 0, total_num_bytes); qf->metadata = (qfmetadata *)(buffer); qf->blocks = (qfblock *)(qf->metadata + 1); qf->metadata->magic_endian_number = MAGIC_NUMBER; qf->metadata->reserved = 0; qf->metadata->hash_mode = hash; qf->metadata->total_size_in_bytes = size; qf->metadata->seed = seed; qf->metadata->nslots = num_slots; qf->metadata->xnslots = xnslots; qf->metadata->key_bits = key_bits; qf->metadata->value_bits = value_bits; qf->metadata->key_remainder_bits = key_remainder_bits; qf->metadata->bits_per_slot = bits_per_slot; qf->metadata->range = qf->metadata->nslots; qf->metadata->range <<= qf->metadata->key_remainder_bits; qf->metadata->nblocks = (qf->metadata->xnslots + QF_SLOTS_PER_BLOCK - 1) / QF_SLOTS_PER_BLOCK; qf->metadata->nelts = 0; qf->metadata->ndistinct_elts = 0; 
qf->metadata->noccupied_slots = 0; qf->metadata->qf_full = false; qf->runtimedata->num_locks = ((qf->metadata->xnslots/NUM_SLOTS_TO_LOCK)+2); pc_init(&qf->runtimedata->pc_nelts, (int64_t*)&qf->metadata->nelts, 8, 100); pc_init(&qf->runtimedata->pc_ndistinct_elts, (int64_t*)&qf->metadata->ndistinct_elts, 8, 100); pc_init(&qf->runtimedata->pc_noccupied_slots, (int64_t*)&qf->metadata->noccupied_slots, 8, 100); /* initialize container resize */ qf->runtimedata->auto_resize = 0; qf->runtimedata->container_resize = qf_resize_malloc; /* initialize all the locks to 0 */ qf->runtimedata->metadata_lock = 0; //etodo: copy this to GPU qf->runtimedata->locks = (uint16_t *)calloc(qf->runtimedata->num_locks, sizeof(uint16_t)); if (qf->runtimedata->locks == NULL) { perror("Couldn't allocate memory for runtime locks."); exit(EXIT_FAILURE); } #ifdef LOG_WAIT_TIME qf->runtimedata->wait_times = (wait_time_data* )calloc(qf->runtimedata->num_locks+1, sizeof(wait_time_data)); if (qf->runtimedata->wait_times == NULL) { perror("Couldn't allocate memory for runtime wait_times."); exit(EXIT_FAILURE); } #endif return total_num_bytes; } __host__ uint64_t qf_use(QF* qf, void* buffer, uint64_t buffer_len) { qf->metadata = (qfmetadata *)(buffer); if (qf->metadata->total_size_in_bytes + sizeof(qfmetadata) > buffer_len) { return qf->metadata->total_size_in_bytes + sizeof(qfmetadata); } qf->blocks = (qfblock *)(qf->metadata + 1); qf->runtimedata = (qfruntime *)calloc(sizeof(qfruntime), 1); if (qf->runtimedata == NULL) { perror("Couldn't allocate memory for runtime data."); exit(EXIT_FAILURE); } /* initialize all the locks to 0 */ qf->runtimedata->metadata_lock = 0; qf->runtimedata->locks = (uint16_t *)calloc(qf->runtimedata->num_locks, sizeof(uint16_t)); if (qf->runtimedata->locks == NULL) { perror("Couldn't allocate memory for runtime locks."); exit(EXIT_FAILURE); } #ifdef LOG_WAIT_TIME qf->runtimedata->wait_times = (wait_time_data* )calloc(qf->runtimedata->num_locks+1, sizeof(wait_time_data)); if 
(qf->runtimedata->wait_times == NULL) { perror("Couldn't allocate memory for runtime wait_times."); exit(EXIT_FAILURE); } #endif return sizeof(qfmetadata) + qf->metadata->total_size_in_bytes; } __host__ void *qf_destroy(QF *qf) { assert(qf->runtimedata != NULL); if (qf->runtimedata->locks != NULL) free((void*)qf->runtimedata->locks); if (qf->runtimedata->wait_times != NULL) free(qf->runtimedata->wait_times); if (qf->runtimedata->f_info.filepath != NULL) free(qf->runtimedata->f_info.filepath); free(qf->runtimedata); return (void*)qf->metadata; } __host__ bool qf_malloc(QF *qf, uint64_t nslots, uint64_t key_bits, uint64_t value_bits, enum qf_hashmode hash, bool on_device, uint32_t seed) { uint64_t total_num_bytes = qf_init(qf, nslots, key_bits, value_bits, hash, seed, NULL, 0); //buffer malloc bad? void* buffer = malloc(total_num_bytes); memset(buffer, 0, total_num_bytes); //printf("QF bytes: %llu\n", total_num_bytes); if (buffer == NULL) { perror("Couldn't allocate memory for the CQF."); exit(EXIT_FAILURE); } qf->runtimedata = (qfruntime*)calloc(sizeof(qfruntime), 1); if (qf->runtimedata == NULL) { perror("Couldn't allocate memory for runtime data."); exit(EXIT_FAILURE); } uint64_t init_size = qf_init(qf, nslots, key_bits, value_bits, hash, seed, buffer, total_num_bytes); if (init_size == total_num_bytes) return total_num_bytes; else return -1; } __host__ bool qf_free(QF *qf) { assert(qf->metadata != NULL); void *buffer = qf_destroy(qf); if (buffer != NULL) { free(buffer); return true; } return false; } //consolidate all of the device construction into one convenient func! 
__host__ void qf_malloc_device(QF** qf, int nbits, bool bulk_config){

	/* Build a fully device-resident CQF and hand back a DEVICE pointer in
	 * *qf. Strategy: construct a host CQF first, then mirror its three
	 * pieces (runtimedata / metadata / blocks) into device allocations.
	 * `bulk_config` selects between the bulk-insert layout (per-lock
	 * buffers, a single dummy lock) and the point API layout (spaced
	 * locks, one dummy buffer).
	 * NOTE(review): hipMalloc/hipMemcpy return codes are not checked
	 * anywhere in this routine -- an allocation failure propagates
	 * silently; consider wrapping with an error-check macro. */

	//bring in compile #define
	int rbits = 8;   /* remainder bits added on top of nbits -- TODO confirm */
	int vbits = 0;   /* no value bits in this configuration */

	QF host_qf;
	QF temp_device_qf;
	QF* temp_dev_ptr;

	uint64_t nslots = 1ULL << nbits;
	int num_hash_bits = nbits+rbits;

	/* host-side skeleton; invertible hash, auto-resize disabled */
	qf_malloc(&host_qf, nslots, num_hash_bits, vbits, QF_HASH_INVERTIBLE, false, 0);
	qf_set_auto_resize(&host_qf, false);

	qfruntime* _runtime;
	qfmetadata* _metadata;
	qfblock* _blocks;

	uint16_t * dev_locks;
	uint64_t ** buffers;
	uint64_t * buffer_sizes;

	if (bulk_config){

		uint64_t num_locks = host_qf.runtimedata->num_locks;

		//allocate 1 lock so that hipFree doesn't break later
		hipMalloc((void ** )&dev_locks, 1 * sizeof(uint16_t));

		//are these 2x necessary?
		/* NOTE(review): 2*num_locks sizing looks deliberate (even/odd
		 * passes?) but is unexplained -- confirm before shrinking. */
		hipMalloc((void **) & buffer_sizes, 2*num_locks*sizeof(uint64_t));
		hipMalloc((void **)&buffers, 2*num_locks*sizeof(uint64_t*));

	} else {

		//point API, multiply locks
		/* locks are spread LOCK_DIST apart (padding to avoid contention
		 * on neighboring lock words -- TODO confirm) and zero-initialized */
		hipMalloc((void ** )&dev_locks, host_qf.runtimedata->num_locks*LOCK_DIST * sizeof(uint16_t));
		hipMemset(dev_locks, 0, host_qf.runtimedata->num_locks*LOCK_DIST * sizeof(uint16_t));

		/* dummy single-entry buffers so qf_destroy_device can free them */
		hipMalloc((void **) & buffer_sizes, 1*sizeof(uint64_t));
		hipMalloc((void **)&buffers, 1*sizeof(uint64_t*));

	}

	//wipe and replace
	/* the host lock array is discarded; the device QF must point at the
	 * device lock array instead */
	free(host_qf.runtimedata->locks);
	host_qf.runtimedata->locks = dev_locks;

	hipMalloc((void**)&_runtime, sizeof(qfruntime));
	hipMalloc((void**)&_metadata, sizeof(qfmetadata));
	hipMalloc((void**)&_blocks, qf_get_total_size_in_bytes(&host_qf));

	//uint64_t num_locks = host_qf.runtimedata->num_locks;

	//insert these into host_qf so dev qf has access.
	//they don't need to be wiped as buffers are reset before every insert.
	host_qf.runtimedata->buffers = buffers;
	host_qf.runtimedata->buffer_sizes = buffer_sizes;

	/* mirror the three host-side pieces into the device allocations; after
	 * this point host_qf's runtimedata already contains DEVICE pointers
	 * (locks/buffers), which is what the device copy needs */
	hipMemcpy(_runtime, host_qf.runtimedata, sizeof(qfruntime), hipMemcpyHostToDevice);
	hipMemcpy(_metadata, host_qf.metadata, sizeof(qfmetadata), hipMemcpyHostToDevice);
	hipMemcpy(_blocks, host_qf.blocks, qf_get_total_size_in_bytes(&host_qf), hipMemcpyHostToDevice);

	temp_device_qf.runtimedata = _runtime;
	temp_device_qf.metadata = _metadata;
	temp_device_qf.blocks = _blocks;

	//this might be buggy
	//request to fill the dev ptr with a QF, then copy over, then copy that to qf
	hipMalloc((void **)&temp_dev_ptr, sizeof(QF));
	hipMemcpy(temp_dev_ptr, &temp_device_qf, sizeof(QF), hipMemcpyHostToDevice);

	*qf = temp_dev_ptr;

}

//TODO: make me destroy buffers modifiable
/* Free a device-resident CQF built by qf_malloc_device. Pulls the top-level
 * QF struct and its runtimedata back to (pinned) host memory so the embedded
 * device pointers can be hipFree'd individually.
 * NOTE(review): host_qf/_runtime leak if an early hipMemcpy fails; return
 * codes are unchecked throughout. */
__host__ void qf_destroy_device(QF * qf){

	QF * host_qf;
	hipHostMalloc((void ** )&host_qf, sizeof(QF));

	hipMemcpy(host_qf, qf, sizeof(QF), hipMemcpyDeviceToHost);

	qfruntime* _runtime;
	hipHostMalloc((void **) &_runtime, sizeof(qfruntime));
	hipMemcpy(_runtime, host_qf->runtimedata, sizeof(qfruntime), hipMemcpyDeviceToHost);

	//may need to have _runtimedata shunted into another host object

	//ill synchronize before this to double check
	assert(_runtime != NULL);
	if (_runtime->locks != NULL)
		hipFree(_runtime->locks);
	if (_runtime->buffers != NULL){
		hipFree(_runtime->buffers);
		hipFree(_runtime->buffer_sizes);
	}
	if (_runtime->wait_times != NULL)
		hipFree(_runtime->wait_times);

	//this one may break
	/* NOTE(review): condition tests the host copy but frees through the
	 * DEVICE runtimedata pointer -- confirm this is the intended pointer */
	if (_runtime->f_info.filepath != NULL)
		hipFree(host_qf->runtimedata->f_info.filepath);

	hipFree(host_qf->runtimedata);
	hipFree(host_qf->metadata);
	hipFree(host_qf->blocks);

	hipHostFree(host_qf);
	hipHostFree(_runtime);

}

/* Shallow-clone one host CQF into another pre-allocated one (runtimedata,
 * metadata, and the full block array). Host memory only. */
__host__ void qf_copy(QF *dest, const QF *src)
{
	DEBUG_CQF("%s\n","Source CQF");
	DEBUG_DUMP(src);
	memcpy(dest->runtimedata, src->runtimedata, sizeof(qfruntime));
	memcpy(dest->metadata, src->metadata, sizeof(qfmetadata));
	memcpy(dest->blocks, src->blocks, src->metadata->total_size_in_bytes);
	DEBUG_CQF("%s\n","Destination CQF after copy.");
	DEBUG_DUMP(dest);
}

/* Zero all element counters and wipe every block, returning the filter to
 * its empty state (capacity/configuration metadata is preserved). */
__host__ void qf_reset(QF *qf)
{
	qf->metadata->nelts = 0;
	qf->metadata->ndistinct_elts = 0;
	qf->metadata->noccupied_slots = 0;

#ifdef LOG_WAIT_TIME
	memset(qf->wait_times, 0, (qf->runtimedata->num_locks+1)*sizeof(wait_time_data));
#endif
#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64
	memset(qf->blocks, 0, qf->metadata->nblocks* sizeof(qfblock));
#else
	memset(qf->blocks, 0, qf->metadata->nblocks*(sizeof(qfblock) + QF_SLOTS_PER_BLOCK * qf->metadata->bits_per_slot / 8));
#endif
}

/* Grow (or shrink) the filter to `nslots` by building a new CQF and
 * re-inserting every stored hash, then swapping it in place of *qf.
 * Returns the number of keys migrated, or a negative error. Host only. */
__host__ int64_t qf_resize_malloc(QF *qf, uint64_t nslots)
{
	QF new_qf;
	if (!qf_malloc(&new_qf, nslots, qf->metadata->key_bits, qf->metadata->value_bits, qf->metadata->hash_mode, false, qf->metadata->seed))
		return -1;
	if (qf->runtimedata->auto_resize)
		qf_set_auto_resize(&new_qf, true);

	// copy keys from qf into new_qf
	QFi qfi;
	qf_iterator_from_position(qf, &qfi, 0);
	int64_t ret_numkeys = 0;
	do {
		uint64_t key, value, count;
		/* iterate over raw hashes so re-hashing is skipped on insert */
		qfi_get_hash(&qfi, &key, &value, &count);
		qfi_next(&qfi);
		int ret = qf_insert(&new_qf, key, value, count, QF_NO_LOCK | QF_KEY_IS_HASH);
		if (ret < 0) {
			printf("Failed to insert key: %ld into the new CQF.\n", key);
			return ret;
		}
		ret_numkeys++;
	} while(!qfi_end(&qfi));

	qf_free(qf);
	/* adopt the new filter in place; callers keep using the same QF* */
	memcpy(qf, &new_qf, sizeof(QF));

	return ret_numkeys;
}

/* Buffer-based resize: rebuild into caller-supplied memory. Returns the
 * bytes required (greater than buffer_len signals "too small").
 * NOTE(review): the printf below suggests this path is known-broken. */
uint64_t qf_resize(QF* qf, uint64_t nslots, void* buffer, uint64_t buffer_len)
{

	printf("QF attempting resize - This will fail\n");

	QF new_qf;
	new_qf.runtimedata = (qfruntime *)calloc(sizeof(qfruntime), 1);
	if (new_qf.runtimedata == NULL) {
		perror("Couldn't allocate memory for runtime data.\n");
		exit(EXIT_FAILURE);
	}

	uint64_t init_size = qf_init(&new_qf, nslots, qf->metadata->key_bits, qf->metadata->value_bits, qf->metadata->hash_mode, qf->metadata->seed, buffer, buffer_len);

	if (init_size > buffer_len)
		return init_size;

	if (qf->runtimedata->auto_resize)
		qf_set_auto_resize(&new_qf, true);

	// copy keys from qf into new_qf
	QFi qfi;
	qf_iterator_from_position(qf,
&qfi, 0); do { uint64_t key, value, count; qfi_get_hash(&qfi, &key, &value, &count); qfi_next(&qfi); int ret = qf_insert(&new_qf, key, value, count, QF_NO_LOCK | QF_KEY_IS_HASH); if (ret < 0) { printf("Failed to insert key: %ld into the new CQF.\n", key); abort(); // kill kernel with error } } while(!qfi_end(&qfi)); qf_free(qf); memcpy(qf, &new_qf, sizeof(QF)); return init_size; } __host__ void qf_set_auto_resize(QF* qf, bool enabled) { if (enabled) qf->runtimedata->auto_resize = 1; else qf->runtimedata->auto_resize = 0; } __host__ __device__ qf_returns qf_insert_not_exists(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t flags, uint8_t * retvalue) { // We fill up the CQF up to 95% load factor. // This is a very conservative check. //TODO: GPU resizing /* if (qf_get_num_occupied_slots(qf) >= qf->metadata->nslots * 0.95) { if (qf->runtimedata->auto_resize) { fprintf(stdout, "Resizing the CQF.\n"); if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) < 0) { fprintf(stderr, "Resizing the failed.\n"); return QF_FULL; } } else return QF_FULL; } */ // if (count == 0) // return 0; if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) { if (qf->metadata->hash_mode == QF_HASH_DEFAULT) key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range; else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE) key = hash_64(key, BITMASK(qf->metadata->key_bits)); } uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits)); //printf("Inside insert, new hash is recorded as %llu\n", hash); qf_returns ret; if (count == 1) ret = insert1_if_not_exists(qf, hash, retvalue); //for now count is always 1 //else //ret = insert(qf, hash, count, flags); // check for fullness based on the distance from the home slot to the slot // in which the key is inserted /* if (ret == QF_FULL || ret > DISTANCE_FROM_HOME_SLOT_CUTOFF) { float load_factor = qf_get_num_occupied_slots(qf) / (float)qf->metadata->nslots; 
fprintf(stdout, "Load factor: %lf\n", load_factor); if (qf->runtimedata->auto_resize) { fprintf(stdout, "Resizing the CQF.\n"); if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) > 0) { if (ret == QF_FULL) { if (count == 1) ret = insert1(qf, hash, flags); else ret = insert(qf, hash, count, flags); } fprintf(stderr, "Resize finished.\n"); } else { fprintf(stderr, "Resize failed\n"); ret = QF_FULL; } } else { fprintf(stderr, "The CQF is filling up.\n"); ret = QF_FULL; } } */ return ret; } __device__ qf_returns qf_insert_not_exists_cooperative(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t flags, uint8_t * retvalue, int warpID) { // We fill up the CQF up to 95% load factor. // This is a very conservative check. //TODO: GPU resizing /* if (qf_get_num_occupied_slots(qf) >= qf->metadata->nslots * 0.95) { if (qf->runtimedata->auto_resize) { fprintf(stdout, "Resizing the CQF.\n"); if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) < 0) { fprintf(stderr, "Resizing the failed.\n"); return QF_FULL; } } else return QF_FULL; } */ // if (count == 0) // return 0; if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) { if (qf->metadata->hash_mode == QF_HASH_DEFAULT) key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range; else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE) key = hash_64(key, BITMASK(qf->metadata->key_bits)); } uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits)); //printf("Inside insert, new hash is recorded as %llu\n", hash); qf_returns ret; if (count == 1) ret = insert1_if_not_exists_cooperative(qf, hash, retvalue, warpID); //for now count is always 1 //else //ret = insert(qf, hash, count, flags); // check for fullness based on the distance from the home slot to the slot // in which the key is inserted /* if (ret == QF_FULL || ret > DISTANCE_FROM_HOME_SLOT_CUTOFF) { float load_factor = qf_get_num_occupied_slots(qf) / 
(float)qf->metadata->nslots; fprintf(stdout, "Load factor: %lf\n", load_factor); if (qf->runtimedata->auto_resize) { fprintf(stdout, "Resizing the CQF.\n"); if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) > 0) { if (ret == QF_FULL) { if (count == 1) ret = insert1(qf, hash, flags); else ret = insert(qf, hash, count, flags); } fprintf(stderr, "Resize finished.\n"); } else { fprintf(stderr, "Resize failed\n"); ret = QF_FULL; } } else { fprintf(stderr, "The CQF is filling up.\n"); ret = QF_FULL; } } */ return ret; } __host__ __device__ qf_returns qf_insert(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t flags) { // We fill up the CQF up to 95% load factor. // This is a very conservative check. //TODO: GPU resizing /* if (qf_get_num_occupied_slots(qf) >= qf->metadata->nslots * 0.95) { if (qf->runtimedata->auto_resize) { fprintf(stdout, "Resizing the CQF.\n"); if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) < 0) { fprintf(stderr, "Resizing the failed.\n"); return QF_FULL; } } else return QF_FULL; } */ if (count == 0) return QF_ITEM_INSERTED; if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) { if (qf->metadata->hash_mode == QF_HASH_DEFAULT) key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range; else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE) key = hash_64(key, BITMASK(qf->metadata->key_bits)); } uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits)); //printf("Inside insert, new hash is recorded as %llu\n", hash); qf_returns ret; if (count == 1){ ret = insert1(qf, hash, flags); } else { ret = insert(qf, hash, count, flags); } // check for fullness based on the distance from the home slot to the slot // in which the key is inserted /* if (ret == QF_FULL || ret > DISTANCE_FROM_HOME_SLOT_CUTOFF) { float load_factor = qf_get_num_occupied_slots(qf) / (float)qf->metadata->nslots; fprintf(stdout, "Load factor: %lf\n", load_factor); if 
(qf->runtimedata->auto_resize) { fprintf(stdout, "Resizing the CQF.\n"); if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) > 0) { if (ret == QF_FULL) { if (count == 1) ret = insert1(qf, hash, flags); else ret = insert(qf, hash, count, flags); } fprintf(stderr, "Resize finished.\n"); } else { fprintf(stderr, "Resize failed\n"); ret = QF_FULL; } } else { fprintf(stderr, "The CQF is filling up.\n"); ret = QF_FULL; } } */

	return ret;
}

/*------------------------ GPU Modifications --------------------------*/

//approx filter locking code

//locking implementation for the 16 bit locks
//undefined behavior if you try to unlock a not locked lock

/* Spin until the 16-bit lock at logical slot `index` is acquired.
 * Locks live LOCK_DIST entries apart in `lock`. Busy-waits on atomicCAS
 * (0 -> 1); no backoff. */
__device__ void lock_16(uint16_t * lock, uint64_t index){

	uint16_t zero = 0;
	uint16_t one = 1;

	//while (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) != zero);
	//unsigned short int patch for cuda
	while (atomicCAS((unsigned short int *) &lock[index*LOCK_DIST], (unsigned short int) zero, (unsigned short int) one) != zero);

}

/* Warp-cooperative variant: only lane 0 spins on the CAS; the rest of the
 * warp waits at __syncwarp so all lanes leave holding the lock together. */
__device__ void lock_16_coop(uint16_t * lock, uint64_t index, int warpID){

	uint16_t zero = 0;
	uint16_t one = 1;

	if (warpID ==0){

		//while (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) != zero);
		//quick patch, cuda wants unsigned short int *
		while (atomicCAS((unsigned short int *) &lock[index*LOCK_DIST], (unsigned short int) zero, (unsigned short int) one) != zero);
	}

	__syncwarp();

}

/* Release a lock previously taken with lock_16/try_lock_16 (CAS 1 -> 0).
 * Unlocking a lock that is not held is undefined (see note above). */
__device__ void unlock_16(uint16_t * lock, uint64_t index){

	uint16_t zero = 0;
	uint16_t one = 1;

	//atomicCAS((uint16_t *) &lock[index*LOCK_DIST], one, zero);
	//CUDA CAS Patch
	atomicCAS((unsigned short int *) &lock[index*LOCK_DIST], (unsigned short int) one, (unsigned short int) zero);
}

//lock_16 but built to be included as a piece of a while loop
// this is more in line with traditional cuda processing, may increase throughput

/* Non-blocking acquire: one CAS attempt; true iff the lock was taken. */
__device__ bool try_lock_16(uint16_t * lock, uint64_t index){

	uint16_t zero = 0;
	uint16_t one = 1;

	//if (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) == zero){
	if (atomicCAS((unsigned short int *) &lock[index*LOCK_DIST], (unsigned short int) zero, (unsigned short int) one) == zero){
		return true;
	}

	return false;

}

/* Warp-cooperative try-lock: lane 0 attempts the CAS, then broadcasts the
 * outcome to the full warp via __shfl_sync so every lane returns the same
 * answer. Assumes all 32 lanes are active (full 0xffffffff mask). */
__device__ bool try_lock_16_coop(uint16_t * lock, uint64_t index, int warpID){

	uint16_t zero = 0;
	uint16_t one = 1;

	bool ballot = 0;

	if (warpID == 0){

		if (atomicCAS((unsigned short int *) &lock[index*LOCK_DIST], (unsigned short int) zero, (unsigned short int) one) == zero){
		//if (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) == zero){
			ballot = 1;
		}

	}

	ballot = __shfl_sync(0xffffffff, ballot, 0);

	return ballot;

}

/* Swap arr[i] and arr[j] (no synchronization -- caller's responsibility). */
__device__ __forceinline__ void exchange(uint64_t * arr, uint64_t i, uint64_t j){

	uint64_t temp = arr[i];
	arr[i] = arr[j];
	arr[j] = temp;

	//maybe synchthreads?

}

/* Compare-and-swap step for a bitonic-style sort: swap when the pair's
 * order disagrees with direction `dir`. */
__device__ __forceinline__ void compare(uint64_t * arr, uint64_t i, uint64_t j, bool dir){

	if (dir == (arr[i] > arr[j])){
		exchange(arr, i, j);
	}

}

//return the biggest int of a uint64
/* Index of the highest set bit of n (undefined for n == 0, where __clzll
 * returns 64). */
__device__ __forceinline__ int biggest_bit(uint64_t n){

	return 63 - __clzll((unsigned long long int) n);

}

/* NOTE(review): `<<` binds looser than `-`, so this computes
 * 1UL << (biggest_bit(n)-1), i.e. HALF the largest power of two <= n
 * (and shifts by -1 when n == 1). If the name is the intent, this should
 * be `1ULL << biggest_bit(n)` -- confirm against callers before changing.
 * Also note 1UL is only 32 bits on LLP64 hosts; 1ULL would be safer. */
__device__ __forceinline__ uint64_t biggest_pow_2(uint64_t n){

	return 1UL<<biggest_bit(n)-1;

}

/* Kernel: hash `nvals` keys into `hashes` (one thread per key).
 * With QF_KEY_IS_HASH the key passes through untouched; otherwise Murmur
 * (masked into the power-of-two range) or the invertible hash is applied. */
__global__ void hash_all(QF* qf, uint64_t* vals, uint64_t* hashes, uint64_t nvals, uint8_t flags) {

	uint64_t idx = threadIdx.x + blockDim.x * blockIdx.x;

	if (idx >= nvals){
		return;
	}

	uint64_t key = vals[idx];

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			/* range is a power of two, so `& (range-1)` == `% range` */
			key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) & (qf->metadata->range - 1);
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}

	//uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));
	hashes[idx] = key;

	return;

}

//revised work pipeline
// 1) Set all offsets to keys here based on relative offset + keys - skips the launch call later - TODO: double check that (keys + offset) - keys == offset.
-- cpp says this works // 2) subtract sets of keys from each other to get the relative offsets - these will give offsets, last key needs to subtract from origin pointer // this means that the keys here are set to point to the START of their bucket __global__ void set_buffers_binary(QF * qf, uint64_t num_keys, uint64_t * keys, uint8_t flags){ uint64_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= qf->runtimedata->num_locks) return; uint64_t slots_per_lock = NUM_SLOTS_TO_LOCK; //since we are finding all boundaries, we only need //printf("idx %llu\n", idx); //this sounds right? - they divide to go back so I think this is fine uint64_t boundary = (slots_per_lock*idx); //<< qf->metadata->bits_per_slot; //This is the code I'm stealing that assumption from //uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot; //uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot); //uint64_t lock_index = hash_bucket_index / slots_per_lock; uint64_t lower = 0; uint64_t upper = num_keys; uint64_t index = upper-lower; //upper is non inclusive bound //if we exceed bounds that's our index while (upper != lower){ index = lower + (upper - lower)/2; if ((keys[index] >> qf->metadata->bits_per_slot) < boundary){ //false - the list before this point can be removed lower = index+1; //jump to a new midpoint } else if (index==0){ //will this fix? otherwise need to patch via round up upper = index; } else if ((keys[index-1] >> qf->metadata->bits_per_slot) < boundary) { //set index! 
this is the first instance where I am valid and the next isnt //buffers[idx] = keys+index; break; } else { //we are too far right, all keys to the right do not matter upper = index; } } //we either exited or have an edge condition: //upper == lower iff 0 or max key index = lower + (upper - lower)/2; qf->runtimedata->buffers[idx] = keys + index; } __global__ void find_clusters(QF* qf, uint64_t * cluster_lengths, uint64_t * max_clusters){ uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x; if (tid != 0) return; uint64_t start_slot = 0; uint64_t i =0; while (start_slot < qf->metadata->nslots){ uint64_t old_start = start_slot; start_slot = find_first_empty_slot(qf, start_slot); if (start_slot == old_start){ start_slot++; } else { cluster_lengths[i] = start_slot-old_start; i++; } } max_clusters[0] = i; } //this can maybe be rolled into set_buffers_binary //it performs an identical set of operations that are O(1) here // O(log n) there, but maybe amortized __global__ void set_buffer_lens(QF * qf, uint64_t num_keys, uint64_t * keys){ uint64_t num_buffers = qf->runtimedata->num_locks; uint64_t idx = threadIdx.x + blockDim.x*blockIdx.x; if (idx >= num_buffers) return; uint64_t** buffers = qf->runtimedata->buffers; uint64_t * buffer_sizes = qf->runtimedata->buffer_sizes; //only 1 thread will diverge - should be fine - any cost already exists because of tail if (idx != num_buffers-1){ //this should work? 
not 100% convinced but it seems ok buffer_sizes[idx] = buffers[idx+1] - buffers[idx]; } else { buffer_sizes[idx] = num_keys - (buffers[idx] - keys); } return; } //insert from buffers using prehashed_data __global__ void insert_from_buffers_hashed(QF* qf, uint64_t evenness){ //uint64_t num_buffers, uint64_t** buffers, volatile uint64_t * buffer_counts; uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness; if (idx >= qf->runtimedata->num_locks) return; uint64_t * buffer_counts = qf->runtimedata->buffer_sizes; uint64_t ** buffers = qf->runtimedata->buffers; //at the start, we sort //we are exceeding bounds by 1 //quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0); //no need to sort if empty - this will cause overflow as 0-1 == max_uint // if (buffer_counts[idx] > 0) { // quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0); // //assert(assert_sorted(buffers[idx], buffer_counts[idx])); // } uint64_t my_count = buffer_counts[idx]; for (uint64_t i =0; i < my_count; i++){ int ret = qf_insert(qf, buffers[idx][i], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH); //internal threadfence. Bad? 
actually seems to be fine //__threadfence(); } __threadfence(); } //insert from buffers using prehashed_data //use warp cooperative operations __global__ void insert_from_buffers_cooperative(QF* qf, uint64_t evenness){ //uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness; uint64_t tid = threadIdx.x+blockDim.x*blockIdx.x; uint64_t itemID = tid / 32; int warpID = tid % 32; uint64_t idx = 2*itemID+evenness; if (idx >= qf->runtimedata->num_locks) return; uint64_t * buffer_counts = qf->runtimedata->buffer_sizes; uint64_t ** buffers = qf->runtimedata->buffers; //at the start, we sort //we are exceeding bounds by 1 //quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0); //no need to sort if empty - this will cause overflow as 0-1 == max_uint // if (buffer_counts[idx] > 0) { // quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0); // //assert(assert_sorted(buffers[idx], buffer_counts[idx])); // } //uint64_t - uint64_t should yield offset into vals //uint64_t absolute_offset = buffers[idx]- buffers; uint64_t my_count = buffer_counts[idx]; for (uint64_t i =0; i < my_count; i++){ //assert(keys[absolute_offset+i] == buffers[idx][i]); uint8_t query; qf_returns ret_val = qf_insert_not_exists_cooperative(qf, buffers[idx][i], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH, &query, warpID); #if DEBUG_ASSERTS assert(ret_val != QF_FULL); #endif //internal threadfence. Bad? 
actually seems to be fine
		//__threadfence();
	}

	// publish this region's inserts before the kernel exits
	__threadfence();

}

/* Insert kernel for the thrust reduce-by-key bulk path.
 * One thread owns one lock region ("buffer") of pre-hashed, sorted, deduped
 * keys and inserts each with its reduced count taken from `vals`.
 * `evenness` (0 or 1) selects alternating regions so that two passes never
 * touch adjacent regions concurrently, which is why QF_NO_LOCK is safe.
 * NOTE(review): assumes buffers[idx] points into `keys`, so pointer
 * subtraction yields the offset into `vals` — confirm against
 * set_buffers_binary. */
__global__ void insert_from_buffers_thrust(QF* qf, uint64_t evenness, uint64_t * keys, uint64_t * vals, uint64_t num_keys){

	uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness;

	// one thread per (alternating) lock region
	if (idx >= qf->runtimedata->num_locks) return;

	uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
	uint64_t ** buffers = qf->runtimedata->buffers;

	// buffers[idx] points into the sorted key array, so uint64_t* - uint64_t*
	// yields this region's starting offset into keys/vals
	uint64_t absolute_offset = buffers[idx] - keys;

	// defensive bound check — should never fire for a well-formed buffer table
	if (absolute_offset >= num_keys){
		printf("Offset is %llu, num_keys %llu\n", absolute_offset, num_keys);
		return;
	}

	uint64_t my_count = buffer_counts[idx];

	for (uint64_t i =0; i < my_count; i++){

		// keys are already hashed (QF_KEY_IS_HASH); region exclusivity makes
		// QF_NO_LOCK safe. Return value intentionally ignored in bulk path.
		int ret = qf_insert(qf, buffers[idx][i], 0, vals[absolute_offset+i], QF_NO_LOCK | QF_KEY_IS_HASH);

	}

	// make all inserts of this region globally visible
	__threadfence();

}

/* Bulk-delete kernel: one thread per (alternating) lock region removes every
 * pre-hashed key buffered for that region. `evenness` alternates regions
 * between the two launch passes (see bulk_delete) so neighbours never race. */
__global__ void delete_from_buffers_hashed(QF* qf, uint64_t evenness){

	uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness;

	if (idx >= qf->runtimedata->num_locks) return;

	uint64_t ** buffers = qf->runtimedata->buffers;
	uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;

	uint64_t my_count = buffer_counts[idx];

	// iterate back-to-front; i runs my_count..1 and indexes with i-1 so an
	// empty buffer (my_count == 0) never underflows the unsigned counter
	for (uint64_t i = my_count; i >=1; i--){

		int ret = qf_remove(qf, buffers[idx][i-1], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH);

	}

	__threadfence();

}

/* Device-side point "insert if absent".
 * Hashes `key` unless flags carry QF_KEY_IS_HASH, then spins on the home
 * region's 16-bit lock; once held, also blocks on the neighbouring region
 * lock (runs may spill into the next region) before calling
 * qf_insert_not_exists. On QF_ITEM_FOUND the existing value is copied to
 * `returnedVal`. */
__device__ qf_returns point_insert_not_exists(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags){

	uint8_t query;

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}

	uint64_t hash = key % qf->metadata->range;
	uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
	uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;

	while (true){

		// try the home lock; once held, block on the neighbour lock
		if (try_lock_16(qf->runtimedata->locks, lock_index)){

			lock_16(qf->runtimedata->locks, lock_index+1);

			qf_returns ret = qf_insert_not_exists(qf, hash, value, 1, QF_NO_LOCK | QF_KEY_IS_HASH, &query);

			if (ret == QF_ITEM_FOUND){
				returnedVal = query;
			}

			// publish the insert before releasing the locks
			__threadfence();

			unlock_16(qf->runtimedata->locks, lock_index+1);
			unlock_16(qf->runtimedata->locks, lock_index);

			return ret;

			// NOTE(review): unreachable — remnant of a commented-out
			// try_lock variant; harmless dead code.
			unlock_16(qf->runtimedata->locks, lock_index);
		}
	}
}

/* Warp-cooperative variant of point_insert_not_exists: all 32 lanes of a warp
 * call this for the same key; `warpID` is the lane id. Locking/unlocking is
 * funnelled through the coop lock helpers (and lane 0 for unlock) while the
 * probe itself runs cooperatively across the warp. */
__device__ qf_returns point_insert_not_exists_cooperative(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags, int warpID){

	uint8_t query;

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}

	uint64_t hash = key % qf->metadata->range;
	uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
	uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;

	while (true){

		if (try_lock_16_coop(qf->runtimedata->locks, lock_index, warpID)){

			lock_16_coop(qf->runtimedata->locks, lock_index+1, warpID);

			qf_returns ret = qf_insert_not_exists_cooperative(qf, hash, value, 1, QF_NO_LOCK | QF_KEY_IS_HASH, &query, warpID);

			if (ret == QF_ITEM_FOUND){
				returnedVal = query;
			}

			__threadfence();

			// only lane 0 releases the locks; every lane returns the result
			if (warpID ==0){
				unlock_16(qf->runtimedata->locks, lock_index+1);
				unlock_16(qf->runtimedata->locks, lock_index);
			}

			return ret;

			// NOTE(review): unreachable dead code after return
			if (warpID ==0) unlock_16(qf->runtimedata->locks, lock_index);
		}
	}
}

/* Device-side point remove under the two-region lock protocol.
 * Returns qf_remove's status (negative on failure / item absent). */
__device__ int point_remove(QF* qf, uint64_t key, uint8_t value, uint8_t flags){

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}

	uint64_t hash = key % qf->metadata->range;
	uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
	uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;

	while (true){

		if (try_lock_16(qf->runtimedata->locks, lock_index)){

			lock_16(qf->runtimedata->locks, lock_index+1);

			int ret = qf_remove(qf, hash, value, 1, QF_NO_LOCK | QF_KEY_IS_HASH);

			__threadfence();

			unlock_16(qf->runtimedata->locks, lock_index+1);
			unlock_16(qf->runtimedata->locks, lock_index);

			return ret;

			// NOTE(review): unreachable dead code after return
			unlock_16(qf->runtimedata->locks, lock_index);
		}
	}
}

/* Device-side point insert (count 1) under the two-region lock protocol. */
__device__ qf_returns point_insert(QF* qf, uint64_t key, uint8_t value, uint8_t flags){

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}

	uint64_t hash = key % qf->metadata->range;
	uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
	uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;

	while (true){

		if (try_lock_16(qf->runtimedata->locks, lock_index)){

			lock_16(qf->runtimedata->locks, lock_index+1);

			qf_returns ret = qf_insert(qf, hash, value, 1, QF_NO_LOCK | QF_KEY_IS_HASH);

			__threadfence();

			unlock_16(qf->runtimedata->locks, lock_index+1);
			unlock_16(qf->runtimedata->locks, lock_index);

			return ret;

			// NOTE(review): unreachable dead code after return
			unlock_16(qf->runtimedata->locks, lock_index);
		}
	}
}

/* Lock-free point query: returns the count found for `key` (0 if absent) and
 * writes the stored value slot into `returnedVal`.
 * NOTE(review): `query` is 64-bit but `returnedVal` is uint8_t — the value is
 * truncated; confirm value_bits <= 8 in all callers. */
__device__ uint64_t point_query(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags){

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}

	uint64_t hash = key % qf->metadata->range;

	uint64_t query;
	uint64_t ret = qf_query(qf, hash, &query, QF_NO_LOCK | QF_KEY_IS_HASH);
	returnedVal = query;
	return ret;
}

/* Locked point query — identical to point_query but takes both region locks,
 * for use concurrently with writers. */
__device__ uint64_t point_query_concurrent(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags){

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}

	uint64_t hash = key % qf->metadata->range;
	uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
	uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;

	while (true){

		if (try_lock_16(qf->runtimedata->locks, lock_index)){

			lock_16(qf->runtimedata->locks, lock_index+1);

			uint64_t query;
			uint64_t ret = qf_query(qf, hash, &query, QF_NO_LOCK | QF_KEY_IS_HASH);

			__threadfence();

			unlock_16(qf->runtimedata->locks, lock_index+1);
			unlock_16(qf->runtimedata->locks, lock_index);

			returnedVal = query;
			return ret;

			// NOTE(review): unreachable dead code after return
			unlock_16(qf->runtimedata->locks, lock_index);
		}
	}
}

/* One thread per item: query each hash and count misses into *counter. */
__global__ void point_bulk_get(QF * qf, uint64_t * hashes, uint64_t nitems, uint64_t * counter){

	uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x;

	if (tid >=nitems) return;

	uint8_t query;

	// qf_query returns 0 when the item is not found — count it as a miss
	if (point_query(qf, hashes[tid] % qf->metadata->range, 0, query, QF_NO_LOCK) ==0){

		atomicAdd((unsigned long long int *) counter, (unsigned long long int) 1);
	}

}

/* One thread per item: query each hash, discard the result (throughput
 * benchmark variant — no miss counting). */
__global__ void point_bulk_get_nocount(QF * qf, uint64_t * hashes, uint64_t nitems){

	uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x;

	if (tid >=nitems) return;

	uint8_t query;

	point_query(qf, hashes[tid] % qf->metadata->range, 0, query, QF_NO_LOCK);

}

/* Warp-per-lock-region bulk query: lanes of a warp stride over the region's
 * buffered keys (set up by set_buffers_binary/set_buffer_lens).
 * NOTE(review): the miss-counting atomicAdd is commented out, so `counter`
 * is never updated and cooperative_bulk_get_wrapper always reports 0 misses. */
__global__ void bulk_get_cooperative(QF * qf, uint64_t * hashes, uint64_t nitems, uint64_t * counter){

	uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;

	uint64_t itemID = tid /32;

	int warpID = tid % 32;

	if (itemID >= qf->runtimedata->num_locks) return;

	uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
	uint64_t ** buffers = qf->runtimedata->buffers;

	uint64_t my_count = buffer_counts[itemID];

	// each lane handles every 32nd key of this region
	for (uint64_t i =warpID; i < my_count; i+=32){

		uint8_t query;

		if (point_query(qf, buffers[itemID][i] % qf->metadata->range, 0, query, QF_NO_LOCK | QF_KEY_IS_HASH) ==0){

			//atomicAdd((unsigned long long int *) counter, (unsigned long long int) 1);
		}

	}

}

/* Host wrapper for the warp-cooperative bulk query: hashes and sorts the
 * input in place, rebuilds the per-region buffer table, launches
 * bulk_get_cooperative, and prints timing. Returns the miss count
 * (NOTE(review): always 0 — see bulk_get_cooperative). */
__host__ uint64_t cooperative_bulk_get_wrapper(QF * qf, uint64_t * hashes, uint64_t nitems){

	auto start = std::chrono::high_resolution_clock::now();

	uint64_t key_block_size = 32;

	// fetch num_locks from device metadata; slow but unavoidable here
	uint64_t * dev_num_locks;
	hipMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
	hipLaunchKernelGGL(( get_dev_nvals), dim3(1),dim3(1), 0, 0, qf, dev_num_locks);
	hipDeviceSynchronize();
	uint64_t num_locks = dev_num_locks[0];
	hipFree(dev_num_locks);

	uint64_t key_block = (nitems-1)/key_block_size + 1;

	// hash in place; everything downstream treats keys as pre-hashed
	hipLaunchKernelGGL(( hash_all), dim3(key_block), dim3(key_block_size), 0, 0, qf, hashes, hashes, nitems, 0);

	thrust::sort(thrust::device, hashes, hashes+nitems);

	hipLaunchKernelGGL(( set_buffers_binary), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nitems, hashes, 0);
	hipLaunchKernelGGL(( set_buffer_lens), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nitems, hashes);

	uint64_t * misses;
	//this is fine, should never be triggered
	hipMallocManaged((void **)&misses, sizeof(uint64_t));
	hipMemset(misses, 0, sizeof(uint64_t));
	hipDeviceSynchronize();

	auto midpoint = std::chrono::high_resolution_clock::now();

	const int bulk_block_size = 1024;

	// one warp per item slot; kernel internally caps at num_locks regions
	hipLaunchKernelGGL(( bulk_get_cooperative), dim3((nitems*32-1)/bulk_block_size+1), dim3(bulk_block_size), 0, 0, qf, hashes, nitems, misses);

	hipDeviceSynchronize();

	auto end = std::chrono::high_resolution_clock::now();

	std::chrono::duration<double> sort_diff = midpoint-start;
	std::chrono::duration<double> diff = end-midpoint;

	std::cout << "sorted " << nitems << " in " << sort_diff.count() << " seconds\n";
	std::cout << "Queried " << nitems << " in " << diff.count() << " seconds\n";

	uint64_t output = misses[0];

	hipFree(misses);

	return output;
}

/* Host wrapper: fire-and-forget point queries, no miss counting. Always
 * returns 0 (the counting variant is point_get_wrapper_fp). */
__host__ uint64_t point_get_wrapper(QF * qf, uint64_t * hashes, uint64_t nitems){

	// uint64_t * misses;
	// //this is fine, should never be triggered
	// hipMallocManaged((void **)&misses, sizeof(uint64_t));
	// hipMemset(misses, 0, sizeof(uint64_t));

	hipLaunchKernelGGL(( point_bulk_get_nocount), dim3((nitems-1)/512+1), dim3(512), 0, 0, qf, hashes, nitems);

	hipDeviceSynchronize();

	// uint64_t toReturn = *misses;

	// hipFree(misses);

	// return toReturn;

	return 0;
}

/* Host wrapper: point queries with miss counting (false-positive testing).
 * Returns the number of keys NOT found in the filter. */
__host__ uint64_t point_get_wrapper_fp(QF * qf, uint64_t * hashes, uint64_t nitems){

	uint64_t * misses;
	//this is fine, should never be triggered
	hipMallocManaged((void **)&misses, sizeof(uint64_t));
	hipMemset(misses, 0, sizeof(uint64_t));

	hipLaunchKernelGGL(( point_bulk_get), dim3((nitems-1)/512+1), dim3(512), 0, 0, qf, hashes, nitems, misses);

	hipDeviceSynchronize();

	uint64_t toReturn = *misses;

	hipFree(misses);

	return toReturn;

	//return 0;
}

/* One thread per item: independent locked point inserts (no bulk buffering). */
__global__ void point_bulk_insert(QF * qf, uint64_t * hashes, uint64_t nitems){

	uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x;

	if (tid >=nitems) return;

	//#if DROP_ON_RUNEND
	// result deliberately ignored: items hitting a full runend are dropped
	point_insert(qf, hashes[tid], 0, 0);

	// #else

	// assert(point_insert(qf, hashes[tid], 0, 0) != QF_FULL);

	// #endif

}

/* One warp per item: cooperative locked point inserts; asserts on QF_FULL.
 * NOTE(review): here `tid` is the item index and `warpID` the lane — the
 * naming is swapped relative to bulk_get_cooperative. */
__global__ void point_bulk_insert_cooperative(QF * qf, uint64_t * hashes, uint64_t nitems){

	uint64_t itemID = threadIdx.x + blockIdx.x * blockDim.x;

	uint64_t tid = itemID / 32;

	int warpID = itemID % 32;

	if (tid >=nitems) return;

	uint8_t retvalue;

	assert(point_insert_not_exists_cooperative(qf, hashes[tid], 0, retvalue, 0, warpID) != QF_FULL);

}

/* Single-thread kernel: copy num_locks from device-resident runtimedata into
 * host-visible (managed) memory. */
__global__ void get_dev_nvals(QF* qf, uint64_t * external_nvals){

	uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;

	if (tid >= 1) return;

	external_nvals[0] = qf->runtimedata->num_locks;

}

/* Bulk insert: hash keys in place, sort them, build per-lock-region buffers,
 * then insert even regions and odd regions in two passes so neighbouring
 * regions never run concurrently (lock-free inside each region).
 * NOTE: modifies `keys` (hashes and sorts it in place). */
__host__ void bulk_insert(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) {

	uint64_t key_block_size = 32;

	uint64_t key_block = (nvals -1)/key_block_size + 1;

	// fetch num_locks from the device; slow but there isn't a better way
	uint64_t * dev_num_locks;
	hipMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
	hipLaunchKernelGGL(( get_dev_nvals), dim3(1),dim3(1), 0, 0, qf, dev_num_locks);
	hipDeviceSynchronize();
	uint64_t num_locks = dev_num_locks[0];
	hipFree(dev_num_locks);

	// keys are hashed in place; treated as pre-hashed from here on
	hipLaunchKernelGGL(( hash_all), dim3(key_block), dim3(key_block_size), 0, 0, qf, keys, keys, nvals, flags);

	thrust::sort(thrust::device, keys, keys+nvals);

	hipLaunchKernelGGL(( set_buffers_binary), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nvals, keys, flags);

	hipLaunchKernelGGL(( set_buffer_lens), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nvals, keys);

	const int bulk_block_size = 32;

	// two passes: even regions, then odd regions
	uint64_t evenness = 0;

	hipLaunchKernelGGL(( insert_from_buffers_hashed), dim3((num_locks-1)/bulk_block_size+1), dim3(bulk_block_size), 0, 0, qf, evenness);

	evenness = 1;

	hipLaunchKernelGGL(( insert_from_buffers_hashed), dim3((num_locks-1)/bulk_block_size+1), dim3(bulk_block_size), 0, 0, qf, evenness);

}
/* Warp-cooperative bulk insert: same hash/sort/buffer pipeline as
 * bulk_insert, but each region is processed by a 32-lane warp
 * (insert_from_buffers_cooperative), again in even/odd passes.
 * NOTE: modifies `keys` in place. */
__host__ void bulk_insert_cooperative(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) {

	uint64_t key_block_size = 32;

	uint64_t key_block = (nvals -1)/key_block_size + 1;

	// fetch num_locks from the device; slow but there isn't a better way
	uint64_t * dev_num_locks;
	hipMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
	hipLaunchKernelGGL(( get_dev_nvals), dim3(1),dim3(1), 0, 0, qf, dev_num_locks);
	hipDeviceSynchronize();
	uint64_t num_locks = dev_num_locks[0];
	hipFree(dev_num_locks);

	// keys are hashed in place; treated as pre-hashed from here on
	hipLaunchKernelGGL(( hash_all), dim3(key_block), dim3(key_block_size), 0, 0, qf, keys, keys, nvals, flags);

	thrust::sort(thrust::device, keys, keys+nvals);

	hipLaunchKernelGGL(( set_buffers_binary), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nvals, keys, flags);

	hipLaunchKernelGGL(( set_buffer_lens), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nvals, keys);

	// 32 threads (one warp) per region, even then odd passes
	uint64_t evenness = 0;

	hipLaunchKernelGGL(( insert_from_buffers_cooperative), dim3((32*num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, evenness);

	evenness = 1;

	hipLaunchKernelGGL(( insert_from_buffers_cooperative), dim3((32*num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, evenness);

}

/* Bulk insert with duplicate reduction: hash + sort, then
 * thrust::reduce_by_key collapses duplicate hashes into (key, count) pairs so
 * each distinct key is inserted once with its full count
 * (insert_from_buffers_thrust). NOTE: modifies `keys` in place; prints
 * progress diagnostics. */
__host__ void bulk_insert_reduce(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) {

	uint64_t key_block_size = 32;

	uint64_t key_block = (nvals -1)/key_block_size + 1;

	// fetch num_locks from the device; slow but there isn't a better way
	uint64_t * dev_num_locks;
	hipMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
	hipLaunchKernelGGL(( get_dev_nvals), dim3(1),dim3(1), 0, 0, qf, dev_num_locks);
	hipDeviceSynchronize();
	uint64_t num_locks = dev_num_locks[0];
	hipFree(dev_num_locks);

	// keys are hashed in place; treated as pre-hashed from here on
	hipLaunchKernelGGL(( hash_all), dim3(key_block), dim3(key_block_size), 0, 0, qf, keys, keys, nvals, flags);

	thrust::sort(thrust::device, keys, keys+nvals);

	thrust::device_ptr<uint64_t> keys_ptr(keys);

	// every key starts with multiplicity 1; reduce_by_key sums duplicates
	thrust::device_ptr<uint64_t> dupe_counts= thrust::device_malloc<uint64_t>(nvals);
	thrust::fill(dupe_counts, dupe_counts+nvals, 1);

	thrust::device_ptr<uint64_t> thrust_keys = thrust::device_malloc<uint64_t>(nvals);
	thrust::device_ptr <uint64_t> thrust_vals = thrust::device_malloc<uint64_t>(nvals);

	thrust::pair<thrust::device_ptr<uint64_t>,thrust::device_ptr<uint64_t>> new_end;

	new_end = thrust::reduce_by_key(thrust::device, keys_ptr, keys_ptr+nvals, dupe_counts, thrust_keys, thrust_vals);

	hipDeviceSynchronize();

	uint64_t new_nvals = new_end.first - thrust_keys;

	printf("New nvals %llu\n", new_nvals);

	printf("Error after this is pointer cast?\n");

	uint64_t * new_keys = thrust::raw_pointer_cast(thrust_keys);
	uint64_t * new_key_counts = thrust::raw_pointer_cast(thrust_vals);

	hipDeviceSynchronize();

	printf("Error after this in binary?\n");

	hipLaunchKernelGGL(( set_buffers_binary), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, new_nvals, new_keys, flags);

	hipLaunchKernelGGL(( set_buffer_lens), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, new_nvals, new_keys);

	hipDeviceSynchronize();

	printf("Thrust buffers attached\n");

	// even then odd region passes, inserting reduced counts
	uint64_t evenness = 0;

	hipLaunchKernelGGL(( insert_from_buffers_thrust), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, evenness, new_keys,new_key_counts, new_nvals);

	evenness = 1;

	hipLaunchKernelGGL(( insert_from_buffers_thrust), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, evenness, new_keys, new_key_counts, new_nvals);

	hipDeviceSynchronize();

	printf("Insertion done.\n");

	//free resources
	thrust::device_free(thrust_keys);
	thrust::device_free(thrust_vals);
	thrust::device_free(dupe_counts);

}

/* Bulk delete: same hash/sort/buffer pipeline, then delete_from_buffers_hashed
 * removes each buffered key in even/odd region passes.
 * NOTE: modifies `keys` in place. */
__host__ void bulk_delete(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) {

	uint64_t key_block_size = 32;

	uint64_t key_block = (nvals -1)/key_block_size + 1;

	// fetch num_locks from the device; slow but there isn't a better way
	uint64_t * dev_num_locks;
	hipMallocManaged((void **)&dev_num_locks, sizeof(uint64_t));
	hipLaunchKernelGGL(( get_dev_nvals), dim3(1),dim3(1), 0, 0, qf, dev_num_locks);
	hipDeviceSynchronize();
	uint64_t num_locks = dev_num_locks[0];
	hipFree(dev_num_locks);

	// keys are hashed in place; treated as pre-hashed from here on
	hipLaunchKernelGGL(( hash_all), dim3(key_block), dim3(key_block_size), 0, 0, qf, keys, keys, nvals, flags);

	thrust::sort(thrust::device, keys, keys+nvals);

	hipLaunchKernelGGL(( set_buffers_binary), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nvals, keys, flags);

	hipLaunchKernelGGL(( set_buffer_lens), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, nvals, keys);

	// even then odd region passes
	uint64_t evenness = 0;

	hipLaunchKernelGGL(( delete_from_buffers_hashed), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, evenness);

	evenness = 1;

	hipLaunchKernelGGL(( delete_from_buffers_hashed), dim3((num_locks-1)/key_block_size+1), dim3(key_block_size), 0, 0, qf, evenness);

}

/* One thread per value: count lookup with the result discarded (timing-only
 * variant; key_count/flags are accepted for signature parity but unused). */
__global__ void bulk_get_nocount(QF * qf, uint64_t * vals, uint64_t nvals, uint64_t key_count, uint8_t flags){

	uint64_t tid = threadIdx.x+blockDim.x*blockIdx.x;

	if (tid >= nvals) return;

	uint64_t count = qf_count_key_value(qf, vals[tid], 0, 0);

	return;

}

/* One thread per value: count lookup; increments *counter for every value
 * whose stored count is below `key_count` (i.e. a miss). */
__global__ void bulk_get_misses(QF * qf, uint64_t * vals, uint64_t nvals, uint64_t key_count, uint64_t * counter, uint8_t flags){

	uint64_t tid = threadIdx.x + blockDim.x * blockIdx.x;

	//should never happen, but just in case
	if (tid >= nvals) return;

	uint64_t count = qf_count_key_value(qf, vals[tid], 0, 0);

	if (count < key_count) {

		atomicAdd((long long unsigned int *)counter, (long long unsigned int) 1);

	}

}

/* One thread per value: set hits[tid] when the value is present (count >= 1).
 * NOTE(review): misses leave hits[tid] untouched — caller must pre-zero. */
__global__ void bulk_get_hits_kernel(QF * qf, uint64_t * vals, bool * hits, uint64_t nvals){

	uint64_t tid = threadIdx.x + blockDim.x * blockIdx.x;

	//should never happen, but just in case
	if (tid >= nvals) return;

	uint64_t count = qf_count_key_value(qf, vals[tid], 0, 0);

	if (count >= 1) {

		hits[tid] = 1;

	}

}

/* Public alias for bulk_insert. */
__host__ void qf_bulk_insert(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags){

	bulk_insert(qf, nvals, keys, flags);

}

/* Host wrapper for bulk_get_hits_kernel (512 threads per block).
 * NOTE(review): no sync here — caller must synchronize before reading hits. */
__host__ void bulk_get_hits(QF * qf, uint64_t * vals, bool * hits, uint64_t nvals){

	hipLaunchKernelGGL(( bulk_get_hits_kernel), dim3((nvals -1)/512+1), dim3(512), 0, 0, qf, vals, hits, nvals);

}

/* One thread per value: write each value's stored count into returns[tid]. */
__global__ void bulk_get_kernel(QF * qf, uint64_t * vals, uint64_t nvals, uint64_t * returns, uint8_t flags){

	uint64_t tid = threadIdx.x + blockDim.x * blockIdx.x;

	//should never happen, but just in case
	if (tid >= nvals) return;

	returns[tid] = qf_count_key_value(qf, vals[tid], 0, flags);

}

/* Host wrapper for bulk_get_kernel.
 * NOTE(review): no sync here — caller must synchronize before reading returns. */
__host__ void bulk_get(QF * qf, uint64_t nvals, uint64_t * vals, uint64_t * returns){

	hipLaunchKernelGGL(( bulk_get_kernel), dim3((nvals-1)/512+1), dim3(512), 0, 0, qf, vals, nvals, returns, QF_NO_LOCK);

}

/* Host wrapper: count how many of `vals` are missing (count < 1). */
__host__ uint64_t bulk_get_misses_wrapper(QF * qf, uint64_t * vals, uint64_t nvals){

	uint64_t * misses;
	//this is fine, should never be triggered
	hipMallocManaged((void **)&misses, sizeof(uint64_t));
	hipMemset(misses, 0, sizeof(uint64_t));

	hipLaunchKernelGGL(( bulk_get_misses), dim3((nvals-1)/512+1), dim3(512), 0, 0, qf, vals, nvals, 1, misses, QF_NO_LOCK);

	hipDeviceSynchronize();
	uint64_t toReturn = *misses;

	hipFree(misses);

	return toReturn;

	//return 0;
}

/* Host wrapper: run the count lookups without recording anything (timing
 * only). Always returns 0. */
__host__ uint64_t bulk_get_nocount_wrapper(QF * qf, uint64_t * vals, uint64_t nvals){

	hipLaunchKernelGGL(( bulk_get_nocount), dim3((nvals-1)/512+1), dim3(512), 0, 0, qf, vals, nvals, 1, QF_NO_LOCK);

	hipDeviceSynchronize();

	return 0;

	//return 0;
}

/* Set the stored count of (key, value) to exactly `count` by inserting or
 * removing the difference. Returns the underlying insert/remove status, or 0
 * when nothing needed to change. A requested count of 0 is a no-op. */
__host__ __device__ int qf_set_count(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t flags)
{
	if (count == 0)
		return 0;

	uint64_t cur_count = qf_count_key_value(qf, key, value, flags);
	int64_t delta = count - cur_count;

	int ret;
	if (delta == 0)
		ret = 0;
	else if (delta > 0)
		ret = qf_insert(qf, key, value, delta, flags);
	else
		ret = qf_remove(qf, key, value, labs(delta), flags);

	return ret;
}

/* Remove `count` occurrences of (key, value). Hashes the key unless flags
 * carry QF_KEY_IS_HASH, packs the value bits into the low bits of the hash,
 * and delegates to _remove. count == 0 is a successful no-op. */
__host__ __device__ int qf_remove(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t flags)
{
	if (count == 0)
		return true;

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}
	uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));
	return _remove(qf, hash, count, flags);
}

/* Delete every occurrence of (key, value): looks up the current count first
 * and removes exactly that many. Absent keys are a successful no-op. */
__host__ __device__ int qf_delete_key_value(QF *qf, uint64_t key, uint64_t value, uint8_t flags)
{
	uint64_t count = qf_count_key_value(qf, key, value, flags);
	if (count == 0)
		return true;

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}
	uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));
	return _remove(qf, hash, count, flags);
}

/* Return the stored count of (key, value), or 0 when absent. Walks the run
 * for the key's home bucket, decoding one counter at a time.
 * Uses bits_per_slot partitioning of the packed (key|value) hash. */
__host__ __device__ uint64_t qf_count_key_value(const QF *qf, uint64_t key, uint64_t value, uint8_t flags)
{
	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}
	uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));
	uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
	int64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;

	if (!is_occupied(qf, hash_bucket_index))
		return 0;

	// the run starts just past the previous run's end, but never before
	// the bucket itself
	int64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index-1) + 1;
	if (runstart_index < hash_bucket_index)
		runstart_index = hash_bucket_index;

	uint64_t current_remainder, current_count, current_end;
	do {
		current_end = decode_counter(qf, runstart_index, &current_remainder, &current_count);
		if (current_remainder == hash_remainder)
			return current_count;
		runstart_index = current_end + 1;
	} while (!is_runend(qf, current_end));

	return 0;
}

/* Query the count for `key`, writing its stored value bits into *value.
 * Unlike qf_count_key_value this partitions the hash by key_remainder_bits
 * and strips value_bits from each decoded remainder. Returns 0 when absent. */
__host__ __device__ uint64_t qf_query(const QF *qf, uint64_t key, uint64_t *value, uint8_t flags)
{
	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}
	uint64_t hash = key;
	uint64_t hash_remainder = hash & BITMASK(qf->metadata->key_remainder_bits);
	int64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;

	if (!is_occupied(qf, hash_bucket_index))
		return 0;

	int64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index-1) + 1;
	if (runstart_index < hash_bucket_index)
		runstart_index = hash_bucket_index;

	uint64_t current_remainder, current_count, current_end;
	do {
		current_end = decode_counter(qf, runstart_index, &current_remainder, &current_count);

		// low value_bits of the stored remainder carry the value payload
		*value = current_remainder & BITMASK(qf->metadata->value_bits);
		current_remainder = current_remainder >> qf->metadata->value_bits;
		if (current_remainder == hash_remainder) {
			return current_count;
		}

		runstart_index = current_end + 1;
	} while (!is_runend(qf, current_end));

	return 0;
}

/* Return the slot index where (key, value) is stored, or QF_DOESNT_EXIST.
 * Same run walk as qf_count_key_value but returns the position. */
__host__ __device__ int64_t qf_get_unique_index(const QF *qf, uint64_t key, uint64_t value, uint8_t flags)
{
	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}
	uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));
	uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
	int64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;

	if (!is_occupied(qf, hash_bucket_index))
		return QF_DOESNT_EXIST;

	int64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index-1) + 1;
	if (runstart_index < hash_bucket_index)
		runstart_index = hash_bucket_index;

	uint64_t current_remainder, current_count, current_end;
	do {
		current_end = decode_counter(qf, runstart_index, &current_remainder, &current_count);
		if (current_remainder == hash_remainder)
			return runstart_index;

		runstart_index = current_end + 1;
	} while (!is_runend(qf, current_end));

	return QF_DOESNT_EXIST;
}

/* --- Simple metadata accessors. Counter getters that read pc_* structures
 * call pc_sync first so the returned value reflects all per-CPU deltas. --- */

enum qf_hashmode qf_get_hashmode(const QF *qf) {
	return qf->metadata->hash_mode;
}
uint64_t qf_get_hash_seed(const QF *qf) {
	return qf->metadata->seed;
}
__uint64_t qf_get_hash_range(const QF *qf) {
	return qf->metadata->range;
}

bool qf_is_auto_resize_enabled(const QF *qf) {
	if (qf->runtimedata->auto_resize == 1)
		return true;
	return false;
}
uint64_t qf_get_total_size_in_bytes(const QF *qf) {
	return qf->metadata->total_size_in_bytes;
}
uint64_t qf_get_nslots(const QF *qf) {
	return qf->metadata->nslots;
}
uint64_t qf_get_num_occupied_slots(const QF *qf) {
	pc_sync(&qf->runtimedata->pc_noccupied_slots);
	return qf->metadata->noccupied_slots;
}

uint64_t qf_get_num_key_bits(const QF *qf) {
	return qf->metadata->key_bits;
}

uint64_t qf_get_num_value_bits(const QF *qf) {
	return qf->metadata->value_bits;
}

uint64_t qf_get_num_key_remainder_bits(const QF *qf) {
	return qf->metadata->key_remainder_bits;
}

uint64_t qf_get_bits_per_slot(const QF *qf) {
	return qf->metadata->bits_per_slot;
}

uint64_t qf_get_sum_of_counts(const QF *qf) {
	pc_sync(&qf->runtimedata->pc_nelts);
	return qf->metadata->nelts;
}

uint64_t qf_get_num_distinct_key_value_pairs(const QF *qf) {
	pc_sync(&qf->runtimedata->pc_ndistinct_elts);
	return qf->metadata->ndistinct_elts;
}

/* Flush all per-CPU counter deltas into the global metadata counters. */
void qf_sync_counters(const QF *qf) {
	pc_sync(&qf->runtimedata->pc_ndistinct_elts);
	pc_sync(&qf->runtimedata->pc_nelts);
	pc_sync(&qf->runtimedata->pc_noccupied_slots);
}

/* initialize the iterator at the run corresponding
 * to the position
index */
/* Position the iterator at the first run at or after slot `position`.
 * Returns the iterator's current slot, or QFI_INVALID when `position` is the
 * sentinel 0xffffffffffffffff or past the last occupied slot. */
int64_t qf_iterator_from_position(const QF *qf, QFi *qfi, uint64_t position)
{
	if (position == 0xffffffffffffffff) {
		qfi->current = 0xffffffffffffffff;
		qfi->qf = qf;
		return QFI_INVALID;
	}
	assert(position < qf->metadata->nslots);
	if (!is_occupied(qf, position)) {
		// advance to the next occupied bucket, scanning occupieds bitmaps
		// block by block
		uint64_t block_index = position;
		uint64_t idx = bitselect(get_block(qf, block_index)->occupieds[0], 0);
		if (idx == 64) {
			while(idx == 64 && block_index < qf->metadata->nblocks) {
				block_index++;
				idx = bitselect(get_block(qf, block_index)->occupieds[0], 0);
			}
		}
		position = block_index * QF_SLOTS_PER_BLOCK + idx;
	}

	qfi->qf = qf;
	qfi->num_clusters = 0;
	qfi->run = position;
	// a run starts just past the previous run's end, but never before its
	// home bucket
	qfi->current = position == 0 ? 0 : run_end(qfi->qf, position-1) + 1;
	if (qfi->current < position)
		qfi->current = position;

#ifdef LOG_CLUSTER_LENGTH
	qfi->c_info = (cluster_data* )calloc(qf->metadata->nslots/32, sizeof(cluster_data));
	if (qfi->c_info == NULL) {
		perror("Couldn't allocate memory for c_info.");
		exit(EXIT_FAILURE);
	}
	qfi->cur_start_index = position;
	qfi->cur_length = 1;
#endif

	if (qfi->current >= qf->metadata->nslots)
		return QFI_INVALID;
	return qfi->current;
}

/* Position the iterator at the smallest stored hash >= hash(key, value).
 * Hashes the key (unless QF_KEY_IS_HASH), searches the key's home run for the
 * first remainder >= the target, and otherwise skips to the next run.
 * Returns the iterator's current slot or QFI_INVALID. */
int64_t qf_iterator_from_key_value(const QF *qf, QFi *qfi, uint64_t key, uint64_t value, uint8_t flags)
{
	if (key >= qf->metadata->range) {
		qfi->current = 0xffffffffffffffff;
		qfi->qf = qf;
		return QFI_INVALID;
	}

	qfi->qf = qf;
	qfi->num_clusters = 0;

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}
	uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));

	uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
	uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
	bool flag = false;

	// If a run starts at "position" move the iterator to point it to the
	// smallest key greater than or equal to "hash".
	if (is_occupied(qf, hash_bucket_index)) {
		uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index-1) + 1;
		if (runstart_index < hash_bucket_index)
			runstart_index = hash_bucket_index;
		uint64_t current_remainder, current_count, current_end;
		do {
			current_end = decode_counter(qf, runstart_index, &current_remainder, &current_count);
			if (current_remainder >= hash_remainder) {
				flag = true;
				break;
			}
			runstart_index = current_end + 1;
		} while (!is_runend(qf, current_end));
		// found "hash" or smallest key greater than "hash" in this run.
		if (flag) {
			qfi->run = hash_bucket_index;
			qfi->current = runstart_index;
		}
	}
	// If a run doesn't start at "position" or the largest key in the run
	// starting at "position" is smaller than "hash" then find the start of the
	// next run.
	if (!is_occupied(qf, hash_bucket_index) || !flag) {
		uint64_t position = hash_bucket_index;
		assert(position < qf->metadata->nslots);
		uint64_t block_index = position / QF_SLOTS_PER_BLOCK;
		uint64_t idx = bitselect(get_block(qf, block_index)->occupieds[0], 0);
		if (idx == 64) {
			while(idx == 64 && block_index < qf->metadata->nblocks) {
				block_index++;
				idx = bitselect(get_block(qf, block_index)->occupieds[0], 0);
			}
		}
		position = block_index * QF_SLOTS_PER_BLOCK + idx;
		qfi->run = position;
		qfi->current = position == 0 ? 0 : run_end(qfi->qf, position-1) + 1;
		if (qfi->current < position)
			qfi->current = position;
	}

	if (qfi->current >= qf->metadata->nslots)
		return QFI_INVALID;
	return qfi->current;
}

/* Decode the (key, value, count) triple at the iterator's current slot.
 * The full key is reassembled from the run's bucket index plus the stored
 * remainder. Returns 0 on success, QFI_INVALID past the end. */
static int qfi_get(const QFi *qfi, uint64_t *key, uint64_t *value, uint64_t *count)
{
	if (qfi_end(qfi))
		return QFI_INVALID;

	uint64_t current_remainder, current_count;
	decode_counter(qfi->qf, qfi->current, &current_remainder, &current_count);

	*value = current_remainder & BITMASK(qfi->qf->metadata->value_bits);
	current_remainder = current_remainder >> qfi->qf->metadata->value_bits;
	*key = (qfi->run << qfi->qf->metadata->key_remainder_bits) | current_remainder;
	*count = current_count;

	return 0;
}

/* Like qfi_get but un-hashes the key for the caller. Only possible for
 * invertible hashing; with QF_HASH_DEFAULT the original key is unrecoverable,
 * so the outputs are zeroed and QF_INVALID is returned. */
int qfi_get_key(const QFi *qfi, uint64_t *key, uint64_t *value, uint64_t *count)
{
	*key = *value = *count = 0;
	int ret = qfi_get(qfi, key, value, count);
	if (ret == 0) {
		if (qfi->qf->metadata->hash_mode == QF_HASH_DEFAULT) {
			*key = 0; *value = 0; *count = 0;
			return QF_INVALID;
		} else if (qfi->qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			*key = hash_64i(*key, BITMASK(qfi->qf->metadata->key_bits));
	}

	return ret;
}

/* Like qfi_get but returns the raw stored hash without inverting it. */
int qfi_get_hash(const QFi *qfi, uint64_t *key, uint64_t *value, uint64_t *count)
{
	*key = *value = *count = 0;
	return qfi_get(qfi, key, value, count);
}

/* Advance the iterator to the next stored item, hopping to the next run when
 * the current one is exhausted. Returns 0 on success, QFI_INVALID at the end. */
int qfi_next(QFi *qfi)
{
	if (qfi_end(qfi))
		return QFI_INVALID;
	else {
		/* move to the end of the current counter*/
		uint64_t current_remainder, current_count;
		qfi->current = decode_counter(qfi->qf, qfi->current, &current_remainder, &current_count);

		if (!is_runend(qfi->qf, qfi->current)) {
			qfi->current++;
#ifdef LOG_CLUSTER_LENGTH
			qfi->cur_length++;
#endif
			if (qfi_end(qfi))
				return QFI_INVALID;
			return 0;
		} else {
#ifdef LOG_CLUSTER_LENGTH
			/* save to check if the new current is the new cluster. */
			uint64_t old_current = qfi->current;
#endif
			// current run is done: find the next occupied bucket via the
			// occupieds bitmaps
			uint64_t block_index = qfi->run / QF_SLOTS_PER_BLOCK;
			uint64_t rank = bitrank(get_block(qfi->qf, block_index)->occupieds[0], qfi->run % QF_SLOTS_PER_BLOCK);
			uint64_t next_run = bitselect(get_block(qfi->qf, block_index)->occupieds[0], rank);

			if (next_run == 64) {
				rank = 0;
				while (next_run == 64 && block_index < qfi->qf->metadata->nblocks) {
					block_index++;
					next_run = bitselect(get_block(qfi->qf, block_index)->occupieds[0], rank);
				}
			}

			if (block_index == qfi->qf->metadata->nblocks) {
				/* set the index values to max. */
				qfi->run = qfi->current = qfi->qf->metadata->xnslots;
				return QFI_INVALID;
			}

			qfi->run = block_index * QF_SLOTS_PER_BLOCK + next_run;
			qfi->current++;
			if (qfi->current < qfi->run)
				qfi->current = qfi->run;
#ifdef LOG_CLUSTER_LENGTH
			if (qfi->current > old_current + 1) { /* new cluster. */
				if (qfi->cur_length > 10) {
					qfi->c_info[qfi->num_clusters].start_index = qfi->cur_start_index;
					qfi->c_info[qfi->num_clusters].length = qfi->cur_length;
					qfi->num_clusters++;
				}
				qfi->cur_start_index = qfi->run;
				qfi->cur_length = 1;
			} else {
				qfi->cur_length++;
			}
#endif
			return 0;
		}
	}
}

/* True when the iterator has walked past the last (extended) slot. */
bool qfi_end(const QFi *qfi)
{
	if (qfi->current >= qfi->qf->metadata->xnslots /*&& is_runend(qfi->qf, qfi->current)*/)
		return true;
	return false;
}

/*
 * Merge qfa and qfb into qfc
 */
/*
 * iterate over both qf (qfa and qfb)
 * simultaneously
 * for each index i
 * min(get_value(qfa, ia) < get_value(qfb, ib))
 * insert(min, ic)
 * increment either ia or ib, whichever is minimum.
*/ void qf_merge(const QF *qfa, const QF *qfb, QF *qfc) { QFi qfia, qfib; qf_iterator_from_position(qfa, &qfia, 0); qf_iterator_from_position(qfb, &qfib, 0); if (qfa->metadata->hash_mode != qfc->metadata->hash_mode && qfa->metadata->seed != qfc->metadata->seed && qfb->metadata->hash_mode != qfc->metadata->hash_mode && qfb->metadata->seed != qfc->metadata->seed) { fprintf(stderr, "Output QF and input QFs do not have the same hash mode or seed.\n"); exit(1); } uint64_t keya, valuea, counta, keyb, valueb, countb; qfi_get_hash(&qfia, &keya, &valuea, &counta); qfi_get_hash(&qfib, &keyb, &valueb, &countb); do { if (keya < keyb) { qf_insert(qfc, keya, valuea, counta, QF_NO_LOCK | QF_KEY_IS_HASH); qfi_next(&qfia); qfi_get_hash(&qfia, &keya, &valuea, &counta); } else { qf_insert(qfc, keyb, valueb, countb, QF_NO_LOCK | QF_KEY_IS_HASH); qfi_next(&qfib); qfi_get_hash(&qfib, &keyb, &valueb, &countb); } } while(!qfi_end(&qfia) && !qfi_end(&qfib)); if (!qfi_end(&qfia)) { do { qfi_get_hash(&qfia, &keya, &valuea, &counta); qf_insert(qfc, keya, valuea, counta, QF_NO_LOCK | QF_KEY_IS_HASH); } while(!qfi_next(&qfia)); } if (!qfi_end(&qfib)) { do { qfi_get_hash(&qfib, &keyb, &valueb, &countb); qf_insert(qfc, keyb, valueb, countb, QF_NO_LOCK | QF_KEY_IS_HASH); } while(!qfi_next(&qfib)); } } /* * Merge an array of qfs into the resultant QF */ void qf_multi_merge(const QF *qf_arr[], int nqf, QF *qfr) { int i; QFi qfi_arr[nqf]; int smallest_idx = 0; uint64_t smallest_key = UINT64_MAX; for (i=0; i<nqf; i++) { if (qf_arr[i]->metadata->hash_mode != qfr->metadata->hash_mode && qf_arr[i]->metadata->seed != qfr->metadata->seed) { fprintf(stderr, "Output QF and input QFs do not have the same hash mode or seed.\n"); exit(1); } qf_iterator_from_position(qf_arr[i], &qfi_arr[i], 0); } DEBUG_CQF("Merging %d CQFs\n", nqf); for (i=0; i<nqf; i++) { DEBUG_CQF("CQF %d\n", i); DEBUG_DUMP(qf_arr[i]); } while (nqf > 1) { uint64_t keys[nqf]; uint64_t values[nqf]; uint64_t counts[nqf]; for (i=0; i<nqf; i++) 
qfi_get_hash(&qfi_arr[i], &keys[i], &values[i], &counts[i]); do { smallest_key = UINT64_MAX; for (i=0; i<nqf; i++) { if (keys[i] < smallest_key) { smallest_key = keys[i]; smallest_idx = i; } } qf_insert(qfr, keys[smallest_idx], values[smallest_idx], counts[smallest_idx], QF_NO_LOCK | QF_KEY_IS_HASH); qfi_next(&qfi_arr[smallest_idx]); qfi_get_hash(&qfi_arr[smallest_idx], &keys[smallest_idx], &values[smallest_idx], &counts[smallest_idx]); } while(!qfi_end(&qfi_arr[smallest_idx])); /* remove the qf that is exhausted from the array */ if (smallest_idx < nqf-1) memmove(&qfi_arr[smallest_idx], &qfi_arr[smallest_idx+1], (nqf-smallest_idx-1)*sizeof(qfi_arr[0])); nqf--; } if (!qfi_end(&qfi_arr[0])) { uint64_t iters = 0; do { uint64_t key, value, count; qfi_get_hash(&qfi_arr[0], &key, &value, &count); qf_insert(qfr, key, value, count, QF_NO_LOCK | QF_KEY_IS_HASH); qfi_next(&qfi_arr[0]); iters++; } while(!qfi_end(&qfi_arr[0])); DEBUG_CQF("Num of iterations: %lu\n", iters); } DEBUG_CQF("%s", "Final CQF after merging.\n"); DEBUG_DUMP(qfr); return; } /* find cosine similarity between two QFs. */ uint64_t qf_inner_product(const QF *qfa, const QF *qfb) { uint64_t acc = 0; QFi qfi; const QF *qf_mem, *qf_disk; if (qfa->metadata->hash_mode != qfb->metadata->hash_mode && qfa->metadata->seed != qfb->metadata->seed) { fprintf(stderr, "Input QFs do not have the same hash mode or seed.\n"); exit(1); } // create the iterator on the larger QF. if (qfa->metadata->total_size_in_bytes > qfb->metadata->total_size_in_bytes) { qf_mem = qfb; qf_disk = qfa; } else { qf_mem = qfa; qf_disk = qfb; } qf_iterator_from_position(qf_disk, &qfi, 0); do { uint64_t key = 0, value = 0, count = 0; uint64_t count_mem; qfi_get_hash(&qfi, &key, &value, &count); if ((count_mem = qf_count_key_value(qf_mem, key, 0, QF_KEY_IS_HASH)) > 0) { acc += count*count_mem; } } while (!qfi_next(&qfi)); return acc; } /* find cosine similarity between two QFs. 
*/ void qf_intersect(const QF *qfa, const QF *qfb, QF *qfr) { QFi qfi; const QF *qf_mem, *qf_disk; if (qfa->metadata->hash_mode != qfr->metadata->hash_mode && qfa->metadata->seed != qfr->metadata->seed && qfb->metadata->hash_mode != qfr->metadata->hash_mode && qfb->metadata->seed != qfr->metadata->seed) { fprintf(stderr, "Output QF and input QFs do not have the same hash mode or seed.\n"); exit(1); } // create the iterator on the larger QF. if (qfa->metadata->total_size_in_bytes > qfb->metadata->total_size_in_bytes) { qf_mem = qfb; qf_disk = qfa; } else { qf_mem = qfa; qf_disk = qfb; } qf_iterator_from_position(qf_disk, &qfi, 0); do { uint64_t key = 0, value = 0, count = 0; qfi_get_hash(&qfi, &key, &value, &count); if (qf_count_key_value(qf_mem, key, 0, QF_KEY_IS_HASH) > 0) qf_insert(qfr, key, value, count, QF_NO_LOCK | QF_KEY_IS_HASH); } while (!qfi_next(&qfi)); }
861e9e7e9972b424147ca29b0da9011da6d921d5.cu
/* * ============================================================================ * * Authors: Prashant Pandey <ppandey@cs.stonybrook.edu> * Rob Johnson <robj@vmware.com> * Hunter McCoy <hjmccoy@lbl.gov> * * ============================================================================ */ #include <cuda.h> #include <cuda_runtime_api.h> #include <stdlib.h> #include <assert.h> #include <string.h> #include <inttypes.h> #include <stdio.h> #include <unistd.h> #include <math.h> #include <time.h> #include <sys/mman.h> #include <sys/stat.h> #include <fcntl.h> //timing stuff #include <chrono> #include <iostream> #include <cmath> //how fast is a thrust sort? #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/fill.h> #include <thrust/memory.h> #include <thrust/device_malloc.h> #include <thrust/device_free.h> #include "hashutil.cuh" #include "gqf.cuh" #include "gqf_int.cuh" #include <stdexcept> #include <cuda_profiler_api.h> /****************************************************************** * Code for managing the metadata bits and slots w/o interpreting * * the content of the slots. ******************************************************************/ #define MAX_VALUE(nbits) ((1ULL << (nbits)) - 1) #define BITMASK(nbits) \ ((nbits) == 64 ? 
0xffffffffffffffff : MAX_VALUE(nbits)) #define NUM_SLOTS_TO_LOCK (1ULL<<13) #define LOCK_DIST 64 #define EXP_BEFORE_FAILURE -15 #define CLUSTER_SIZE (1ULL<<14) #define METADATA_WORD(qf,field,slot_index) \ (get_block((qf), (slot_index) / QF_SLOTS_PER_BLOCK)->field[((slot_index) % QF_SLOTS_PER_BLOCK) / 64]) #define GET_NO_LOCK(flag) (flag & QF_NO_LOCK) #define GET_TRY_ONCE_LOCK(flag) (flag & QF_TRY_ONCE_LOCK) #define GET_WAIT_FOR_LOCK(flag) (flag & QF_WAIT_FOR_LOCK) #define GET_KEY_HASH(flag) (flag & QF_KEY_IS_HASH) #define NUM_BUFFERS 10 #define MAX_BUFFER_SIZE 100 #define CYCLES_PER_SECOND 1601000000 #define MAX_DEPTH 16 #define SELECT_BOUND 32 #define DEBUG_ASSERTS 0 #define DROP_ON_RUNEND 0 #define RUNEND_CUTOFF 15 #define DROP_ON_BIG_CLUSTER 0 #define BIG_CLUSTER_DROPOFF 4096 #define DISTANCE_FROM_HOME_SLOT_CUTOFF 1000 #define BILLION 1000000000L #define CUDA_CHECK(ans) \ gpuAssert((ans), __FILE__, __LINE__); inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true) { if (code != cudaSuccess) { printf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __constant__ char kmer_vals[6] = {'F', 'A', 'C', 'T', 'G', '0'}; #ifdef DEBUG #define PRINT_DEBUG 1 #else #define PRINT_DEBUG 0 #endif #define DEBUG_CQF(fmt, ...) 
\ do { if (PRINT_DEBUG) printf( fmt, __VA_ARGS__); } while (0) #define DEBUG_DUMP(qf) \ do { if (PRINT_DEBUG) qf_dump_metadata(qf); } while (0) #if QF_BITS_PER_SLOT > 0 __host__ __device__ static inline qfblock* get_block(const QF* qf, uint64_t block_index) { return &qf->blocks[block_index]; } #else __host__ __device__ static inline qfblock* get_block(const QF* qf, uint64_t block_index) { return (qfblock*)(((char*)qf->blocks) + block_index * (sizeof(qfblock) + QF_SLOTS_PER_BLOCK * qf->metadata->bits_per_slot / 8)); } #endif /* __device__ static __inline__ unsigned long long rdtsc(void) { unsigned hi, lo; __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi)); return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 ); } */ /* __host__ __device__ static void modify_metadata(pc_t *metadata, int cnt) { pc_add(metadata, cnt); return; } */ /*changing sizes of register based on https://docs.nvidia.com/cuda/inline-ptx-assembly/index.html l is for "l" = .u64 reg */ __host__ __device__ static inline int popcnt(uint64_t val) { #ifdef __CUDA_ARCH__ val = __popcll(val); #else #ifndef __x86_64 val = __builtin_popcount(val); #else asm("popcnt %[val], %[val]" : [val] "+r" (val) : : "cc"); #endif #endif return val; } // __device__ static inline int64_t bitscanreverse(uint64_t val) // { // if (val == 0) { // return -1; // } else { // asm("bsr %[val], %[val]" // : [val] "+l" (val) // : // : ); // return val; // } // } __host__ __device__ static inline int popcntv(const uint64_t val, int ignore) { if (ignore % 64) return popcnt (val & ~BITMASK(ignore % 64)); else return popcnt(val); } // Returns the number of 1s up to (and including) the pos'th bit // Bits are numbered from 0 __host__ __device__ static inline int bitrank(uint64_t val, int pos) { val = val & ((2ULL << pos) - 1); #ifdef __CUDA_ARCH__ val = __popcll(val); #else //quick fix for summit #ifndef __x86_64 val = __builtin_popcount(val); #else asm("popcnt %[val], %[val]" : [val] "+r" (val) : : "cc"); #endif #endif return 
val; } //moved dump functions __host__ __device__ static inline void qf_dump_block(const QF *qf, uint64_t i) { uint64_t j; printf("Block %llu Runs from %llu to %llu\n",i, i*QF_SLOTS_PER_BLOCK, (i+1)*QF_SLOTS_PER_BLOCK); printf("Offset: %-192d", get_block(qf, i)->offset); printf("\n"); for (j = 0; j < QF_SLOTS_PER_BLOCK; j++) printf("%02lx ", j); printf("\n"); for (j = 0; j < QF_SLOTS_PER_BLOCK; j++) printf(" %d ", (get_block(qf, i)->occupieds[j/64] & (1ULL << (j%64))) ? 1 : 0); printf("\n"); for (j = 0; j < QF_SLOTS_PER_BLOCK; j++) printf(" %d ", (get_block(qf, i)->runends[j/64] & (1ULL << (j%64))) ? 1 : 0); printf("\n"); #if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 for (j = 0; j < QF_SLOTS_PER_BLOCK; j++) printf("%02x ", get_block(qf, i)->slots[j]); #elif QF_BITS_PER_SLOT == 64 for (j = 0; j < QF_SLOTS_PER_BLOCK; j++) printf("%02lx ", get_block(qf, i)->slots[j]); #else for (j = 0; j < QF_SLOTS_PER_BLOCK * qf->metadata->bits_per_slot / 8; j++) printf("%02x ", get_block(qf, i)->slots[j]); #endif printf("\n"); printf("\n"); } __host__ __device__ void qf_dump_metadata(const QF *qf) { printf("Slots: %lu Occupied: %lu Elements: %lu Distinct: %lu\n", qf->metadata->nslots, qf->metadata->noccupied_slots, qf->metadata->nelts, qf->metadata->ndistinct_elts); printf("Key_bits: %lu Value_bits: %lu Remainder_bits: %lu Bits_per_slot: %lu\n", qf->metadata->key_bits, qf->metadata->value_bits, qf->metadata->key_remainder_bits, qf->metadata->bits_per_slot); } __host__ __device__ void qf_dump(const QF *qf) { uint64_t i; printf("%lu %lu %lu\n", qf->metadata->nblocks, qf->metadata->ndistinct_elts, qf->metadata->nelts); for (i = 0; i < qf->metadata->nblocks; i++) { qf_dump_block(qf, i); } } /** * Returns the position of the k-th 1 in the 64-bit word x. * k is 0-based, so k=0 returns the position of the first 1. * * Uses the broadword selection algorithm by Vigna [1], improved by Gog * and Petri [2] and Vigna [3]. * * [1] Sebastiano Vigna. 
Broadword Implementation of Rank/Select * Queries. WEA, 2008 * * [2] Simon Gog, Matthias Petri. Optimized succinct data * structures for massive data. Softw. Pract. Exper., 2014 * * [3] Sebastiano Vigna. MG4J 5.2.1. http://mg4j.di.unimi.it/ * The following code is taken from * https://github.com/facebook/folly/blob/b28186247104f8b90cfbe094d289c91f9e413317/folly/experimental/Select64.h */ __device__ __constant__ uint8_t gpukSelectInByte[2048] = { 8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 8, 8, 8, 1, 8, 2, 2, 1, 8, 3, 3, 1, 3, 2, 2, 1, 8, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 7, 7, 1, 7, 2, 2, 1, 7, 3, 3, 1, 3, 2, 2, 1, 7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 8, 8, 8, 
8, 8, 8, 2, 8, 8, 8, 3, 8, 3, 3, 2, 8, 8, 8, 4, 8, 4, 4, 2, 8, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 5, 8, 5, 5, 2, 8, 5, 5, 3, 5, 3, 3, 2, 8, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 6, 8, 6, 6, 2, 8, 6, 6, 3, 6, 3, 3, 2, 8, 6, 6, 4, 6, 4, 4, 2, 6, 4, 4, 3, 4, 3, 3, 2, 8, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, 6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 7, 8, 7, 7, 2, 8, 7, 7, 3, 7, 3, 3, 2, 8, 7, 7, 4, 7, 4, 4, 2, 7, 4, 4, 3, 4, 3, 3, 2, 8, 7, 7, 5, 7, 5, 5, 2, 7, 5, 5, 3, 5, 3, 3, 2, 7, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 7, 7, 6, 7, 6, 6, 2, 7, 6, 6, 3, 6, 3, 3, 2, 7, 6, 6, 4, 6, 4, 4, 2, 6, 4, 4, 3, 4, 3, 3, 2, 7, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, 6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 3, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 4, 8, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 5, 8, 5, 5, 3, 8, 8, 8, 5, 8, 5, 5, 4, 8, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 3, 8, 8, 8, 6, 8, 6, 6, 4, 8, 6, 6, 4, 6, 4, 4, 3, 8, 8, 8, 6, 8, 6, 6, 5, 8, 6, 6, 5, 6, 5, 5, 3, 8, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 3, 8, 8, 8, 7, 8, 7, 7, 4, 8, 7, 7, 4, 7, 4, 4, 3, 8, 8, 8, 7, 8, 7, 7, 5, 8, 7, 7, 5, 7, 5, 5, 3, 8, 7, 7, 5, 7, 5, 5, 4, 7, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 3, 8, 7, 7, 6, 7, 6, 6, 4, 7, 6, 6, 4, 6, 4, 4, 3, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 3, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 5, 8, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 4, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 5, 8, 8, 8, 6, 8, 6, 6, 5, 8, 6, 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 
7, 4, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 5, 8, 8, 8, 7, 8, 7, 7, 5, 8, 7, 7, 5, 7, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 4, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 5, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7 }; // const uint8_t hostkSelectInByte[2048] = { // 8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, // 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, // 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, // 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, // 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 7, 0, // 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, // 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, // 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, // 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, // 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 8, 8, 8, 1, // 8, 2, 2, 1, 8, 3, 3, 1, 3, 2, 2, 1, 8, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, // 2, 1, 8, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, // 4, 3, 3, 1, 3, 2, 2, 1, 8, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, // 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, // 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 7, 7, 1, 7, 2, // 2, 1, 7, 3, 3, 1, 3, 2, 2, 1, 7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, // 7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 
4, 1, 4, 2, 2, 1, 4, 3, // 3, 1, 3, 2, 2, 1, 7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, 4, 1, // 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, // 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 8, 8, 8, 8, 8, 8, 2, // 8, 8, 8, 3, 8, 3, 3, 2, 8, 8, 8, 4, 8, 4, 4, 2, 8, 4, 4, 3, 4, 3, 3, 2, 8, 8, // 8, 5, 8, 5, 5, 2, 8, 5, 5, 3, 5, 3, 3, 2, 8, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, // 4, 3, 3, 2, 8, 8, 8, 6, 8, 6, 6, 2, 8, 6, 6, 3, 6, 3, 3, 2, 8, 6, 6, 4, 6, 4, // 4, 2, 6, 4, 4, 3, 4, 3, 3, 2, 8, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, // 6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 7, 8, 7, 7, 2, 8, 7, // 7, 3, 7, 3, 3, 2, 8, 7, 7, 4, 7, 4, 4, 2, 7, 4, 4, 3, 4, 3, 3, 2, 8, 7, 7, 5, // 7, 5, 5, 2, 7, 5, 5, 3, 5, 3, 3, 2, 7, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, // 3, 2, 8, 7, 7, 6, 7, 6, 6, 2, 7, 6, 6, 3, 6, 3, 3, 2, 7, 6, 6, 4, 6, 4, 4, 2, // 6, 4, 4, 3, 4, 3, 3, 2, 7, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, 6, 5, // 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 3, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 4, 8, 4, 4, 3, 8, 8, 8, 8, 8, 8, // 8, 5, 8, 8, 8, 5, 8, 5, 5, 3, 8, 8, 8, 5, 8, 5, 5, 4, 8, 5, 5, 4, 5, 4, 4, 3, // 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 3, 8, 8, 8, 6, 8, 6, 6, 4, 8, 6, // 6, 4, 6, 4, 4, 3, 8, 8, 8, 6, 8, 6, 6, 5, 8, 6, 6, 5, 6, 5, 5, 3, 8, 6, 6, 5, // 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, // 7, 3, 8, 8, 8, 7, 8, 7, 7, 4, 8, 7, 7, 4, 7, 4, 4, 3, 8, 8, 8, 7, 8, 7, 7, 5, // 8, 7, 7, 5, 7, 5, 5, 3, 8, 7, 7, 5, 7, 5, 5, 4, 7, 5, 5, 4, 5, 4, 4, 3, 8, 8, // 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 3, 8, 7, 7, 6, 7, 6, 6, 4, 7, 6, 6, 4, // 6, 4, 4, 3, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 3, 7, 6, 6, 5, 6, 5, // 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 5, 8, 8, 8, 
8, 8, 8, 8, 5, 8, 8, 8, 5, 8, 5, 5, 4, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, // 6, 4, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 5, 8, 8, 8, 6, 8, 6, 6, 5, // 8, 6, 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, // 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 4, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, // 8, 7, 7, 5, 8, 8, 8, 7, 8, 7, 7, 5, 8, 7, 7, 5, 7, 5, 5, 4, 8, 8, 8, 8, 8, 8, // 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 4, // 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 5, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, // 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, // 8, 6, 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, // 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 8, // 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, // 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, // 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7 // }; __host__ __device__ static inline uint64_t _select64(uint64_t x, int k) { if (k >= popcnt(x)) { return 64; } const uint64_t kOnesStep4 = 0x1111111111111111ULL; const uint64_t kOnesStep8 = 0x0101010101010101ULL; const uint64_t kMSBsStep8 = 0x80ULL * kOnesStep8; uint64_t s = x; s = s - ((s & 0xA * kOnesStep4) >> 1); s = (s & 0x3 * kOnesStep4) + ((s >> 2) & 0x3 * kOnesStep4); s = (s + (s >> 4)) & 0xF * kOnesStep8; uint64_t byteSums = s * kOnesStep8; uint64_t kStep8 = k * kOnesStep8; uint64_t geqKStep8 = (((kStep8 | kMSBsStep8) - byteSums) & kMSBsStep8); uint64_t place = popcnt(geqKStep8) * 8; uint64_t byteRank = k - (((byteSums << 8) >> place) & (uint64_t)(0xFF)); #ifdef __CUDA_ARCH__ return place + gpukSelectInByte[((x >> place) & 0xFF) | (byteRank << 8)]; #else abort(); return 0; //return place + hostkSelectInByte[((x >> place) & 0xFF) | (byteRank << 8)]; #endif // __CUDA_ARCH__ } // Returns the position of the rank'th 1. 
// (rank = 0 returns the 1st 1)
// Returns 64 if there are fewer than rank+1 1s.
__host__ __device__ static inline uint64_t bitselect(uint64_t val, int rank) {
// NOTE(review): the guard below is spelled __SSE4_2_ (one trailing underscore);
// the compiler-defined macro is __SSE4_2__, so this asm path appears to be dead
// code and the portable _select64 fallback is always used — confirm intent
// before "fixing", since pdep/tzcnt inline asm would not compile for device code.
#ifdef __SSE4_2_
	uint64_t i = 1ULL << rank;
	asm("pdep %[val], %[mask], %[val]"
			: [val] "+r" (val)
			: [mask] "r" (i));
	asm("tzcnt %[bit], %[index]"
			: [index] "=r" (i)
			: [bit] "g" (val)
			: "cc");
	return i;
#endif
	return _select64(val, rank);
}

// bitselect() over `val` with the low `ignore % 64` bits masked off first.
__host__ __device__ static inline uint64_t bitselectv(const uint64_t val, int ignore, int rank)
{
	return bitselect(val & ~BITMASK(ignore % 64), rank);
}

// Reads the runends metadata bit for the given global slot index.
__host__ __device__ static inline int is_runend(const QF *qf, uint64_t index)
{
	return (METADATA_WORD(qf, runends, index) >> ((index % QF_SLOTS_PER_BLOCK) % 64)) & 1ULL;
}

// Reads the occupieds metadata bit for the given global slot index.
__host__ __device__ static inline int is_occupied(const QF *qf, uint64_t index)
{
	return (METADATA_WORD(qf, occupieds, index) >> ((index % QF_SLOTS_PER_BLOCK) % 64)) & 1ULL;
}

/* Three get_slot/set_slot implementations follow, chosen at compile time:
 * (1) slot width is a whole number of bytes (8/16/32/64): direct array access;
 * (2) other fixed widths: packed little-endian bitfield access;
 * (3) width chosen at runtime (qf->metadata->bits_per_slot): same packing,
 *     but the width comes from metadata instead of a compile-time constant. */
#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64

__host__ __device__ static inline uint64_t get_slot(const QF *qf, uint64_t index)
{
	//ERR: Index passed in is incorrect
	//printf("slots %lu, index %lu\n", qf->metadata->nslots, index);
#if DEBUG_ASSERTS
	assert(index < qf->metadata->xnslots);
#endif
	return get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[index % QF_SLOTS_PER_BLOCK];
}

__host__ __device__ static inline void set_slot(const QF *qf, uint64_t index, uint64_t value)
{
#if DEBUG_ASSERTS
	assert(index < qf->metadata->xnslots);
#endif
	// Value is masked to the slot width before storing.
	get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[index % QF_SLOTS_PER_BLOCK] = value & BITMASK(qf->metadata->bits_per_slot);
}

#elif QF_BITS_PER_SLOT > 0

/* Little-endian code ....  Big-endian is TODO */

__host__ __device__ static inline uint64_t get_slot(const QF *qf, uint64_t index)
{
	/* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
	 * to generate buggy code. :/ */
	//printf("Other get slot: slots %lu, index %lu\n", qf->metadata->nslots, index);
#if DEBUG_ASSERTS
	assert(index < qf->metadata->xnslots);
#endif
	// Point at the byte containing the first bit of the slot, then shift/mask.
	uint64_t *p = (uint64_t *)&get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[(index % QF_SLOTS_PER_BLOCK) * QF_BITS_PER_SLOT / 8];
	return (uint64_t)(((*p) >> (((index % QF_SLOTS_PER_BLOCK) * QF_BITS_PER_SLOT) % 8)) & BITMASK(QF_BITS_PER_SLOT));
}

__host__ __device__ static inline void set_slot(const QF *qf, uint64_t index, uint64_t value)
{
	/* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
	 * to generate buggy code. :/ */
#if DEBUG_ASSERTS
	assert(index < qf->metadata->xnslots);
#endif
	uint64_t *p = (uint64_t *)&get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[(index % QF_SLOTS_PER_BLOCK) * QF_BITS_PER_SLOT / 8];
	// Read-modify-write of the 64-bit word holding the packed slot.
	uint64_t t = *p;
	uint64_t mask = BITMASK(QF_BITS_PER_SLOT);
	uint64_t v = value;
	int shift = ((index % QF_SLOTS_PER_BLOCK) * QF_BITS_PER_SLOT) % 8;
	mask <<= shift;
	v <<= shift;
	t &= ~mask;
	t |= v;
	*p = t;
}

#else

/* Little-endian code ....  Big-endian is TODO */

__host__ __device__ static inline uint64_t get_slot(const QF *qf, uint64_t index)
{
	//rintf("Third get slot?!? slots %lu, index %lu\n", qf->metadata->nslots, index);
#if DEBUG_ASSERTS
	assert(index < qf->metadata->xnslots);
#endif
	/* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
	 * to generate buggy code. :/ */
	uint64_t *p = (uint64_t *)&get_block(qf, index / QF_SLOTS_PER_BLOCK)->slots[(index %QF_SLOTS_PER_BLOCK)* qf->metadata->bits_per_slot / 8];
	return (uint64_t)(((*p) >> (((index % QF_SLOTS_PER_BLOCK) *qf->metadata->bits_per_slot) % 8)) & BITMASK(qf->metadata->bits_per_slot));
}

__host__ __device__ static inline void set_slot(const QF *qf, uint64_t index, uint64_t value)
{
#if DEBUG_ASSERTS
	assert(index < qf->metadata->xnslots);
#endif
	/* Should use __uint128_t to support up to 64-bit remainders, but gcc seems
	 * to generate buggy code. :/ */
	uint64_t *p = (uint64_t *)&get_block(qf, index /QF_SLOTS_PER_BLOCK)->slots[(index %QF_SLOTS_PER_BLOCK)* qf->metadata->bits_per_slot / 8];
	uint64_t t = *p;
	uint64_t mask = BITMASK(qf->metadata->bits_per_slot);
	uint64_t v = value;
	int shift = ((index % QF_SLOTS_PER_BLOCK) * qf->metadata->bits_per_slot) % 8;
	mask <<= shift;
	v <<= shift;
	t &= ~mask;
	t |= v;
	*p = t;
}

#endif

// Forward declaration: block_offset and run_end are mutually recursive.
__host__ __device__ static inline uint64_t run_end(const QF *qf, uint64_t hash_bucket_index);

// Returns the offset of the given block: how far past the block's first slot
// the last run touching this block ends. Falls back to recomputing via
// run_end() only when the stored offset field has saturated.
__host__ __device__ static inline uint64_t block_offset(const QF *qf, uint64_t blockidx)
{
	/* If we have extended counters and a 16-bit (or larger) offset field, then
	 * we can safely ignore the possibility of overflowing that field. */
	if (sizeof(qf->blocks[0].offset) > 1 ||
			get_block(qf, blockidx)->offset < BITMASK(8*sizeof(qf->blocks[0].offset)))
		return get_block(qf, blockidx)->offset;

	return run_end(qf, QF_SLOTS_PER_BLOCK * blockidx - 1) - QF_SLOTS_PER_BLOCK * blockidx + 1;
}

// Returns the index of the slot where the run of `hash_bucket_index` ends
// (or the bucket index itself if the bucket's run is empty). Uses rank over
// the occupieds bitvector and select over the runends bitvector, walking
// forward across blocks when the runend lies beyond the bucket's block.
__host__ __device__ static inline uint64_t run_end(const QF *qf, uint64_t hash_bucket_index)
{
	uint64_t bucket_block_index = hash_bucket_index / QF_SLOTS_PER_BLOCK;
	uint64_t bucket_intrablock_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
	uint64_t bucket_blocks_offset = block_offset(qf, bucket_block_index);

	// Number of occupied buckets in this block up to and including ours.
	uint64_t bucket_intrablock_rank = bitrank(get_block(qf, bucket_block_index)->occupieds[0], bucket_intrablock_offset);

	if (bucket_intrablock_rank == 0) {
		// No occupied bucket at or before us in this block.
		if (bucket_blocks_offset <= bucket_intrablock_offset)
			return hash_bucket_index;
		else
			return QF_SLOTS_PER_BLOCK * bucket_block_index + bucket_blocks_offset - 1;
	}

	uint64_t runend_block_index = bucket_block_index + bucket_blocks_offset / QF_SLOTS_PER_BLOCK;
	uint64_t runend_ignore_bits = bucket_blocks_offset % QF_SLOTS_PER_BLOCK;
	uint64_t runend_rank = bucket_intrablock_rank - 1;
	uint64_t runend_block_offset = bitselectv(get_block(qf, runend_block_index)->runends[0], runend_ignore_bits, runend_rank);

	if (runend_block_offset == QF_SLOTS_PER_BLOCK) {
		if (bucket_blocks_offset == 0 && bucket_intrablock_rank == 0) {
			/* The block begins in empty space, and this bucket is in that region of
			 * empty space */
			return hash_bucket_index;
		} else {
			// Runend is in a later block: keep consuming runend bits until found.
			do {
				runend_rank -= popcntv(get_block(qf, runend_block_index)->runends[0], runend_ignore_bits);
				runend_block_index++;
				runend_ignore_bits = 0;
				runend_block_offset = bitselectv(get_block(qf, runend_block_index)->runends[0], runend_ignore_bits, runend_rank);
			} while (runend_block_offset == QF_SLOTS_PER_BLOCK);
		}
	}

	uint64_t runend_index = QF_SLOTS_PER_BLOCK * runend_block_index + runend_block_offset;
	if (runend_index < hash_bucket_index)
		return hash_bucket_index;
	else
		return runend_index;
}

// Lower bound on the distance from `slot_index` to the next empty slot,
// derived from the block offset plus occupieds/runends population counts.
// Returns 0 iff the slot is provably empty (see is_empty below).
__host__ __device__ static inline int offset_lower_bound(const QF *qf, uint64_t slot_index)
{
	const qfblock * b = get_block(qf, slot_index / QF_SLOTS_PER_BLOCK);
	const uint64_t slot_offset = slot_index % QF_SLOTS_PER_BLOCK;
	const uint64_t boffset = b->offset;
	const uint64_t occupieds = b->occupieds[0] & BITMASK(slot_offset+1);
	//printf("slot %llu, slot_offset %02lx, block offset %llu, occupieds: %d ", slot_index, slot_offset, boffset, popcnt(occupieds));
#if DEBUG_ASSERTS
	assert(QF_SLOTS_PER_BLOCK == 64);
#endif
	//if (boffset < slot_offset) {
	if (boffset <= slot_offset) {
		// Count runs that have started but not yet ended at this slot.
		const uint64_t runends = (b->runends[0] & BITMASK(slot_offset)) >> boffset;
		return popcnt(occupieds) - popcnt(runends);
	}
	// Slot is still covered by runs spilling in from earlier blocks.
	return boffset - slot_offset + popcnt(occupieds);
}

// True iff the slot is definitely unused (no run covers it).
__host__ __device__ static inline int is_empty(const QF *qf, uint64_t slot_index)
{
	return offset_lower_bound(qf, slot_index) == 0;
}
// True iff the slot has neither its occupied nor its runend bit set.
// Weaker than is_empty(): the slot may still hold shifted data.
__host__ __device__ static inline int might_be_empty(const QF *qf, uint64_t slot_index)
{
	return !is_occupied(qf, slot_index) && !is_runend(qf, slot_index);
}

// __device__ static inline int probably_is_empty(const QF *qf, uint64_t slot_index)
// {
// 	return get_slot(qf, slot_index) == 0
// 		&& !is_occupied(qf, slot_index)
// 		&& !is_runend(qf, slot_index);
// }

//static inlines were re-added, should
// Scans forward from `from` for the first empty slot, hopping by the
// offset_lower_bound() distance each iteration until it reaches 0.
__host__ __device__ uint64_t static inline find_first_empty_slot(QF *qf, uint64_t from)
{
	uint64_t start_from = from;

	do {
		int t = offset_lower_bound(qf, from); //get block of from

		// if (t < 0){
		// 	//this implies a failure in the code - you are going to
		// 	find_first_empty_slot_verbose(qf, start_from);
		// }

		//this assert breaks testing as we can't query the last slot for the next slot
		//this throws an assertion, instead we want to throw an out of range exception
		//that can be captured to finalize the test instead.
#if DEBUG_ASSERTS
		assert(t>=0);
#endif
		//assert must happen, checks cannot happen in device code
		//alternate version must exist that is host exclusive.
		//if (t < 0) throw std::out_of_range("next free slot is before current slot, either final slot or gqf corruption.\n");

		if (t == 0)
			break;
		from = from + t;
	} while(1);

	// NOTE(review): these two locals only feed the disabled diagnostic below
	// and are otherwise unused (may trigger unused-variable warnings).
	uint64_t bucket_start_from = start_from/NUM_SLOTS_TO_LOCK;
	uint64_t end_start_from = from/NUM_SLOTS_TO_LOCK;

	//testing without this gate to check if we see speed improvements
	// if (end_start_from>bucket_start_from+1){
	// 	//return -1;
	// 	printf("Find first empty ran over a bucket: %llu\n", end_start_from-bucket_start_from);
	// }

	return from;
}

// Non-inline wrapper so external code can call find_first_empty_slot.
__host__ __device__ uint64_t first_empty_slot_wrapper(QF * qf, uint64_t from){
	return find_first_empty_slot(qf, from);
}

//exact same function as above, but forced to be host exclusive so that a try_catch statement in cluster counting will succeed.
// Host-only twin of find_first_empty_slot: throws std::out_of_range instead
// of asserting, so host-side cluster-counting tests can catch the failure.
__host__ uint64_t host_debug_find_first_empty_slot(QF *qf, uint64_t from)
{
	uint64_t start_from = from;

	do {
		int t = offset_lower_bound(qf, from); //get block of from

		// if (t < 0){
		// 	//this implies a failure in the code - you are going to
		// 	find_first_empty_slot_verbose(qf, start_from);
		// }

		//this assert breaks testing as we can't query the last slot for the next slot
		//this throws an assertion, instead we want to throw an out of range exception
		//that can be captured to finalize the test instead.
		//assert(t>=0);
		//assert must happen, checks cannot happen in device code
		//alternate version must exist that is host exclusive.
		if (t < 0) throw std::out_of_range("next free slot is before current slot, either final slot or gqf corruption.\n");

		if (t == 0)
			break;
		from = from + t;
	} while(1);

	uint64_t bucket_start_from = start_from/NUM_SLOTS_TO_LOCK;
	uint64_t end_start_from = from/NUM_SLOTS_TO_LOCK;

	//testing without this gate to check if we see speed improvements
	if (end_start_from>bucket_start_from+1){
		printf("Find first empty ran over a bucket: %llu\n", end_start_from-bucket_start_from);
	}

	return from;
}

// Shifts bits [bstart, bend) of word `b` left by `amount`, pulling the top
// `amount` bits of the previous word `a` into the bottom when bstart == 0.
// Bits outside [bstart, bend) in `b` are preserved unchanged.
__host__ __device__ static inline uint64_t shift_into_b(const uint64_t a, const uint64_t b,
		const int bstart, const int bend, const int amount)
{
	const uint64_t a_component = bstart == 0 ? (a >> (64 - amount)) : 0;
	const uint64_t b_shifted_mask = BITMASK(bend - bstart) << bstart;
	const uint64_t b_shifted = ((b_shifted_mask & b) << amount) & b_shifted_mask;
	const uint64_t b_mask = ~b_shifted_mask;
	return a_component | b_shifted | (b & b_mask);
}

// __device__ void* gpu_memmove(void* dst, const void* src, size_t n)
// {
// 	//printf("Launching memmove\n");
// 	//todo: allocate space per thread for this buffer before launching the kernel
// 	void* temp_buffer = malloc(n);
// 	//maybe stack allocation?
// 	//void* temp_buffer = void* char[n];
// 	// cudaMemcpyAsync(temp_buffer, src, n, cudaMemcpyDeviceToDevice);
// 	// cudaMemcpyAsync(dst, temp_buffer, n, cudaMemcpyDeviceToDevice);
// 	// //cudaFree(temp_buffer);
// 	// return dst;
// 	memcpy(temp_buffer, src, n);
// 	memcpy(dst, temp_buffer, n);
// 	free(temp_buffer);
// }

//a variant of memmove that compares the two pointers
// Device-side memmove replacement: single-thread byte copy that picks a copy
// direction based on whether the ranges might overlap.
// NOTE(review): the `char_src+n > char_dst` test selects the backward copy
// even when dst < src with overlap, which corrupts data in that direction —
// all callers visible here shift right (dst > src), so this is latent; a
// standard memmove would test `dst > src` instead. Also `int i` can overflow
// for n > INT_MAX — TODO confirm both before relying on this elsewhere.
__device__ void gpu_memmove(void* dst, const void* src, size_t n)
{
	//printf("Launching memmove\n");
	//todo: allocate space per thread for this buffer before launching the kernel
	char * char_dst = (char *) dst;
	char * char_src = (char *) src;

	//double check this,
	//think it is just > since dst+n does not get copied
	if (char_src+n > char_dst){
		//copy backwards
		for (int i =n-1; i >= 0; i--){
			char_dst[i] = char_src[i];
		}
	} else {
		//copy regular
		for (int i =0; i<n; i++){
			char_dst[i] = char_src[i];
		}
	}
	//free(temp_buffer);
}

//a variant of memmove that compares the two pointers
// Warp-cooperative version of gpu_memmove: each of the 32 lanes copies every
// 32nd byte, `warpID` being this thread's lane offset. Same direction caveat
// as gpu_memmove above.
__device__ void gpu_memmove_cooperative(void* dst, const void* src, size_t n, int warpID)
{
	//printf("Launching memmove\n");
	//todo: allocate space per thread for this buffer before launching the kernel
	char * char_dst = (char *) dst;
	char * char_src = (char *) src;

	//double check this,
	//think it is just > since dst+n does not get copied
	if (char_src+n > char_dst){
		//copy backwards
		for (int i =n-1-warpID; i >= 0; i-=32){
			char_dst[i] = char_src[i];
		}
	} else {
		//copy regular
		for (int i =warpID; i<n; i+=32){
			char_dst[i] = char_src[i];
		}
	}
	//free(temp_buffer);
}

#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64

// Shifts every remainder in [start_index, empty_index) one slot to the right,
// leaving start_index's slot free. Byte-width slots: plain memmove per block,
// carrying the last slot of each preceding block across block boundaries.
__host__ __device__ static inline void shift_remainders(QF *qf, uint64_t start_index, uint64_t empty_index)
{
	uint64_t start_block = start_index / QF_SLOTS_PER_BLOCK;
	uint64_t start_offset = start_index % QF_SLOTS_PER_BLOCK;
	uint64_t empty_block = empty_index / QF_SLOTS_PER_BLOCK;
	uint64_t empty_offset = empty_index % QF_SLOTS_PER_BLOCK;

#if DEBUG_ASSERTS
	assert (start_index <= empty_index);
	assert (empty_index < qf->metadata->xnslots);
#endif

	while (start_block < empty_block) {
#ifdef __CUDA_ARCH__
		gpu_memmove(&get_block(qf, empty_block)->slots[1],
								&get_block(qf, empty_block)->slots[0],
								empty_offset * sizeof(qf->blocks[0].slots[0]));
#else
		memmove(&get_block(qf, empty_block)->slots[1],
						&get_block(qf, empty_block)->slots[0],
						empty_offset * sizeof(qf->blocks[0].slots[0]));
#endif
		// Pull the last slot of the previous block into slot 0 of this one.
		get_block(qf, empty_block)->slots[0] = get_block(qf, empty_block-1)->slots[QF_SLOTS_PER_BLOCK-1];
		empty_block--;
		empty_offset = QF_SLOTS_PER_BLOCK-1;
	}

#ifdef __CUDA_ARCH__
	gpu_memmove(&get_block(qf, empty_block)->slots[start_offset + 1],
							&get_block(qf, empty_block)->slots[start_offset],
							(empty_offset - start_offset) * sizeof(qf->blocks[0].slots[0]));
#else
	memmove(&get_block(qf, empty_block)->slots[start_offset+1],
					&get_block(qf, empty_block)->slots[start_offset],
					(empty_offset - start_offset) * sizeof(qf->blocks[0].slots[0]));
#endif
}

// Warp-cooperative variant of shift_remainders (device only): identical
// structure, but the per-block moves are spread across the warp's lanes.
__device__ static inline void shift_remainders_cooperative(QF *qf, uint64_t start_index, uint64_t empty_index, int warpID)
{
	uint64_t start_block = start_index / QF_SLOTS_PER_BLOCK;
	uint64_t start_offset = start_index % QF_SLOTS_PER_BLOCK;
	uint64_t empty_block = empty_index / QF_SLOTS_PER_BLOCK;
	uint64_t empty_offset = empty_index % QF_SLOTS_PER_BLOCK;

#if DEBUG_ASSERTS
	assert (start_index <= empty_index);
	assert (empty_index < qf->metadata->xnslots);
#endif

	while (start_block < empty_block) {
		gpu_memmove_cooperative(&get_block(qf, empty_block)->slots[1],
								&get_block(qf, empty_block)->slots[0],
								empty_offset * sizeof(qf->blocks[0].slots[0]), warpID);
		get_block(qf, empty_block)->slots[0] = get_block(qf, empty_block-1)->slots[QF_SLOTS_PER_BLOCK-1];
		empty_block--;
		empty_offset = QF_SLOTS_PER_BLOCK-1;
	}

	gpu_memmove_cooperative(&get_block(qf, empty_block)->slots[start_offset + 1],
							&get_block(qf, empty_block)->slots[start_offset],
							(empty_offset - start_offset) * sizeof(qf->blocks[0].slots[0]), warpID);
}

#else

// Address of the 64-bit word holding packed-slot word index i.
#define REMAINDER_WORD(qf, i) ((uint64_t *)&(get_block(qf, (i)/qf->metadata->bits_per_slot)->slots[8 * ((i) % qf->metadata->bits_per_slot)]))

// Packed-slot variant of shift_remainders: walks the affected 64-bit words
// from last to first, shifting each left by bits_per_slot via shift_into_b.
__host__ __device__ static inline void shift_remainders(QF *qf, const uint64_t start_index, const uint64_t empty_index)
{
	uint64_t last_word = (empty_index + 1) * qf->metadata->bits_per_slot / 64;
	const uint64_t first_word = start_index * qf->metadata->bits_per_slot / 64;
	int bend = ((empty_index + 1) * qf->metadata->bits_per_slot) % 64;
	const int bstart = (start_index * qf->metadata->bits_per_slot) % 64;

	while (last_word != first_word) {
		*REMAINDER_WORD(qf, last_word) = shift_into_b(*REMAINDER_WORD(qf, last_word-1),
																									*REMAINDER_WORD(qf, last_word),
																									0, bend, qf->metadata->bits_per_slot);
		last_word--;
		bend = 64;
	}
	*REMAINDER_WORD(qf, last_word) = shift_into_b(0, *REMAINDER_WORD(qf, last_word),
																								bstart, bend, qf->metadata->bits_per_slot);
}

#endif

// Fills indices[n-1..0] with the next n empty slots at or after `from`
// (indices[n-1] is the closest; indices[0] the farthest).
__host__ __device__ static inline void find_next_n_empty_slots(QF *qf, uint64_t from, uint64_t n,
		uint64_t *indices)
{
	while (n) {
		indices[--n] = find_first_empty_slot(qf, from);
		from = indices[n] + 1;
	}
}

// Moves slots [first, last] right by `distance`; distance == 1 reuses the
// fast shift_remainders path.
__host__ __device__ static inline void shift_slots(QF *qf, int64_t first, uint64_t last, uint64_t distance)
{
	int64_t i;
	if (distance == 1)
		shift_remainders(qf, first, last+1);
	else
		for (i = last; i >= first; i--)
			set_slot(qf, i + distance, get_slot(qf, i));
}

// Shifts the runends bitvector for slots [first, last] right by `distance`
// bits (distance < 64), word by word from the high end down.
__host__ __device__ static inline void shift_runends(QF *qf, int64_t first, uint64_t last, uint64_t distance)
{
#if DEBUG_ASSERTS
	assert(last < qf->metadata->xnslots && distance < 64);
#endif
	uint64_t first_word = first / 64;
	uint64_t bstart = first % 64;
	uint64_t last_word = (last + distance + 1) / 64;
	uint64_t bend = (last + distance + 1) % 64;

	if (last_word != first_word) {
		METADATA_WORD(qf, runends, 64*last_word) = shift_into_b(METADATA_WORD(qf, runends, 64*(last_word-1)),
																														METADATA_WORD(qf, runends, 64*last_word),
																														0, bend, distance);
		bend = 64;
		last_word--;
		while (last_word != first_word) {
			METADATA_WORD(qf, runends, 64*last_word) = shift_into_b(METADATA_WORD(qf, runends, 64*(last_word-1)),
																															METADATA_WORD(qf, runends, 64*last_word),
																															0, bend, distance);
			last_word--;
		}
	}
	METADATA_WORD(qf, runends, 64*last_word) = shift_into_b(0, METADATA_WORD(qf, runends, 64*last_word),
																													bstart, bend, distance);
}

// Overwrites `noverwrites` slots at overwrite_index with `remainders`, making
// room for the remaining (total_remainders - noverwrites) new slots by
// shifting slots/runends right, then fixes runend bits per `operation`
// (0 = new run, 1 = append to run, 2 = insert mid-run) and bumps block
// offsets. Returns false if the filter has no room.
__host__ __device__ static inline bool insert_replace_slots_and_shift_remainders_and_runends_and_offsets(QF *qf,
		int operation, uint64_t bucket_index, uint64_t overwrite_index,
		const uint64_t *remainders, uint64_t total_remainders, uint64_t noverwrites)
{
	uint64_t empties[67];
	uint64_t i;
	int64_t j;
	int64_t ninserts = total_remainders - noverwrites;
	uint64_t insert_index = overwrite_index + noverwrites;

	if (ninserts > 0) {
		/* First, shift things to create n empty spaces where we need them. */
		find_next_n_empty_slots(qf, insert_index, ninserts, empties);
		if (empties[0] >= qf->metadata->xnslots) {
			return false;
		}
		// Shift each inter-empty segment right by its cumulative distance.
		for (j = 0; j < ninserts - 1; j++)
			shift_slots(qf, empties[j+1] + 1, empties[j] - 1, j + 1);
		shift_slots(qf, insert_index, empties[ninserts - 1] - 1, ninserts);
		for (j = 0; j < ninserts - 1; j++)
			shift_runends(qf, empties[j+1] + 1, empties[j] - 1, j + 1);
		shift_runends(qf, insert_index, empties[ninserts - 1] - 1, ninserts);

		// Clear runend bits inside the region being (re)written.
		for (i = noverwrites; i < total_remainders - 1; i++)
			METADATA_WORD(qf, runends, overwrite_index + i) &= ~(1ULL << (((overwrite_index + i) % QF_SLOTS_PER_BLOCK) % 64));

		switch (operation) {
			case 0: /* insert into empty bucket */
#if DEBUG_ASSERTS
				assert (noverwrites == 0);
#endif
				METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) |= 1ULL << (((overwrite_index + total_remainders - 1) % QF_SLOTS_PER_BLOCK) % 64);
				break;
			case 1: /* append to bucket */
				METADATA_WORD(qf, runends, overwrite_index + noverwrites - 1) &= ~(1ULL << (((overwrite_index + noverwrites - 1) % QF_SLOTS_PER_BLOCK) % 64));
				METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) |= 1ULL << (((overwrite_index + total_remainders - 1) % QF_SLOTS_PER_BLOCK) % 64);
				break;
			case 2: /* insert into bucket */
				METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) &= ~(1ULL << (((overwrite_index + total_remainders - 1) % QF_SLOTS_PER_BLOCK) % 64));
				break;
			default:
				printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
				__threadfence(); // ensure store issued before trap
				asm("trap;");
#else
				abort();
#endif
		}

		// Bump the offset of every block the shift ran through, minus the
		// empties that landed before each block; saturate at the field max.
		uint64_t npreceding_empties = 0;
		for (i = bucket_index / QF_SLOTS_PER_BLOCK + 1; i <= empties[0]/QF_SLOTS_PER_BLOCK; i++) {
			while ((int64_t)npreceding_empties < ninserts &&
						 empties[ninserts - 1 - npreceding_empties] / QF_SLOTS_PER_BLOCK < i)
				npreceding_empties++;

			if (get_block(qf, i)->offset + ninserts - npreceding_empties < BITMASK(8*sizeof(qf->blocks[0].offset)))
				get_block(qf, i)->offset += ninserts - npreceding_empties;
			else
				get_block(qf, i)->offset = (uint8_t) BITMASK(8*sizeof(qf->blocks[0].offset));
		}
	}

	for (i = 0; i < total_remainders; i++)
		set_slot(qf, overwrite_index + i, remainders[i]);

	//modify_metadata(&qf->runtimedata->pc_noccupied_slots, ninserts);

	return true;
}

// Replaces old_length slots at overwrite_index with total_remainders new
// slots, shifting later runs back to close the gap, fixing runend/occupied
// bits and block offsets. Returns the number of slots freed.
__host__ __device__ static inline int remove_replace_slots_and_shift_remainders_and_runends_and_offsets(QF *qf,
		int operation, uint64_t bucket_index, uint64_t overwrite_index,
		const uint64_t *remainders, uint64_t total_remainders, uint64_t old_length)
{
	uint64_t i;

	// Update the slots
	for (i = 0; i < total_remainders; i++)
		set_slot(qf, overwrite_index + i, remainders[i]);

	// If this is the last thing in its run, then we may need to set a new runend bit
	if (is_runend(qf, overwrite_index + old_length - 1)) {
		if (total_remainders > 0) {
			// If we're not deleting this entry entirely, then it will still the last entry in this run
			METADATA_WORD(qf, runends, overwrite_index + total_remainders - 1) |= 1ULL << ((overwrite_index + total_remainders - 1) % 64);
		} else if (overwrite_index > bucket_index &&
							 !is_runend(qf, overwrite_index - 1)) {
			// If we're deleting this entry entirely, but it is not the first entry in this run,
			// then set the preceding entry to be the runend
			METADATA_WORD(qf, runends, overwrite_index - 1) |= 1ULL << ((overwrite_index - 1) % 64);
		}
	}

	// shift slots back one run at a time
	uint64_t original_bucket = bucket_index;
	uint64_t current_bucket = bucket_index;
	uint64_t current_slot = overwrite_index + total_remainders;
	uint64_t current_distance = old_length - total_remainders;
	int ret_current_distance = current_distance;

	while (current_distance > 0) {
		// At a run boundary: advance to the next occupied bucket (if any).
		if (is_runend(qf, current_slot + current_distance - 1)) {
			do {
				current_bucket++;
			} while (current_bucket < current_slot + current_distance &&
							 !is_occupied(qf, current_bucket));
		}

		if (current_bucket <= current_slot) {
			// Run is still shifted: pull the slot back by current_distance.
			set_slot(qf, current_slot, get_slot(qf, current_slot + current_distance));
			if (is_runend(qf, current_slot) !=
					is_runend(qf, current_slot + current_distance))
				METADATA_WORD(qf, runends, current_slot) ^= 1ULL << (current_slot % 64);
			current_slot++;
		} else if (current_bucket <= current_slot + current_distance) {
			// Gap reaches the next run's home bucket: zero the gap and restart
			// from that bucket with a smaller distance.
			uint64_t i;
			for (i = current_slot; i < current_slot + current_distance; i++) {
				set_slot(qf, i, 0);
				METADATA_WORD(qf, runends, i) &= ~(1ULL << (i % 64));
			}
			current_distance = current_slot + current_distance - current_bucket;
			current_slot = current_bucket;
		} else {
			current_distance = 0;
		}
	}

	// reset the occupied bit of the hash bucket index if the hash is the
	// only item in the run and is removed completely.
	if (operation && !total_remainders)
		METADATA_WORD(qf, occupieds, bucket_index) &= ~(1ULL << (bucket_index % 64));

	// update the offset bits.
	// find the number of occupied slots in the original_bucket block.
	// Then find the runend slot corresponding to the last run in the
	// original_bucket block.
	// Update the offset of the block to which it belongs.
	uint64_t original_block = original_bucket / QF_SLOTS_PER_BLOCK;
	if (old_length > total_remainders) {	// we only update offsets if we shift/delete anything
		while (1) {
			uint64_t last_occupieds_hash_index = QF_SLOTS_PER_BLOCK * original_block + (QF_SLOTS_PER_BLOCK - 1);
			uint64_t runend_index = run_end(qf, last_occupieds_hash_index);
			// runend spans across the block
			// update the offset of the next block
			if (runend_index / QF_SLOTS_PER_BLOCK == original_block) { // if the run ends in the same block
				if (get_block(qf, original_block + 1)->offset == 0)
					break;
				get_block(qf, original_block + 1)->offset = 0;
			} else { // if the last run spans across the block
				if (get_block(qf, original_block + 1)->offset == (runend_index - last_occupieds_hash_index))
					break;
				get_block(qf, original_block + 1)->offset = (runend_index - last_occupieds_hash_index);
			}
			original_block++;
		}
	}

	//int num_slots_freed = old_length - total_remainders;
	//modify_metadata(&qf->runtimedata->pc_noccupied_slots, -num_slots_freed);
	/*qf->metadata->noccupied_slots -= (old_length - total_remainders);*/
	if (!total_remainders) {
		//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, -1);
		/*qf->metadata->ndistinct_elts--;*/
	}

	return ret_current_distance;
}

/*****************************************************************************
 * Code that uses the above to implement a QF with keys and inline counters.
* *****************************************************************************/ /* Counter format: 0 xs: <empty string> 1 x: x 2 xs: xx 3 0s: 000 >2 xs: xbc...cx for x != 0, b < x, c != 0, x >3 0s: 0c...c00 for c != 0 */ __host__ __device__ static inline uint64_t *encode_counter(QF *qf, uint64_t remainder, uint64_t counter, uint64_t *slots) { uint64_t digit = remainder; uint64_t base = (1ULL << qf->metadata->bits_per_slot) - 1; uint64_t *p = slots; if (counter == 0) return p; *--p = remainder; if (counter == 1) return p; if (counter == 2) { *--p = remainder; return p; } if (counter == 3 && remainder == 0) { *--p = remainder; *--p = remainder; return p; } if (counter == 3 && remainder > 0) { *--p = 0; *--p = remainder; return p; } if (remainder == 0) *--p = remainder; else base--; if (remainder) counter -= 3; else counter -= 4; do { digit = counter % base; digit++; /* Zero not allowed */ if (remainder && digit >= remainder) digit++; /* Cannot overflow since digit is mod 2^r-2 */ *--p = digit; counter /= base; } while (counter); if (remainder && digit >= remainder) *--p = 0; *--p = remainder; return p; } /* Returns the length of the encoding. REQUIRES: index points to first slot of a counter. */ __host__ __device__ static inline uint64_t decode_counter(const QF *qf, uint64_t index, uint64_t *remainder, uint64_t *count) { uint64_t base; uint64_t rem; uint64_t cnt; uint64_t digit; uint64_t end; *remainder = rem = get_slot(qf, index); if (is_runend(qf, index)) { /* Entire run is "0" */ *count = 1; return index; } digit = get_slot(qf, index + 1); if (is_runend(qf, index + 1)) { *count = digit == rem ? 2 : 1; return index + (digit == rem ? 1 : 0); } if (rem > 0 && digit >= rem) { *count = digit == rem ? 2 : 1; return index + (digit == rem ? 
1 : 0); } if (rem > 0 && digit == 0 && get_slot(qf, index + 2) == rem) { *count = 3; return index + 2; } if (rem == 0 && digit == 0) { if (get_slot(qf, index + 2) == 0) { *count = 3; return index + 2; } else { *count = 2; return index + 1; } } cnt = 0; base = (1ULL << qf->metadata->bits_per_slot) - (rem ? 2 : 1); end = index + 1; while (digit != rem && !is_runend(qf, end)) { if (digit > rem) digit--; if (digit && rem) digit--; cnt = cnt * base + digit; end++; digit = get_slot(qf, end); } if (rem) { *count = cnt + 3; return end; } if (is_runend(qf, end) || get_slot(qf, end + 1) != 0) { *count = 1; return index; } *count = cnt + 4; return end + 1; } /* return the next slot which corresponds to a * different element * */ // __device__ static inline uint64_t next_slot(QF *qf, uint64_t current) // { // uint64_t rem = get_slot(qf, current); // current++; // while (get_slot(qf, current) == rem && current <= qf->metadata->nslots) { // current++; // } // return current; // } //code for approx inserts __host__ __device__ static inline qf_returns insert1_if_not_exists(QF *qf, __uint64_t hash, uint8_t * value) { uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot); uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot; uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK; uint64_t compare_remainder = hash_remainder >> qf->metadata->value_bits; /* if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) { if (!qf_lock(qf, hash_bucket_index, true, runtime_lock)) return QF_COULDNT_LOCK; } */ //printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder); //approx filter has estimate of only one insert per item // #ifdef __CUDA_ARCH__ // atomicAdd((unsigned long long *)&qf->metadata->noccupied_slots, 1ULL); // #else // abort(); // #endif if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) { 
METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); set_slot(qf, hash_bucket_index, hash_remainder); METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64); //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); //modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1); //modify_metadata(&qf->runtimedata->pc_nelts, 1); } else { uint64_t runend_index = run_end(qf, hash_bucket_index); int operation = 0; /* Insert into empty bucket */ uint64_t insert_index = runend_index + 1; uint64_t new_value = hash_remainder; /* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */ uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1; if (is_occupied(qf, hash_bucket_index)) { /* Find the counter for this remainder if it exists. */ uint64_t current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits; //uint64_t zero_terminator = runstart_index; /* Skip over counters for other remainders. */ while (current_remainder < compare_remainder && runstart_index <= runend_index) { runstart_index++; current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits; } /* If this is the first time we've inserted the new remainder, and it is larger than any remainder in the run. */ if (runstart_index > runend_index) { operation = 1; insert_index = runstart_index; new_value = hash_remainder; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); /* This is the first time we're inserting this remainder, but there are larger remainders already in the run. */ } else if (current_remainder != compare_remainder) { operation = 2; /* Inserting */ insert_index = runstart_index; new_value = hash_remainder; //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); /* Cases below here: we're incrementing the (simple or extended) counter for this remainder. */ /* If there's exactly one instance of this remainder. 
*/ } else { //get remainder *value = get_slot(qf, runstart_index) && BITMASK(qf->metadata->value_bits); return QF_ITEM_FOUND; } } //else { //modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1); //} if (operation >= 0) { uint64_t empty_slot_index = find_first_empty_slot(qf, runend_index+1); #if DROP_ON_BIG_CLUSTER // if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){ // return QF_FULL; // } if (qf->metadata->qf_full){ return QF_FULL; } if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){ qf->metadata->qf_full = true; return QF_FULL; } #endif if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){ return QF_FULL; } if (empty_slot_index >= qf->metadata->xnslots) { printf("Ran out of space. Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index); return QF_FULL; } shift_remainders(qf, insert_index, empty_slot_index); set_slot(qf, insert_index, new_value); //ret_distance = insert_index - hash_bucket_index; shift_runends(qf, insert_index, empty_slot_index-1, 1); switch (operation) { case 0: METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64); break; case 1: METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64)); METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64); break; case 2: METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64)); break; default: printf("Invalid operation %d\n", operation); #ifdef __CUDA_ARCH__ __threadfence(); // ensure store issued before trap asm("trap;"); #else abort(); #endif } /* * Increment the offset for each block between the hash bucket index * and block of the empty slot * */ uint64_t i; for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <= empty_slot_index/QF_SLOTS_PER_BLOCK; i++) { if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset))) get_block(qf, i)->offset++; 
#if DEBUG_ASSERTS
			assert(get_block(qf, i)->offset != 0);
#endif
		}
		//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
		}
		//modify_metadata(&qf->runtimedata->pc_nelts, 1);
		METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
	}
	/*
	if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
		qf_unlock(qf, hash_bucket_index, true);
	}
	*/
	return QF_ITEM_INSERTED;
}

/* Warp-cooperative "insert remainder unless already present" for the
 * value-carrying filter.
 *
 * All 32 lanes of a warp must call this together with the same (qf, hash);
 * `warpID` is the caller's lane id (0..31).  The key part of the remainder
 * (high bits above value_bits) is searched for in the run; on a hit the
 * stored value bits are written to *value and QF_ITEM_FOUND is returned.
 * Otherwise the remainder is inserted and QF_ITEM_INSERTED is returned, or
 * QF_FULL if the cluster would grow past the allowed region.
 *
 * Fixes vs. previous revision:
 *  - the warp search loop now re-reads the probed slot after advancing by 32,
 *    instead of re-balloting on the stale remainder read before the loop;
 *  - the hit path returns the stored value bits (bitwise &); it previously
 *    used logical && and so always produced 0/1;
 *  - lane 0's find_first_empty_slot() result is broadcast to the whole warp
 *    BEFORE any lane inspects it, so the capacity checks below are uniform
 *    across the warp (previously lanes 1..31 tested an uninitialized value
 *    and could return divergently before a full-mask __shfl_sync).
 */
__device__ static inline qf_returns insert1_if_not_exists_cooperative(QF *qf, __uint64_t hash, uint8_t * value, int warpID)
{
	uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
	uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
	uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
	// Slots store key-remainder bits above `value_bits` of payload; compare
	// only the key part.
	uint64_t compare_remainder = hash_remainder >> qf->metadata->value_bits;

	// Fast path: home slot is empty, claim it directly (minimum one memory
	// check; nothing for the warp to parallelize here).
	if (is_empty(qf, hash_bucket_index)) {
		METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
		set_slot(qf, hash_bucket_index, hash_remainder);
		METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
	} else {
		uint64_t runend_index = run_end(qf, hash_bucket_index);
		int operation = 0; /* Insert into empty bucket */
		uint64_t insert_index = runend_index + 1;
		uint64_t new_value = hash_remainder;
		uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index - 1) + 1;

		if (is_occupied(qf, hash_bucket_index)) {
			/* Warp-parallel scan of the run: each lane probes one slot per
			 * round, looking for the first slot that is past the run end or
			 * whose key remainder is >= ours (runs are sorted by remainder). */
			uint64_t my_runstart_index = runstart_index + warpID;
			uint64_t my_current_remainder = get_slot(qf, my_runstart_index) >> qf->metadata->value_bits;
			while (true) {
				bool past_candidate = !((my_runstart_index <= runend_index) &&
				                        (my_current_remainder < compare_remainder));
				int winning_lane = __ffs(__ballot_sync(0xffffffff, past_candidate)) - 1;
				if (winning_lane != -1) {
					// Lowest lane whose slot qualifies marks the stop point.
					runstart_index = __shfl_sync(0xffffffff, my_runstart_index, winning_lane);
					break;
				}
				my_runstart_index += 32;
				// BUGFIX: refresh the probed remainder after advancing; the
				// previous code re-balloted on the value read before the loop.
				my_current_remainder = get_slot(qf, my_runstart_index) >> qf->metadata->value_bits;
			}
			uint64_t current_remainder = get_slot(qf, runstart_index) >> qf->metadata->value_bits;

			/* First insertion of this remainder, larger than anything in the
			 * run: append past the run end. */
			if (runstart_index > runend_index) {
				operation = 1;
				insert_index = runstart_index;
				new_value = hash_remainder;
			/* First insertion, but larger remainders exist: insert in place. */
			} else if (current_remainder != compare_remainder) {
				operation = 2; /* Inserting */
				insert_index = runstart_index;
				new_value = hash_remainder;
			} else {
				/* Already present: hand back its stored value bits.
				 * BUGFIX: bitwise & — logical && collapsed this to 0/1. */
				*value = get_slot(qf, runstart_index) & BITMASK(qf->metadata->value_bits);
				return QF_ITEM_FOUND;
			}
		}

		if (operation >= 0) {
			uint64_t empty_slot_index = 0;
			if (warpID == 0)
				empty_slot_index = find_first_empty_slot(qf, runend_index + 1);
			/* BUGFIX: broadcast lane 0's result before ANY lane uses it, so
			 * every check below is warp-uniform. */
			empty_slot_index = __shfl_sync(0xffffffff, empty_slot_index, 0);

#if DROP_ON_BIG_CLUSTER
			// Drop inserts once a cluster has grown too large (benign
			// multi-lane store of the same value to qf_full).
			if (qf->metadata->qf_full) {
				return QF_FULL;
			}
			if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF) {
				qf->metadata->qf_full = true;
				return QF_FULL;
			}
#endif
			// Refuse to shift across more than the locked region.
			if (empty_slot_index / NUM_SLOTS_TO_LOCK > hash_bucket_index / NUM_SLOTS_TO_LOCK + 1) {
				return QF_FULL;
			}
			if (empty_slot_index >= qf->metadata->xnslots) {
				printf("Ran out of space. Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index);
				return QF_FULL;
			}

			// All 32 lanes participate in the shift of remainders.
			shift_remainders_cooperative(qf, insert_index, empty_slot_index, warpID);

			// Metadata fix-up is serial on lane 0.
			if (warpID == 0) {
				set_slot(qf, insert_index, new_value);
				shift_runends(qf, insert_index, empty_slot_index - 1, 1);
				switch (operation) {
					case 0:
						METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index % QF_SLOTS_PER_BLOCK) % 64);
						break;
					case 1:
						METADATA_WORD(qf, runends, insert_index - 1) &= ~(1ULL << (((insert_index - 1) % QF_SLOTS_PER_BLOCK) % 64));
						METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index % QF_SLOTS_PER_BLOCK) % 64);
						break;
					case 2:
						METADATA_WORD(qf, runends, insert_index) &= ~(1ULL << ((insert_index % QF_SLOTS_PER_BLOCK) % 64));
						break;
					default:
						printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
						__threadfence(); // ensure store issued before trap
						asm("trap;");
#else
						abort();
#endif
				}
				/* Increment the offset for each block between the hash bucket
				 * index and the block of the empty slot. */
				uint64_t i;
				for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <= empty_slot_index / QF_SLOTS_PER_BLOCK; i++) {
					if (get_block(qf, i)->offset < BITMASK(8 * sizeof(qf->blocks[0].offset)))
						get_block(qf, i)->offset++;
#if DEBUG_ASSERTS
					assert(get_block(qf, i)->offset != 0);
#endif
				}
			}
		}
		METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
	}
	return QF_ITEM_INSERTED;
}

__host__ __device__ static inline qf_returns insert1(QF *qf, __uint64_t hash, uint8_t runtime_lock)
{
	int ret_distance = 0;
	uint64_t hash_remainder = hash &
BITMASK(qf->metadata->bits_per_slot);
	uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
	uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
	/*
	if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
		if (!qf_lock(qf, hash_bucket_index, true, runtime_lock))
			return QF_COULDNT_LOCK;
	}
	*/
	//printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder);
	// Fast path: home slot is empty — claim it and set runend + occupied bits.
	if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) {
		METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
		set_slot(qf, hash_bucket_index, hash_remainder);
		METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
		ret_distance = 0;
		//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
		//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
		//modify_metadata(&qf->runtimedata->pc_nelts, 1);
	} else {
		uint64_t runend_index = run_end(qf, hash_bucket_index);
#if DROP_ON_RUNEND
		// Optional policy: drop the insert if the run already extends too far
		// past its home bucket.
		if (runend_index - hash_bucket_index >= RUNEND_CUTOFF){
			//printf("Dropping\n");
			return QF_FULL;
		}
#endif
		// operation: 0 = start a new run, 1 = append past the run end (moves
		// the runend bit), 2 = insert inside the run, -1 = counter was
		// incremented in place, no slot shift needed.
		int operation = 0; /* Insert into empty bucket */
		uint64_t insert_index = runend_index + 1;
		uint64_t new_value = hash_remainder;
		/* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
		uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1;
		if (is_occupied(qf, hash_bucket_index)) {
			/* Find the counter for this remainder if it exists. */
			uint64_t current_remainder = get_slot(qf, runstart_index);
			uint64_t zero_terminator = runstart_index;
			/* The counter for 0 is special: its extended counter has no
			 * unambiguous terminator, so locate where the run of 0s ends. */
			if (current_remainder == 0) {
				uint64_t t = runstart_index + 1;
				while (t < runend_index && get_slot(qf, t) != 0)
					t++;
				if (t < runend_index && get_slot(qf, t+1) == 0)
					zero_terminator = t+1; /* Three or more 0s */
				else if (runstart_index < runend_index && get_slot(qf, runstart_index + 1) == 0)
					zero_terminator = runstart_index + 1; /* Exactly two 0s */
				/* Otherwise, exactly one 0 (i.e. zero_terminator == runstart_index) */
				/* May read past end of run, but that's OK because loop below
				   can handle that */
				if (hash_remainder != 0) {
					runstart_index = zero_terminator + 1;
					current_remainder = get_slot(qf, runstart_index);
				}
			}
			/* Skip over counters for other remainders (runs are sorted by
			 * remainder value). */
			while (current_remainder < hash_remainder && runstart_index <= runend_index) {
				/* If this remainder has an extended counter, skip over it. */
				if (runstart_index < runend_index &&
				    get_slot(qf, runstart_index + 1) < current_remainder) {
					runstart_index = runstart_index + 2;
					while (runstart_index < runend_index &&
					       get_slot(qf, runstart_index) != current_remainder)
						runstart_index++;
					runstart_index++;
				/* This remainder has a simple counter. */
				} else {
					runstart_index++;
				}
				/* This may read past the end of the run, but the while loop
				   condition will prevent us from using the invalid result in
				   that case. */
				current_remainder = get_slot(qf, runstart_index);
			}
			/* If this is the first time we've inserted the new remainder, and
			   it is larger than any remainder in the run. */
			if (runstart_index > runend_index) {
				operation = 1;
				insert_index = runstart_index;
				new_value = hash_remainder;
				//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
			/* This is the first time we're inserting this remainder, but
			   there are larger remainders already in the run. */
			} else if (current_remainder != hash_remainder) {
				operation = 2; /* Inserting */
				insert_index = runstart_index;
				new_value = hash_remainder;
				//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
			/* Cases below here: we're incrementing the (simple or extended)
			   counter for this remainder. */
			/* If there's exactly one instance of this remainder: duplicate the
			   slot to start a counter. */
			} else if (runstart_index == runend_index ||
			           (hash_remainder > 0 && get_slot(qf, runstart_index + 1) > hash_remainder) ||
			           (hash_remainder == 0 && zero_terminator == runstart_index)) {
				operation = 2; /* Insert */
				insert_index = runstart_index;
				new_value = hash_remainder;
			/* If there are exactly two instances of this remainder: insert the
			   first counter digit (encoded 0). */
			} else if ((hash_remainder > 0 && get_slot(qf, runstart_index + 1) == hash_remainder) ||
			           (hash_remainder == 0 && zero_terminator == runstart_index + 1)) {
				operation = 2; /* Insert */
				insert_index = runstart_index + 1;
				new_value = 0;
			/* Special case for three 0s */
			} else if (hash_remainder == 0 && zero_terminator == runstart_index + 2) {
				operation = 2; /* Insert */
				insert_index = runstart_index + 1;
				new_value = 1;
			/* There is an extended counter for this remainder. */
			} else {
				/* Move to the LSD of the counter. */
				insert_index = runstart_index + 1;
				while (get_slot(qf, insert_index+1) != hash_remainder)
					insert_index++;
				/* Increment the counter, rippling the carry toward the MSD.
				 * Digits may never equal 0 or the remainder itself (those
				 * values delimit the counter), hence the re-checks below. */
				uint64_t digit, carry;
				do {
					carry = 0;
					digit = get_slot(qf, insert_index);
					// Convert a leading 0 (which is special) to a normal
					// encoded digit
					if (digit == 0) {
						digit++;
						if (digit == current_remainder)
							digit++;
					}
					// Increment the digit
					digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot);
					// Ensure digit meets our encoding requirements
					if (digit == 0) {
						digit++;
						carry = 1;
					}
					if (digit == current_remainder)
						digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot);
					if (digit == 0) {
						digit++;
						carry = 1;
					}
					set_slot(qf, insert_index, digit);
					insert_index--;
				} while(insert_index > runstart_index && carry);
				/* If the counter needs to be expanded. */
				if (insert_index == runstart_index &&
				    (carry > 0 || (current_remainder != 0 && digit >= current_remainder))) {
					operation = 2; /* insert */
					insert_index = runstart_index + 1;
					if (!carry)
						/* To prepend a 0 before the counter if the MSD is
						   greater than the rem */
						new_value = 0;
					else if (carry) {
						/* Increment the new value because we don't use 0 to
						   encode counters */
						new_value = 2;
						/* If the rem is greater than or equal to the new_value
						   then fail*/
#if DEBUG_ASSERTS
						if (current_remainder > 0)
							assert(new_value < current_remainder);
#endif
					}
				} else {
					// Counter incremented in place; no slot needs to move.
					operation = -1;
				}
			}
		}
		//else {
		//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
		//}
		// A slot must be inserted: shift everything up to the first empty
		// slot and splice in new_value at insert_index.
		if (operation >= 0) {
			uint64_t empty_slot_index = find_first_empty_slot(qf, runend_index+1);
#if DROP_ON_BIG_CLUSTER
			// if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
			// 	return QF_FULL;
			// }
			if (qf->metadata->qf_full){
				return QF_FULL;
			}
			if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
				qf->metadata->qf_full = true;
				return QF_FULL;
			}
#endif
			// Refuse to shift across more than the locked region.
			if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){
				return QF_FULL;
			}
			if (empty_slot_index >= qf->metadata->xnslots) {
				printf("Ran out of space. Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index);
				return QF_FULL;
			}
			shift_remainders(qf, insert_index, empty_slot_index);
			set_slot(qf, insert_index, new_value);
			ret_distance = insert_index - hash_bucket_index;
			shift_runends(qf, insert_index, empty_slot_index-1, 1);
			switch (operation) {
				case 0:
					METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64);
					break;
				case 1:
					METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64));
					METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64);
					break;
				case 2:
					METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64));
					break;
				default:
					printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
					__threadfence(); // ensure store issued before trap
					asm("trap;");
#else
					abort();
#endif
			}
			/*
			 * Increment the offset for each block between the hash bucket index
			 * and block of the empty slot
			 */
			uint64_t i;
			for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <= empty_slot_index/QF_SLOTS_PER_BLOCK; i++) {
				if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset)))
					get_block(qf, i)->offset++;
#if DEBUG_ASSERTS
				assert(get_block(qf, i)->offset != 0);
#endif
			}
			//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
		}
		//modify_metadata(&qf->runtimedata->pc_nelts, 1);
		METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
	}
	/*
	if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
		qf_unlock(qf, hash_bucket_index, true);
	}
	*/
	//return ret_distance;
	return QF_ITEM_INSERTED;
}

/* Single-item insert, warp-cooperative variant: same algorithm as insert1 but
 * intended to be called by all 32 lanes of a warp (warpID = lane id).
 * Returns the shift distance (ret_distance), not a qf_returns code. */
__device__ static inline int insert1_cooperative(QF *qf, __uint64_t hash, uint8_t runtime_lock, int warpID)
{
	int ret_distance = 0;
	uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
	uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
	uint64_t hash_bucket_block_offset = hash_bucket_index %
QF_SLOTS_PER_BLOCK;
	/*
	if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
		if (!qf_lock(qf, hash_bucket_index, true, runtime_lock))
			return QF_COULDNT_LOCK;
	}
	*/
	//printf("In insert1, Index is %llu, block_offset is %llu, remainder is %llu \n", hash_bucket_index, hash_bucket_block_offset, hash_remainder);
	//this is checking if the slot is empty, i.e. direct insert
	//no memmove required, no warp fancyness
	//no space for optimization on a warp level
	if (is_empty(qf, hash_bucket_index) /* might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index */) {
		METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
		set_slot(qf, hash_bucket_index, hash_remainder);
		METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
		ret_distance = 0;
		//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
		//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
		//modify_metadata(&qf->runtimedata->pc_nelts, 1);
	} else { //slot was occupied
		//I believe this can be optimized? not super certain about the performance reqs
		uint64_t runend_index = run_end(qf, hash_bucket_index);
		// operation: 0 = new run, 1 = append past run end, 2 = insert inside
		// the run, -1 = counter incremented in place (no shift).
		int operation = 0; /* Insert into empty bucket */
		uint64_t insert_index = runend_index + 1;
		uint64_t new_value = hash_remainder;
		/* printf("RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */
		uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index- 1) + 1;
		if (is_occupied(qf, hash_bucket_index)) {
			/* Find the counter for this remainder if it exists. */
			uint64_t current_remainder = get_slot(qf, runstart_index);
			uint64_t zero_terminator = runstart_index;
			/* The counter for 0 is special. */
			//this logic can't be optimized
			if (current_remainder == 0) {
				uint64_t t = runstart_index + 1;
				while (t < runend_index && get_slot(qf, t) != 0)
					t++;
				if (t < runend_index && get_slot(qf, t+1) == 0)
					zero_terminator = t+1; /* Three or more 0s */
				else if (runstart_index < runend_index && get_slot(qf, runstart_index + 1) == 0)
					zero_terminator = runstart_index + 1; /* Exactly two 0s */
				/* Otherwise, exactly one 0 (i.e. zero_terminator == runstart_index) */
				/* May read past end of run, but that's OK because loop below
				   can handle that */
				if (hash_remainder != 0) {
					runstart_index = zero_terminator + 1;
					current_remainder = get_slot(qf, runstart_index);
				}
			}
			//THIS CAN BE OPTIMIZED
			//rewrite
			//needs to be loopy boy and handle special counters
			// NOTE(review): the next few statements look like unfinished
			// scaffolding for a warp-parallel search — my_runstart_index /
			// my_current_remainder are computed but never used, and the
			// if-body below is empty. The serial loop after them does the
			// actual work; presumably this was meant to mirror the ballot
			// search in insert1_if_not_exists_cooperative. TODO confirm.
			uint64_t my_runstart_index = runstart_index+warpID;
			uint64_t my_current_remainder = get_slot(qf, my_runstart_index);
			//everyone has one of 32 partitions
			if (my_runstart_index <= runend_index){
			}
			/* Skip over counters for other remainders. */
			while (current_remainder < hash_remainder && runstart_index <= runend_index) {
				/* If this remainder has an extended counter, skip over it. */
				if (runstart_index < runend_index &&
				    get_slot(qf, runstart_index + 1) < current_remainder) {
					runstart_index = runstart_index + 2;
					while (runstart_index < runend_index &&
					       get_slot(qf, runstart_index) != current_remainder)
						runstart_index++;
					runstart_index++;
				/* This remainder has a simple counter. */
				} else {
					runstart_index++;
				}
				/* This may read past the end of the run, but the while loop
				   condition will prevent us from using the invalid result in
				   that case. */
				current_remainder = get_slot(qf, runstart_index);
			}
			/* If this is the first time we've inserted the new remainder, and
			   it is larger than any remainder in the run. */
			if (runstart_index > runend_index) {
				operation = 1;
				insert_index = runstart_index;
				new_value = hash_remainder;
				//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
			/* This is the first time we're inserting this remainder, but
			   there are larger remainders already in the run. */
			} else if (current_remainder != hash_remainder) {
				operation = 2; /* Inserting */
				insert_index = runstart_index;
				new_value = hash_remainder;
				//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
			/* Cases below here: we're incrementing the (simple or extended)
			   counter for this remainder. */
			/* If there's exactly one instance of this remainder. */
			} else if (runstart_index == runend_index ||
			           (hash_remainder > 0 && get_slot(qf, runstart_index + 1) > hash_remainder) ||
			           (hash_remainder == 0 && zero_terminator == runstart_index)) {
				operation = 2; /* Insert */
				insert_index = runstart_index;
				new_value = hash_remainder;
			/* If there are exactly two instances of this remainder. */
			} else if ((hash_remainder > 0 && get_slot(qf, runstart_index + 1) == hash_remainder) ||
			           (hash_remainder == 0 && zero_terminator == runstart_index + 1)) {
				operation = 2; /* Insert */
				insert_index = runstart_index + 1;
				new_value = 0;
			/* Special case for three 0s */
			} else if (hash_remainder == 0 && zero_terminator == runstart_index + 2) {
				operation = 2; /* Insert */
				insert_index = runstart_index + 1;
				new_value = 1;
			/* There is an extended counter for this remainder. */
			} else {
				/* Move to the LSD of the counter. */
				insert_index = runstart_index + 1;
				while (get_slot(qf, insert_index+1) != hash_remainder)
					insert_index++;
				/* Increment the counter, rippling the carry toward the MSD. */
				uint64_t digit, carry;
				do {
					carry = 0;
					digit = get_slot(qf, insert_index);
					// Convert a leading 0 (which is special) to a normal
					// encoded digit
					if (digit == 0) {
						digit++;
						if (digit == current_remainder)
							digit++;
					}
					// Increment the digit
					digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot);
					// Ensure digit meets our encoding requirements
					if (digit == 0) {
						digit++;
						carry = 1;
					}
					if (digit == current_remainder)
						digit = (digit + 1) & BITMASK(qf->metadata->bits_per_slot);
					if (digit == 0) {
						digit++;
						carry = 1;
					}
					set_slot(qf, insert_index, digit);
					insert_index--;
				} while(insert_index > runstart_index && carry);
				/* If the counter needs to be expanded. */
				if (insert_index == runstart_index &&
				    (carry > 0 || (current_remainder != 0 && digit >= current_remainder))) {
					operation = 2; /* insert */
					insert_index = runstart_index + 1;
					if (!carry)
						/* To prepend a 0 before the counter if the MSD is
						   greater than the rem */
						new_value = 0;
					else if (carry) {
						/* Increment the new value because we don't use 0 to
						   encode counters */
						new_value = 2;
						/* If the rem is greater than or equal to the new_value
						   then fail*/
#if DEBUG_ASSERTS
						if (current_remainder > 0)
							assert(new_value < current_remainder);
#endif
					}
				} else {
					operation = -1;
				}
			}
		}
		//else {
		//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
		//}
		if (operation >= 0) {
			uint64_t empty_slot_index = find_first_empty_slot(qf, runend_index+1);
#if DROP_ON_BIG_CLUSTER
			// if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
			// 	return QF_FULL;
			// }
			if (qf->metadata->qf_full){
				return QF_FULL;
			}
			if (empty_slot_index - hash_bucket_index > BIG_CLUSTER_DROPOFF){
				qf->metadata->qf_full = true;
				return QF_FULL;
			}
#endif
			// Refuse to shift across more than the locked region.
			if (empty_slot_index/NUM_SLOTS_TO_LOCK > hash_bucket_index/NUM_SLOTS_TO_LOCK+1){
				return QF_FULL;
			}
			if (empty_slot_index >= qf->metadata->xnslots) {
				printf("Ran out of space. Total xnslots is %lu, first empty slot is %lu\n", qf->metadata->xnslots, empty_slot_index);
				return QF_FULL;
			}
			shift_remainders(qf, insert_index, empty_slot_index);
			set_slot(qf, insert_index, new_value);
			ret_distance = insert_index - hash_bucket_index;
			shift_runends(qf, insert_index, empty_slot_index-1, 1);
			switch (operation) {
				case 0:
					METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK) % 64);
					break;
				case 1:
					METADATA_WORD(qf, runends, insert_index-1) &= ~(1ULL << (((insert_index-1) %QF_SLOTS_PER_BLOCK) %64));
					METADATA_WORD(qf, runends, insert_index) |= 1ULL << ((insert_index%QF_SLOTS_PER_BLOCK)% 64);
					break;
				case 2:
					METADATA_WORD(qf, runends, insert_index) &= ~(1ULL <<((insert_index %QF_SLOTS_PER_BLOCK) %64));
					break;
				default:
					printf("Invalid operation %d\n", operation);
#ifdef __CUDA_ARCH__
					__threadfence(); // ensure store issued before trap
					asm("trap;");
#else
					abort();
#endif
			}
			/*
			 * Increment the offset for each block between the hash bucket index
			 * and block of the empty slot
			 */
			uint64_t i;
			for (i = hash_bucket_index / QF_SLOTS_PER_BLOCK + 1; i <= empty_slot_index/QF_SLOTS_PER_BLOCK; i++) {
				if (get_block(qf, i)->offset < BITMASK(8*sizeof(qf->blocks[0].offset)))
					get_block(qf, i)->offset++;
#if DEBUG_ASSERTS
				assert(get_block(qf, i)->offset != 0);
#endif
			}
			//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
		}
		//modify_metadata(&qf->runtimedata->pc_nelts, 1);
		METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
	}
	/*
	if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
		qf_unlock(qf, hash_bucket_index, true);
	}
	*/
	return ret_distance;
}

/* Multi-count insert: adds `count` instances of `hash`, using the
 * encode/decode counter helpers to build counter runs. */
__host__ __device__ static inline qf_returns insert(QF *qf, __uint64_t hash, uint64_t count, uint8_t runtime_lock)
{
	int ret_distance = 0;
	uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
	uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
	uint64_t hash_bucket_block_offset = hash_bucket_index % QF_SLOTS_PER_BLOCK;
	/*uint64_t
hash_bucket_lock_offset = hash_bucket_index % NUM_SLOTS_TO_LOCK;*/
	/*
	if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
		if (!qf_lock(qf, hash_bucket_index, false, runtime_lock))
			return QF_COULDNT_LOCK;
	}
	*/
	uint64_t runend_index = run_end(qf, hash_bucket_index);
	/* Empty slot: place one instance directly, then recurse for the rest. */
	if (might_be_empty(qf, hash_bucket_index) && runend_index == hash_bucket_index) {
		METADATA_WORD(qf, runends, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
		set_slot(qf, hash_bucket_index, hash_remainder);
		METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
		//ERIC TODO: see if this metadata is needed--probably isn't compatible with GPU
		//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
		//modify_metadata(&qf->runtimedata->pc_noccupied_slots, 1);
		//modify_metadata(&qf->runtimedata->pc_nelts, 1);
		/* This trick will, I hope, keep the fast case fast. */
		if (count > 1) {
			insert(qf, hash, count - 1, QF_NO_LOCK);
		}
	} else { /* Non-empty slot */
		// Scratch buffer for the encoded counter; encode_counter fills it
		// from the end (&new_values[67]) backwards.
		uint64_t new_values[67];
		int64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf,hash_bucket_index- 1) + 1;
		bool ret;
		if (!is_occupied(qf, hash_bucket_index)) { /* Empty bucket, but its
			slot is occupied by some other run's overflow. */
			uint64_t *p = encode_counter(qf, hash_remainder, count, &new_values[67]);
			ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf,
			                                                                       0,
			                                                                       hash_bucket_index,
			                                                                       runstart_index,
			                                                                       p,
			                                                                       &new_values[67] - p,
			                                                                       0);
			if (!ret)
				return QF_FULL;
			//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
			ret_distance = runstart_index - hash_bucket_index;
		} else { /* Non-empty bucket */
			uint64_t current_remainder, current_count, current_end;
			/* Find the counter for this remainder, if one exists. */
			current_end = decode_counter(qf, runstart_index, &current_remainder,&current_count);
			while (current_remainder < hash_remainder && !is_runend(qf, current_end)) {
				runstart_index = current_end + 1;
				current_end = decode_counter(qf, runstart_index, &current_remainder, &current_count);
			}
			/* If we reached the end of the run w/o finding a counter for this
			   remainder, then append a counter for this remainder to the run. */
			if (current_remainder < hash_remainder) {
				uint64_t *p = encode_counter(qf, hash_remainder, count, &new_values[67]);
				ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf,
				                                                                       1, /* Append to bucket */
				                                                                       hash_bucket_index,
				                                                                       current_end + 1,
				                                                                       p,
				                                                                       &new_values[67] - p,
				                                                                       0);
				if (!ret)
					return QF_FULL;
				//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
				ret_distance = (current_end + 1) - hash_bucket_index;
			/* Found a counter for this remainder.  Add in the new count. */
			} else if (current_remainder == hash_remainder) {
				uint64_t *p = encode_counter(qf, hash_remainder, current_count + count, &new_values[67]);
				ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf,
				                                                                       is_runend(qf, current_end) ? 1 : 2,
				                                                                       hash_bucket_index,
				                                                                       runstart_index,
				                                                                       p,
				                                                                       &new_values[67] - p,
				                                                                       current_end - runstart_index + 1);
				if (!ret)
					return QF_FULL;
				ret_distance = runstart_index - hash_bucket_index;
			/* No counter for this remainder, but there are larger remainders,
			   so we're not appending to the bucket. */
			} else {
				uint64_t *p = encode_counter(qf, hash_remainder, count, &new_values[67]);
				ret = insert_replace_slots_and_shift_remainders_and_runends_and_offsets(qf,
				                                                                       2, /* Insert to bucket */
				                                                                       hash_bucket_index,
				                                                                       runstart_index,
				                                                                       p,
				                                                                       &new_values[67] - p,
				                                                                       0);
				if (!ret)
					return QF_FULL;
				//modify_metadata(&qf->runtimedata->pc_ndistinct_elts, 1);
				ret_distance = runstart_index - hash_bucket_index;
			}
		}
		METADATA_WORD(qf, occupieds, hash_bucket_index) |= 1ULL << (hash_bucket_block_offset % 64);
		//modify_metadata(&qf->runtimedata->pc_nelts, count);
	}
	/*
	if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
		qf_unlock(qf, hash_bucket_index, false);
	}
	*/
	//return ret_distance;
	return QF_ITEM_INSERTED;
}

/* _remove: subtract `count` instances of `hash` (clamped at zero).
 * Returns the number of slots freed, or -1 if the item is not present. */
__host__ __device__ inline static int _remove(QF *qf, __uint64_t hash, uint64_t count, uint8_t runtime_lock)
{
	int ret_numfreedslots = 0;
	uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
	uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
	uint64_t current_remainder, current_count, current_end;
	uint64_t new_values[67];
	/*
	if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
		if (!qf_lock(qf, hash_bucket_index, false, runtime_lock))
			return -2;
	}
	*/
	/* Empty bucket */
	if (!is_occupied(qf, hash_bucket_index))
		return -1;
	uint64_t runstart_index = hash_bucket_index == 0 ?
0 : run_end(qf, hash_bucket_index - 1) + 1;
	uint64_t original_runstart_index = runstart_index;
	int only_item_in_the_run = 0;

	/*Find the counter for this remainder, if one exists.*/
	current_end = decode_counter(qf, runstart_index, &current_remainder, &current_count);
	while (current_remainder < hash_remainder && !is_runend(qf, current_end)) {
		runstart_index = current_end + 1;
		current_end = decode_counter(qf, runstart_index, &current_remainder, &current_count);
	}
	/* remainder not found in the given run */
	if (current_remainder != hash_remainder)
		return -1;

	// If the run consists of this single counter, removing it empties the
	// run and the occupied bit must be cleared by the helper below.
	if (original_runstart_index == runstart_index && is_runend(qf, current_end))
		only_item_in_the_run = 1;

	/* encode the new counter; a count at or below the removal amount clamps
	   to zero (full removal of the item) */
	uint64_t *p = encode_counter(qf, hash_remainder,
	                             count > current_count ? 0 : current_count - count,
	                             &new_values[67]);
	ret_numfreedslots = remove_replace_slots_and_shift_remainders_and_runends_and_offsets(qf,
	                                                                                     only_item_in_the_run,
	                                                                                     hash_bucket_index,
	                                                                                     runstart_index,
	                                                                                     p,
	                                                                                     &new_values[67] - p,
	                                                                                     current_end - runstart_index + 1);

	// update the nelements.
	//modify_metadata(&qf->runtimedata->pc_nelts, -count);
	/*qf->metadata->nelts -= count;*/

	/*
	if (GET_NO_LOCK(runtime_lock) != QF_NO_LOCK) {
		qf_unlock(qf, hash_bucket_index, false);
	}
	*/
	return ret_numfreedslots;
}

/***********************************************************************
 * Code that uses the above to implement key-value-counter operations.
* ***********************************************************************/

/* qf_init: lay out a QF (metadata followed by blocks) in the caller-supplied
 * `buffer`.  Two-phase sizing: if `buffer` is NULL or too small, the required
 * byte count is returned and nothing is written; otherwise the filter is
 * initialized in place and the total size in bytes is returned.
 * `nslots` must be a power of two. */
__host__ uint64_t qf_init(QF *qf, uint64_t nslots, uint64_t key_bits, uint64_t value_bits,
                          enum qf_hashmode hash, uint32_t seed, void* buffer, uint64_t buffer_len)
{
	uint64_t num_slots, xnslots, nblocks;
	uint64_t key_remainder_bits, bits_per_slot;
	uint64_t size;
	uint64_t total_num_bytes;

	assert(popcnt(nslots) == 1); /* nslots must be a power of 2 */
	num_slots = nslots;
	// Overflow headroom of ~10*sqrt(n) extra slots beyond the main table.
	xnslots = nslots + 10*sqrt((double)nslots);
	nblocks = (xnslots + QF_SLOTS_PER_BLOCK - 1) / QF_SLOTS_PER_BLOCK;
	key_remainder_bits = key_bits;
	// log2(nslots) bits of the key address a bucket; the rest stay in the slot.
	while (nslots > 1 && key_remainder_bits > 0) {
		key_remainder_bits--;
		nslots >>= 1;
	}
	assert(key_remainder_bits >= 2);

	bits_per_slot = key_remainder_bits + value_bits;
	assert (QF_BITS_PER_SLOT == 0 || QF_BITS_PER_SLOT == bits_per_slot);
	assert(bits_per_slot > 1);
#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64
	size = nblocks * sizeof(qfblock);
#else
	size = nblocks * (sizeof(qfblock) + QF_SLOTS_PER_BLOCK * bits_per_slot / 8);
#endif
	total_num_bytes = sizeof(qfmetadata) + size;
	if (buffer == NULL || total_num_bytes > buffer_len)
		return total_num_bytes;

	// memset(buffer, 0, total_num_bytes);
	qf->metadata = (qfmetadata *)(buffer);
	qf->blocks = (qfblock *)(qf->metadata + 1);

	qf->metadata->magic_endian_number = MAGIC_NUMBER;
	qf->metadata->reserved = 0;
	qf->metadata->hash_mode = hash;
	qf->metadata->total_size_in_bytes = size;
	qf->metadata->seed = seed;
	qf->metadata->nslots = num_slots;
	qf->metadata->xnslots = xnslots;
	qf->metadata->key_bits = key_bits;
	qf->metadata->value_bits = value_bits;
	qf->metadata->key_remainder_bits = key_remainder_bits;
	qf->metadata->bits_per_slot = bits_per_slot;

	qf->metadata->range = qf->metadata->nslots;
	qf->metadata->range <<= qf->metadata->key_remainder_bits;
	qf->metadata->nblocks = (qf->metadata->xnslots + QF_SLOTS_PER_BLOCK - 1) / QF_SLOTS_PER_BLOCK;
	qf->metadata->nelts = 0;
	qf->metadata->ndistinct_elts = 0;
	qf->metadata->noccupied_slots = 0;
	qf->metadata->qf_full = false;

	// NOTE(review): qf->runtimedata is dereferenced from here on but is never
	// allocated in this function (contrast qf_use below, which callocs it) —
	// presumably the caller allocates it before calling qf_init; confirm.
	qf->runtimedata->num_locks = ((qf->metadata->xnslots/NUM_SLOTS_TO_LOCK)+2);

	pc_init(&qf->runtimedata->pc_nelts, (int64_t*)&qf->metadata->nelts, 8, 100);
	pc_init(&qf->runtimedata->pc_ndistinct_elts, (int64_t*)&qf->metadata->ndistinct_elts, 8, 100);
	pc_init(&qf->runtimedata->pc_noccupied_slots, (int64_t*)&qf->metadata->noccupied_slots, 8, 100);
	/* initialize container resize */
	qf->runtimedata->auto_resize = 0;
	qf->runtimedata->container_resize = qf_resize_malloc;
	/* initialize all the locks to 0 */
	qf->runtimedata->metadata_lock = 0;
	//etodo: copy this to GPU
	qf->runtimedata->locks = (uint16_t *)calloc(qf->runtimedata->num_locks, sizeof(uint16_t));
	if (qf->runtimedata->locks == NULL) {
		perror("Couldn't allocate memory for runtime locks.");
		exit(EXIT_FAILURE);
	}
#ifdef LOG_WAIT_TIME
	qf->runtimedata->wait_times = (wait_time_data* )calloc(qf->runtimedata->num_locks+1, sizeof(wait_time_data));
	if (qf->runtimedata->wait_times == NULL) {
		perror("Couldn't allocate memory for runtime wait_times.");
		exit(EXIT_FAILURE);
	}
#endif
	return total_num_bytes;
}

/* qf_use: adopt an already-initialized QF image living in `buffer`,
 * allocating fresh runtime data (locks, etc.) for it.  Returns the required
 * size if `buffer_len` is too small for the stored filter, 0-on-success
 * semantics otherwise per the surrounding API. */
__host__ uint64_t qf_use(QF* qf, void* buffer, uint64_t buffer_len)
{
	qf->metadata = (qfmetadata *)(buffer);
	if (qf->metadata->total_size_in_bytes + sizeof(qfmetadata) > buffer_len) {
		return qf->metadata->total_size_in_bytes + sizeof(qfmetadata);
	}
	qf->blocks = (qfblock *)(qf->metadata + 1);

	qf->runtimedata = (qfruntime *)calloc(sizeof(qfruntime), 1);
	if (qf->runtimedata == NULL) {
		perror("Couldn't allocate memory for runtime data.");
		exit(EXIT_FAILURE);
	}
	/* initialize all the locks to 0 */
	qf->runtimedata->metadata_lock = 0;
	// NOTE(review): num_locks is read from the just-zeroed runtimedata here,
	// so this calloc gets a size of 0 unless num_locks is set elsewhere first
	// — verify against callers.
	qf->runtimedata->locks = (uint16_t *)calloc(qf->runtimedata->num_locks, sizeof(uint16_t));
	if (qf->runtimedata->locks == NULL) {
		perror("Couldn't allocate memory for runtime locks.");
		exit(EXIT_FAILURE);
	}
#ifdef LOG_WAIT_TIME
	qf->runtimedata->wait_times = (wait_time_data* )calloc(qf->runtimedata->num_locks+1, sizeof(wait_time_data));
	if
(qf->runtimedata->wait_times == NULL) { perror("Couldn't allocate memory for runtime wait_times."); exit(EXIT_FAILURE); } #endif return sizeof(qfmetadata) + qf->metadata->total_size_in_bytes; } __host__ void *qf_destroy(QF *qf) { assert(qf->runtimedata != NULL); if (qf->runtimedata->locks != NULL) free((void*)qf->runtimedata->locks); if (qf->runtimedata->wait_times != NULL) free(qf->runtimedata->wait_times); if (qf->runtimedata->f_info.filepath != NULL) free(qf->runtimedata->f_info.filepath); free(qf->runtimedata); return (void*)qf->metadata; } __host__ bool qf_malloc(QF *qf, uint64_t nslots, uint64_t key_bits, uint64_t value_bits, enum qf_hashmode hash, bool on_device, uint32_t seed) { uint64_t total_num_bytes = qf_init(qf, nslots, key_bits, value_bits, hash, seed, NULL, 0); //buffer malloc bad? void* buffer = malloc(total_num_bytes); memset(buffer, 0, total_num_bytes); //printf("QF bytes: %llu\n", total_num_bytes); if (buffer == NULL) { perror("Couldn't allocate memory for the CQF."); exit(EXIT_FAILURE); } qf->runtimedata = (qfruntime*)calloc(sizeof(qfruntime), 1); if (qf->runtimedata == NULL) { perror("Couldn't allocate memory for runtime data."); exit(EXIT_FAILURE); } uint64_t init_size = qf_init(qf, nslots, key_bits, value_bits, hash, seed, buffer, total_num_bytes); if (init_size == total_num_bytes) return total_num_bytes; else return -1; } __host__ bool qf_free(QF *qf) { assert(qf->metadata != NULL); void *buffer = qf_destroy(qf); if (buffer != NULL) { free(buffer); return true; } return false; } //consolidate all of the device construction into one convenient func! 
/* Build a device-resident CQF and return a device pointer to it in *qf.
 * nbits = log2(number of slots); bulk_config selects the bulk-insert buffer
 * layout (one lock placeholder + per-lock buffers) vs the point-API layout
 * (LOCK_DIST-strided spin locks, single dummy buffer).
 * NOTE(review): none of the cudaMalloc/cudaMemcpy/cudaMemset calls below are
 * error-checked — an allocation failure propagates as a garbage device
 * pointer. Consider wrapping in a CUDA_CHECK macro. */
__host__ void qf_malloc_device(QF** qf, int nbits, bool bulk_config){

	//bring in compile #define
	int rbits = 8;
	int vbits = 0;

	QF host_qf;             // staging copy on the host
	QF temp_device_qf;      // holds the device-side component pointers
	QF* temp_dev_ptr;

	uint64_t nslots = 1ULL << nbits;
	int num_hash_bits = nbits+rbits;

	/* build the filter on the host first, then mirror it to the device */
	qf_malloc(&host_qf, nslots, num_hash_bits, vbits, QF_HASH_INVERTIBLE, false, 0);
	qf_set_auto_resize(&host_qf, false);

	qfruntime* _runtime;
	qfmetadata* _metadata;
	qfblock* _blocks;

	uint16_t * dev_locks;
	uint64_t ** buffers;
	uint64_t * buffer_sizes;

	if (bulk_config){

		uint64_t num_locks = host_qf.runtimedata->num_locks;

		//allocate 1 lock so that cudaFree doesn't break later
		cudaMalloc((void ** )&dev_locks, 1 * sizeof(uint16_t));

		//are these 2x necessary?
		cudaMalloc((void **) & buffer_sizes, 2*num_locks*sizeof(uint64_t));
		cudaMalloc((void **)&buffers, 2*num_locks*sizeof(uint64_t*));

	} else {

		//point API, multiply locks
		cudaMalloc((void ** )&dev_locks, host_qf.runtimedata->num_locks*LOCK_DIST * sizeof(uint16_t));
		cudaMemset(dev_locks, 0, host_qf.runtimedata->num_locks*LOCK_DIST * sizeof(uint16_t));

		cudaMalloc((void **) & buffer_sizes, 1*sizeof(uint64_t));
		cudaMalloc((void **)&buffers, 1*sizeof(uint64_t*));

	}

	//wipe and replace: the host lock array is swapped for the device one
	free(host_qf.runtimedata->locks);
	host_qf.runtimedata->locks = dev_locks;

	cudaMalloc((void**)&_runtime, sizeof(qfruntime));
	cudaMalloc((void**)&_metadata, sizeof(qfmetadata));
	cudaMalloc((void**)&_blocks, qf_get_total_size_in_bytes(&host_qf));

	//uint64_t num_locks = host_qf.runtimedata->num_locks;

	//insert these into host_qf so dev qf has access.
	//they don't need to be wiped as buffers are reset before every insert.
	host_qf.runtimedata->buffers = buffers;
	host_qf.runtimedata->buffer_sizes = buffer_sizes;

	/* deep-copy the three components to the device */
	cudaMemcpy(_runtime, host_qf.runtimedata, sizeof(qfruntime), cudaMemcpyHostToDevice);
	cudaMemcpy(_metadata, host_qf.metadata, sizeof(qfmetadata), cudaMemcpyHostToDevice);
	cudaMemcpy(_blocks, host_qf.blocks, qf_get_total_size_in_bytes(&host_qf), cudaMemcpyHostToDevice);

	temp_device_qf.runtimedata = _runtime;
	temp_device_qf.metadata = _metadata;
	temp_device_qf.blocks = _blocks;

	//this might be buggy
	//request to fill the dev ptr with a QF, then copy over, then copy that to qf
	cudaMalloc((void **)&temp_dev_ptr, sizeof(QF));
	cudaMemcpy(temp_dev_ptr, &temp_device_qf, sizeof(QF), cudaMemcpyHostToDevice);

	*qf = temp_dev_ptr;

}

//TODO: make me destroy buffers modifiable
/* Free a device-resident CQF built by qf_malloc_device. The QF struct and
 * its runtime block are staged back to pinned host memory so the embedded
 * device pointers can be freed individually.
 * NOTE(review): wait_times/f_info.filepath were host-allocated (calloc) in
 * qf_init but are released with cudaFree here — verify these fields are
 * either NULL or device pointers by this point. */
__host__ void qf_destroy_device(QF * qf){

	QF * host_qf;
	cudaMallocHost((void ** )&host_qf, sizeof(QF));
	cudaMemcpy(host_qf, qf, sizeof(QF), cudaMemcpyDeviceToHost);

	qfruntime* _runtime;
	cudaMallocHost((void **) &_runtime, sizeof(qfruntime));
	cudaMemcpy(_runtime, host_qf->runtimedata, sizeof(qfruntime), cudaMemcpyDeviceToHost);

	//may need to have _runtimedata shunted into another host object

	//ill synchronize before this to double check
	assert(_runtime != NULL);
	if (_runtime->locks != NULL)
		cudaFree(_runtime->locks);
	if (_runtime->buffers != NULL){
		cudaFree(_runtime->buffers);
		cudaFree(_runtime->buffer_sizes);
	}
	if (_runtime->wait_times != NULL)
		cudaFree(_runtime->wait_times);

	//this one may break
	if (_runtime->f_info.filepath != NULL)
		cudaFree(host_qf->runtimedata->f_info.filepath);

	cudaFree(host_qf->runtimedata);
	cudaFree(host_qf->metadata);
	cudaFree(host_qf->blocks);

	cudaFreeHost(host_qf);
	cudaFreeHost(_runtime);

}

/* Shallow-structure copy of one host CQF into another whose components are
 * already allocated with at least the source's sizes. */
__host__ void qf_copy(QF *dest, const QF *src)
{
	DEBUG_CQF("%s\n","Source CQF");
	DEBUG_DUMP(src);
	memcpy(dest->runtimedata, src->runtimedata, sizeof(qfruntime));
	memcpy(dest->metadata, src->metadata, sizeof(qfmetadata));
	memcpy(dest->blocks, src->blocks, src->metadata->total_size_in_bytes);
	DEBUG_CQF("%s\n","Destination CQF after copy.");
	DEBUG_DUMP(dest);
}

/* Reset a host CQF to empty: zero the counters and wipe every block. */
__host__ void qf_reset(QF *qf)
{
	qf->metadata->nelts = 0;
	qf->metadata->ndistinct_elts = 0;
	qf->metadata->noccupied_slots = 0;

#ifdef LOG_WAIT_TIME
	// NOTE(review): this reads qf->wait_times; elsewhere the field lives at
	// qf->runtimedata->wait_times — confirm this compiles with LOG_WAIT_TIME.
	memset(qf->wait_times, 0,
				 (qf->runtimedata->num_locks+1)*sizeof(wait_time_data));
#endif
#if QF_BITS_PER_SLOT == 8 || QF_BITS_PER_SLOT == 16 || QF_BITS_PER_SLOT == 32 || QF_BITS_PER_SLOT == 64
	memset(qf->blocks, 0, qf->metadata->nblocks* sizeof(qfblock));
#else
	memset(qf->blocks, 0, qf->metadata->nblocks*(sizeof(qfblock) +
		QF_SLOTS_PER_BLOCK * qf->metadata->bits_per_slot / 8));
#endif
}

/* Grow the host CQF to `nslots` by allocating a new filter and re-inserting
 * every (hash, value, count) triple from an iterator over the old one.
 * Returns the number of keys moved, or a negative error code. */
__host__ int64_t qf_resize_malloc(QF *qf, uint64_t nslots)
{
	QF new_qf;
	if (!qf_malloc(&new_qf, nslots, qf->metadata->key_bits,
								 qf->metadata->value_bits, qf->metadata->hash_mode, false,
								 qf->metadata->seed))
		return -1;
	if (qf->runtimedata->auto_resize)
		qf_set_auto_resize(&new_qf, true);

	// copy keys from qf into new_qf
	QFi qfi;
	qf_iterator_from_position(qf, &qfi, 0);
	int64_t ret_numkeys = 0;
	do {
		uint64_t key, value, count;
		qfi_get_hash(&qfi, &key, &value, &count);
		qfi_next(&qfi);
		/* keys come out of the iterator already hashed */
		int ret = qf_insert(&new_qf, key, value, count, QF_NO_LOCK |
												QF_KEY_IS_HASH);
		if (ret < 0) {
			printf("Failed to insert key: %ld into the new CQF.\n", key);
			return ret;
		}
		ret_numkeys++;
	} while(!qfi_end(&qfi));

	qf_free(qf);
	memcpy(qf, &new_qf, sizeof(QF));

	return ret_numkeys;
}

/* Resize into a caller-provided buffer. If the buffer is too small, returns
 * the required size without modifying qf. */
uint64_t qf_resize(QF* qf, uint64_t nslots, void* buffer, uint64_t buffer_len)
{

	printf("QF attempting resize - This will fail\n");
	QF new_qf;
	new_qf.runtimedata = (qfruntime *)calloc(sizeof(qfruntime), 1);
	if (new_qf.runtimedata == NULL) {
		perror("Couldn't allocate memory for runtime data.\n");
		exit(EXIT_FAILURE);
	}

	uint64_t init_size = qf_init(&new_qf, nslots, qf->metadata->key_bits,
															 qf->metadata->value_bits,
															 qf->metadata->hash_mode, qf->metadata->seed,
															 buffer, buffer_len);

	if (init_size > buffer_len)
		return init_size;

	if (qf->runtimedata->auto_resize)
		qf_set_auto_resize(&new_qf, true);

	// copy keys from qf into new_qf
	QFi qfi;
	qf_iterator_from_position(qf, &qfi, 0);
	do {
		uint64_t key, value, count;
		qfi_get_hash(&qfi, &key, &value, &count);
		qfi_next(&qfi);
		int ret = qf_insert(&new_qf, key, value, count, QF_NO_LOCK |
												QF_KEY_IS_HASH);
		if (ret < 0) {
			printf("Failed to insert key: %ld into the new CQF.\n", key);
			abort(); // kill kernel with error
		}
	} while(!qfi_end(&qfi));

	qf_free(qf);
	memcpy(qf, &new_qf, sizeof(QF));

	return init_size;
}

/* Toggle automatic container growth (used by the host insert paths). */
__host__ void qf_set_auto_resize(QF* qf, bool enabled)
{
	if (enabled)
		qf->runtimedata->auto_resize = 1;
	else
		qf->runtimedata->auto_resize = 0;
}

/* Insert `key` only if absent; on QF_ITEM_FOUND the stored value is written
 * to *retvalue. Only count == 1 is supported on this path. No internal
 * locking — callers must hold the relevant region locks (see point_insert_*). */
__host__ __device__ qf_returns qf_insert_not_exists(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t
							flags, uint8_t * retvalue)
{
	// We fill up the CQF up to 95% load factor.
	// This is a very conservative check.
	//TODO: GPU resizing
	/*
	if (qf_get_num_occupied_slots(qf) >= qf->metadata->nslots * 0.95) {
		if (qf->runtimedata->auto_resize) {
			fprintf(stdout, "Resizing the CQF.\n");
			if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) < 0)
			{
				fprintf(stderr, "Resizing the failed.\n");
				return QF_FULL;
			}
		} else
			return QF_FULL;
	}
	*/

	// if (count == 0)
	// 	return 0;

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key),
													qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}
	uint64_t hash = (key << qf->metadata->value_bits) | (value &
																											 BITMASK(qf->metadata->value_bits));

	//printf("Inside insert, new hash is recorded as %llu\n", hash);
	qf_returns ret;

	if (count == 1)
		ret = insert1_if_not_exists(qf, hash, retvalue);

	//for now count is always 1
	//else
	//ret = insert(qf, hash, count, flags);

	// check for fullness based on the distance from the home slot to the slot
	// in which the key is inserted
	/*
	if (ret == QF_FULL || ret > DISTANCE_FROM_HOME_SLOT_CUTOFF) {
		float load_factor = qf_get_num_occupied_slots(qf) /
			(float)qf->metadata->nslots;
		fprintf(stdout, "Load factor: %lf\n", load_factor);
		if (qf->runtimedata->auto_resize) {
			fprintf(stdout, "Resizing the CQF.\n");
			if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) > 0)
			{
				if (ret == QF_FULL) {
					if (count == 1)
						ret = insert1(qf, hash, flags);
					else
						ret = insert(qf, hash, count, flags);
				}
				fprintf(stderr, "Resize finished.\n");
			} else {
				fprintf(stderr, "Resize failed\n");
				ret = QF_FULL;
			}
		} else {
			fprintf(stderr, "The CQF is filling up.\n");
			ret = QF_FULL;
		}
	}
	*/

	return ret;
}

/* Warp-cooperative variant of qf_insert_not_exists: all 32 lanes of a warp
 * call with the same arguments plus their lane id (warpID). Only count == 1
 * is supported; the real work happens in insert1_if_not_exists_cooperative. */
__device__ qf_returns qf_insert_not_exists_cooperative(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t
							flags, uint8_t * retvalue, int warpID)
{
	// We fill up the CQF up to 95% load factor.
	// This is a very conservative check.
	//TODO: GPU resizing
	/*
	if (qf_get_num_occupied_slots(qf) >= qf->metadata->nslots * 0.95) {
		if (qf->runtimedata->auto_resize) {
			fprintf(stdout, "Resizing the CQF.\n");
			if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) < 0)
			{
				fprintf(stderr, "Resizing the failed.\n");
				return QF_FULL;
			}
		} else
			return QF_FULL;
	}
	*/

	// if (count == 0)
	// 	return 0;

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key),
													qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}
	uint64_t hash = (key << qf->metadata->value_bits) | (value &
																											 BITMASK(qf->metadata->value_bits));

	//printf("Inside insert, new hash is recorded as %llu\n", hash);
	qf_returns ret;

	if (count == 1)
		ret = insert1_if_not_exists_cooperative(qf, hash, retvalue, warpID);

	//for now count is always 1
	//else
	//ret = insert(qf, hash, count, flags);

	// check for fullness based on the distance from the home slot to the slot
	// in which the key is inserted
	/*
	if (ret == QF_FULL || ret > DISTANCE_FROM_HOME_SLOT_CUTOFF) {
		float load_factor = qf_get_num_occupied_slots(qf)
			/ (float)qf->metadata->nslots;
		fprintf(stdout, "Load factor: %lf\n", load_factor);
		if (qf->runtimedata->auto_resize) {
			fprintf(stdout, "Resizing the CQF.\n");
			if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) > 0)
			{
				if (ret == QF_FULL) {
					if (count == 1)
						ret = insert1(qf, hash, flags);
					else
						ret = insert(qf, hash, count, flags);
				}
				fprintf(stderr, "Resize finished.\n");
			} else {
				fprintf(stderr, "Resize failed\n");
				ret = QF_FULL;
			}
		} else {
			fprintf(stderr, "The CQF is filling up.\n");
			ret = QF_FULL;
		}
	}
	*/

	return ret;
}

/* Insert `count` copies of (key, value). count == 1 takes the fast insert1
 * path, larger counts go through insert(). count == 0 is a no-op that
 * reports success. No internal locking (QF_NO_LOCK semantics are decided by
 * the lower layers via `flags`). */
__host__ __device__ qf_returns qf_insert(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t
							flags)
{
	// We fill up the CQF up to 95% load factor.
	// This is a very conservative check.
	//TODO: GPU resizing
	/*
	if (qf_get_num_occupied_slots(qf) >= qf->metadata->nslots * 0.95) {
		if (qf->runtimedata->auto_resize) {
			fprintf(stdout, "Resizing the CQF.\n");
			if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) < 0)
			{
				fprintf(stderr, "Resizing the failed.\n");
				return QF_FULL;
			}
		} else
			return QF_FULL;
	}
	*/

	if (count == 0)
		return QF_ITEM_INSERTED;

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key),
													qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}
	uint64_t hash = (key << qf->metadata->value_bits) | (value &
																											 BITMASK(qf->metadata->value_bits));

	//printf("Inside insert, new hash is recorded as %llu\n", hash);
	qf_returns ret;

	if (count == 1){
		ret = insert1(qf, hash, flags);
	} else {
		ret = insert(qf, hash, count, flags);
	}

	// check for fullness based on the distance from the home slot to the slot
	// in which the key is inserted
	/*
	if (ret == QF_FULL || ret > DISTANCE_FROM_HOME_SLOT_CUTOFF) {
		float load_factor = qf_get_num_occupied_slots(qf) /
			(float)qf->metadata->nslots;
		fprintf(stdout, "Load factor: %lf\n", load_factor);
		if
		 (qf->runtimedata->auto_resize) {
			fprintf(stdout, "Resizing the CQF.\n");
			if (qf->runtimedata->container_resize(qf, qf->metadata->nslots * 2) > 0)
			{
				if (ret == QF_FULL) {
					if (count == 1)
						ret = insert1(qf, hash, flags);
					else
						ret = insert(qf, hash, count, flags);
				}
				fprintf(stderr, "Resize finished.\n");
			} else {
				fprintf(stderr, "Resize failed\n");
				ret = QF_FULL;
			}
		} else {
			fprintf(stderr, "The CQF is filling up.\n");
			ret = QF_FULL;
		}
	}
	*/

	return ret;
}

/*------------------------
GPU Modifications
--------------------------*/

//approx filter locking code

//locking implementation for the 16 bit locks
//undefined behavior if you try to unlock a not locked lock
/* Spin until the 16-bit lock at `index` is acquired (0 -> 1 via atomicCAS).
 * Locks are spaced LOCK_DIST apart to reduce cache-line contention. */
__device__ void lock_16(uint16_t * lock, uint64_t index){

	uint16_t zero = 0;
	uint16_t one = 1;

	//while (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) != zero);

	//unsigned short int patch for cuda
	while (atomicCAS((unsigned short int *) &lock[index*LOCK_DIST], (unsigned short int) zero, (unsigned short int) one) != zero);

}

/* Warp-cooperative lock_16: only lane 0 spins; __syncwarp makes the other
 * lanes wait until the lock is held. */
__device__ void lock_16_coop(uint16_t * lock, uint64_t index, int warpID){

	uint16_t zero = 0;
	uint16_t one = 1;

	if (warpID ==0){

	//while (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) != zero);

	//quick patch, cuda wants unsigned short int *
	while (atomicCAS((unsigned short int *) &lock[index*LOCK_DIST], (unsigned short int) zero, (unsigned short int) one) != zero);

	}

	__syncwarp();

}

/* Release the lock (1 -> 0). Undefined if the lock was not held. */
__device__ void unlock_16(uint16_t * lock, uint64_t index){

	uint16_t zero = 0;
	uint16_t one = 1;

	//atomicCAS((uint16_t *) &lock[index*LOCK_DIST], one, zero);

	//CUDA CAS Patch
	atomicCAS((unsigned short int *) &lock[index*LOCK_DIST], (unsigned short int) one, (unsigned short int) zero);
}

//lock_16 but built to be included as a piece of a while loop
// this is more in line with traditional cuda processing, may increase throughput
/* Single-attempt lock acquisition; returns true on success. */
__device__ bool try_lock_16(uint16_t * lock, uint64_t index){

	uint16_t zero = 0;
	uint16_t one = 1;

	//if (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) == zero){
	if (atomicCAS((unsigned short int *) &lock[index*LOCK_DIST], (unsigned short int) zero, (unsigned short int) one) == zero){
		return true;
	}

	return false;

}

/* Warp-cooperative try-lock: lane 0 attempts the CAS and the outcome is
 * broadcast to the whole warp with __shfl_sync. */
__device__ bool try_lock_16_coop(uint16_t * lock, uint64_t index, int warpID){

	uint16_t zero = 0;
	uint16_t one = 1;

	bool ballot = 0;

	if (warpID == 0){
		if (atomicCAS((unsigned short int *) &lock[index*LOCK_DIST], (unsigned short int) zero, (unsigned short int) one) == zero){
			//if (atomicCAS((uint16_t *) &lock[index*LOCK_DIST], zero, one) == zero){
			ballot = 1;
		}
	}

	ballot = __shfl_sync(0xffffffff, ballot, 0);

	return ballot;

}

/* Swap arr[i] and arr[j] (sorting-network helper). */
__device__ __forceinline__ void exchange(uint64_t * arr, uint64_t i, uint64_t j){

	uint64_t temp = arr[i];
	arr[i] = arr[j];
	arr[j] = temp;

	//maybe synchthreads?

}

/* Conditionally swap to enforce sort direction `dir` (sorting-network helper). */
__device__ __forceinline__ void compare(uint64_t * arr, uint64_t i, uint64_t j, bool dir){

	if (dir == (arr[i] > arr[j])){
		exchange(arr, i, j);
	}

}

//return the biggest int of a uint64
/* Index of the highest set bit of n (undefined for n == 0). */
__device__ __forceinline__ int biggest_bit(uint64_t n){

	return 63 - __clzll((unsigned long long int) n);

}

/* NOTE(review): precedence makes this 1UL << (biggest_bit(n)-1), i.e. half
 * the largest power of two <= n (for n=8 it yields 4, not 8). If the intent
 * is "largest power of two <= n" the -1 looks wrong — confirm against the
 * (bitonic-sort?) caller before changing. */
__device__ __forceinline__ uint64_t biggest_pow_2(uint64_t n){

	return 1UL<<biggest_bit(n)-1;

}

/* Kernel: hash vals[idx] into hashes[idx] (in-place safe since each thread
 * touches only its own slot). Note the default-hash path masks with
 * (range-1) here, relying on range being a power of two, whereas the point
 * paths use % range. */
__global__ void hash_all(QF* qf, uint64_t* vals, uint64_t* hashes, uint64_t nvals, uint8_t flags) {

	uint64_t idx = threadIdx.x + blockDim.x * blockIdx.x;

	if (idx >= nvals){
		return;
	}

	uint64_t key = vals[idx];

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key),
													qf->metadata->seed) & (qf->metadata->range - 1);
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}

	//uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits));

	hashes[idx] = key;

	return;

}

//revised work pipeline
// 1) Set all offsets to keys here based on relative offset + keys - skips the launch call later - TODO: double check that (keys + offset) - keys == offset. -- cpp says this works
// 2) subtract sets of keys from each other to get the relative offsets - these will give offsets, last key needs to subtract from origin pointer
// this means that the keys here are set to point to the START of their bucket
/* Kernel: one thread per lock region; binary-search the sorted `keys` array
 * for the first key whose bucket index reaches this region's boundary, and
 * store a pointer to it in runtimedata->buffers[idx]. */
__global__ void set_buffers_binary(QF * qf, uint64_t num_keys, uint64_t * keys, uint8_t flags){

	uint64_t idx = threadIdx.x + blockDim.x * blockIdx.x;

	if (idx >= qf->runtimedata->num_locks) return;

	uint64_t slots_per_lock = NUM_SLOTS_TO_LOCK;

	//since we are finding all boundaries, we only need

	//printf("idx %llu\n", idx);

	//this sounds right? - they divide to go back so I think this is fine
	uint64_t boundary = (slots_per_lock*idx); //<< qf->metadata->bits_per_slot;

	//This is the code I'm stealing that assumption from
	//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;
	//uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot);
	//uint64_t lock_index = hash_bucket_index / slots_per_lock;

	uint64_t lower = 0;
	uint64_t upper = num_keys;
	uint64_t index = upper-lower;

	//upper is non inclusive bound

	//if we exceed bounds that's our index
	while (upper != lower){

		index = lower + (upper - lower)/2;

		if ((keys[index] >> qf->metadata->bits_per_slot) < boundary){

			//false - the list before this point can be removed
			lower = index+1;

			//jump to a new midpoint
		} else if (index==0){

			//will this fix? otherwise need to patch via round up
			upper = index;

		} else if ((keys[index-1] >> qf->metadata->bits_per_slot) < boundary) {

			//set index! this is the first instance where I am valid and the next isnt
			//buffers[idx] = keys+index;
			break;

		} else {

			//we are too far right, all keys to the right do not matter
			upper = index;

		}

	}

	//we either exited or have an edge condition:
	//upper == lower iff 0 or max key
	index = lower + (upper - lower)/2;

	qf->runtimedata->buffers[idx] = keys + index;

}

/* Kernel (single thread): walk the slot array recording the length of each
 * cluster (run of occupied slots up to the next empty slot); the number of
 * clusters found is written to max_clusters[0]. Diagnostic only. */
__global__ void find_clusters(QF* qf, uint64_t * cluster_lengths, uint64_t * max_clusters){

	uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;

	if (tid != 0) return;

	uint64_t start_slot = 0;

	uint64_t i =0;

	while (start_slot < qf->metadata->nslots){

		uint64_t old_start = start_slot;
		start_slot = find_first_empty_slot(qf, start_slot);

		if (start_slot == old_start){
			start_slot++;
		} else {
			cluster_lengths[i] = start_slot-old_start;
			i++;
		}

	}

	max_clusters[0] = i;

}

//this can maybe be rolled into set_buffers_binary
//it performs an identical set of operations that are O(1) here
// O(log n) there, but maybe amortized
/* Kernel: derive each region's key count from the difference of adjacent
 * buffer start pointers (set by set_buffers_binary); the last region's count
 * is measured against the end of the key array. */
__global__ void set_buffer_lens(QF * qf, uint64_t num_keys, uint64_t * keys){

	uint64_t num_buffers = qf->runtimedata->num_locks;

	uint64_t idx = threadIdx.x + blockDim.x*blockIdx.x;

	if (idx >= num_buffers) return;

	uint64_t** buffers = qf->runtimedata->buffers;
	uint64_t * buffer_sizes = qf->runtimedata->buffer_sizes;

	//only 1 thread will diverge - should be fine - any cost already exists because of tail
	if (idx != num_buffers-1){

		//this should work? not 100% convinced but it seems ok
		buffer_sizes[idx] = buffers[idx+1] - buffers[idx];

	} else {

		buffer_sizes[idx] = num_keys - (buffers[idx] - keys);

	}

	return;

}

//insert from buffers using prehashed_data
/* Kernel: even/odd pass over the lock regions (`evenness` selects the
 * parity) — each thread owns one region and serially inserts that region's
 * pre-hashed keys. Alternating parities means no two concurrent threads
 * touch adjacent regions, which is what makes QF_NO_LOCK safe here. */
__global__ void insert_from_buffers_hashed(QF* qf, uint64_t evenness){

	//uint64_t num_buffers, uint64_t** buffers, volatile uint64_t * buffer_counts;

	uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness;

	if (idx >= qf->runtimedata->num_locks) return;

	uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
	uint64_t ** buffers = qf->runtimedata->buffers;

	//at the start, we sort
	//we are exceeding bounds by 1
	//quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0);

	//no need to sort if empty - this will cause overflow as 0-1 == max_uint
	// if (buffer_counts[idx] > 0) {
	// 	quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0);
	// 	//assert(assert_sorted(buffers[idx], buffer_counts[idx]));
	// }

	uint64_t my_count = buffer_counts[idx];

	for (uint64_t i =0; i < my_count; i++){

		// NOTE(review): qf_insert returns qf_returns, stored in int; the
		// return is unused here.
		int ret = qf_insert(qf, buffers[idx][i], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH);

		//internal threadfence. Bad? actually seems to be fine
		//__threadfence();

	}

	__threadfence();

}

//insert from buffers using prehashed_data
//use warp cooperative operations
/* Kernel: warp-per-region variant — 32 lanes cooperate on each insert via
 * qf_insert_not_exists_cooperative. Same even/odd region scheme as above. */
__global__ void insert_from_buffers_cooperative(QF* qf, uint64_t evenness){

	//uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness;

	uint64_t tid = threadIdx.x+blockDim.x*blockIdx.x;

	uint64_t itemID = tid / 32;

	int warpID = tid % 32;

	uint64_t idx = 2*itemID+evenness;

	if (idx >= qf->runtimedata->num_locks) return;

	uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
	uint64_t ** buffers = qf->runtimedata->buffers;

	//at the start, we sort
	//we are exceeding bounds by 1
	//quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0);

	//no need to sort if empty - this will cause overflow as 0-1 == max_uint
	// if (buffer_counts[idx] > 0) {
	// 	quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0);
	// 	//assert(assert_sorted(buffers[idx], buffer_counts[idx]));
	// }

	//uint64_t - uint64_t should yield offset into vals
	//uint64_t absolute_offset = buffers[idx]- buffers;

	uint64_t my_count = buffer_counts[idx];

	for (uint64_t i =0; i < my_count; i++){

		//assert(keys[absolute_offset+i] == buffers[idx][i]);

		uint8_t query;

		qf_returns ret_val = qf_insert_not_exists_cooperative(qf, buffers[idx][i], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH, &query, warpID);

		#if DEBUG_ASSERTS
		assert(ret_val != QF_FULL);
		#endif

		//internal threadfence. Bad? actually seems to be fine
		//__threadfence();

	}

	__threadfence();

}

/* Kernel: like insert_from_buffers_hashed but each key carries an explicit
 * count taken from the parallel `vals` array (offset recovered by pointer
 * subtraction from the `keys` base). */
__global__ void insert_from_buffers_thrust(QF* qf, uint64_t evenness, uint64_t * keys, uint64_t * vals, uint64_t num_keys){

	uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness;

	if (idx >= qf->runtimedata->num_locks) return;

	uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;
	uint64_t ** buffers = qf->runtimedata->buffers;

	//at the start, we sort
	//we are exceeding bounds by 1
	//quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0);

	//no need to sort if empty - this will cause overflow as 0-1 == max_uint
	// if (buffer_counts[idx] > 0) {
	// 	quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0);
	// 	//assert(assert_sorted(buffers[idx], buffer_counts[idx]));
	// }

	//uint64_t - uint64_t should yield offset into vals
	uint64_t absolute_offset = buffers[idx] - keys;

	if (absolute_offset >= num_keys){
		printf("Offset is %llu, num_keys %llu\n", absolute_offset, num_keys);
		return;
	}

	uint64_t my_count = buffer_counts[idx];

	for (uint64_t i =0; i < my_count; i++){

		//assert(keys[absolute_offset+i] == buffers[idx][i]);

		int ret = qf_insert(qf, buffers[idx][i], 0, vals[absolute_offset+i], QF_NO_LOCK | QF_KEY_IS_HASH);

		//internal threadfence. Bad? actually seems to be fine
		//__threadfence();

	}

	__threadfence();

}

//insert from buffers using prehashed_data
/* Kernel: even/odd region pass that REMOVES the pre-hashed keys, iterating
 * each region's buffer back-to-front. */
__global__ void delete_from_buffers_hashed(QF* qf, uint64_t evenness){

	uint64_t idx = 2*(threadIdx.x + blockDim.x * blockIdx.x)+evenness;

	if (idx >= qf->runtimedata->num_locks) return;

	uint64_t ** buffers = qf->runtimedata->buffers;
	uint64_t * buffer_counts = qf->runtimedata->buffer_sizes;

	//at the start, we sort
	//we are exceeding bounds by 1
	//quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0);

	//no need to sort if empty - this will cause overflow as 0-1 == max_uint
	// if (buffer_counts[idx] > 0) {
	// 	quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0);
	// 	//assert(assert_sorted(buffers[idx], buffer_counts[idx]));
	// }

	uint64_t my_count = buffer_counts[idx];

	//0 - my count for loop, working backwords should be faster?
	/* i runs my_count..1 so the unsigned index never wraps below zero */
	for (uint64_t i = my_count; i >=1; i--){

		int ret = qf_remove(qf, buffers[idx][i-1], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH);

		//internal threadfence. Bad? actually seems to be fine
		//__threadfence();

	}

	__threadfence();

}

/* Point-API insert-if-absent: hashes the key, then spins acquiring the two
 * adjacent region locks (lock_index, lock_index+1) before delegating to
 * qf_insert_not_exists. On QF_ITEM_FOUND the existing value is written to
 * returnedVal. */
__device__ qf_returns point_insert_not_exists(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags){

	uint8_t query;

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key),
													qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}

	uint64_t hash = key % qf->metadata->range;

	uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
	//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;

	uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;

	//encode extensions outside of the lock

	while (true){

		if (try_lock_16(qf->runtimedata->locks, lock_index)){

			//this can also be a regular lock?
			//if (try_lock_16(qf->runtimedata->locks, lock_index+1)){

			/* second (adjacent) region lock is taken unconditionally once the
			 * first try-lock succeeds */
			lock_16(qf->runtimedata->locks, lock_index+1);

			qf_returns ret = qf_insert_not_exists(qf, hash, value, 1, QF_NO_LOCK | QF_KEY_IS_HASH, &query);

			if (ret == QF_ITEM_FOUND){
				returnedVal = query;
			}

			__threadfence();

			unlock_16(qf->runtimedata->locks, lock_index+1);
			unlock_16(qf->runtimedata->locks, lock_index);

			return ret;

			//}
			/* unreachable after the return above — leftover from the
			 * commented-out inner try-lock variant */
			unlock_16(qf->runtimedata->locks, lock_index);

		}

	}

}

/* Warp-cooperative point insert-if-absent: all 32 lanes participate; lane 0
 * handles lock acquisition/release, the insert itself runs cooperatively. */
__device__ qf_returns point_insert_not_exists_cooperative(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags, int warpID){

	uint8_t query;

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key),
													qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}

	uint64_t hash = key % qf->metadata->range;

	uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
	//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;

	uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;

	//encode extensions outside of the lock

	while (true){

		if (try_lock_16_coop(qf->runtimedata->locks, lock_index, warpID)){

			//this can also be a regular lock?
			//if (try_lock_16(qf->runtimedata->locks, lock_index+1)){

			lock_16_coop(qf->runtimedata->locks, lock_index+1, warpID);

			qf_returns ret = qf_insert_not_exists_cooperative(qf, hash, value, 1, QF_NO_LOCK | QF_KEY_IS_HASH, &query, warpID);

			if (ret == QF_ITEM_FOUND){
				returnedVal = query;
			}

			__threadfence();

			/* only lane 0 owns the locks */
			if (warpID ==0){
				unlock_16(qf->runtimedata->locks, lock_index+1);
				unlock_16(qf->runtimedata->locks, lock_index);
			}

			return ret;

			//}
			/* unreachable after the return above */
			if (warpID ==0) unlock_16(qf->runtimedata->locks, lock_index);

		}

	}

}

/* Point-API remove of one copy of (key, value), under the same two-lock
 * region protocol as point_insert. Returns qf_remove's status. */
__device__ int point_remove(QF* qf, uint64_t key, uint8_t value, uint8_t flags){

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key),
													qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}

	uint64_t hash = key % qf->metadata->range;

	uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
	//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;

	uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;

	//encode extensions outside of the lock

	while (true){

		if (try_lock_16(qf->runtimedata->locks, lock_index)){

			//this can also be a regular lock?
			//if (try_lock_16(qf->runtimedata->locks, lock_index+1)){

			lock_16(qf->runtimedata->locks, lock_index+1);

			int ret = qf_remove(qf, hash, value, 1, QF_NO_LOCK | QF_KEY_IS_HASH);

			__threadfence();

			unlock_16(qf->runtimedata->locks, lock_index+1);
			unlock_16(qf->runtimedata->locks, lock_index);

			return ret;

			//}
			/* unreachable after the return above — leftover from the
			 * commented-out inner try-lock variant */
			unlock_16(qf->runtimedata->locks, lock_index);

		}

	}

}

/* Point-API insert of one copy of (key, value) under the two-lock region
 * protocol: try-lock the home region, then take the adjacent region lock
 * unconditionally, insert, fence, and release both. */
__device__ qf_returns point_insert(QF* qf, uint64_t key, uint8_t value, uint8_t flags){

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key),
													qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}

	uint64_t hash = key % qf->metadata->range;

	uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
	//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;

	uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;

	//encode extensions outside of the lock

	while (true){

		if (try_lock_16(qf->runtimedata->locks, lock_index)){

			//this can also be a regular lock?
			//if (try_lock_16(qf->runtimedata->locks, lock_index+1)){

			lock_16(qf->runtimedata->locks, lock_index+1);

			qf_returns ret = qf_insert(qf, hash, value, 1, QF_NO_LOCK | QF_KEY_IS_HASH);

			__threadfence();

			unlock_16(qf->runtimedata->locks, lock_index+1);
			unlock_16(qf->runtimedata->locks, lock_index);

			return ret;

			//}
			/* unreachable after the return above */
			unlock_16(qf->runtimedata->locks, lock_index);

		}

	}

}

/* Lock-free point query: hashes the key and reads the filter without taking
 * region locks. Stored value (low bits) is written to returnedVal; the
 * returned count is qf_query's result. */
__device__ uint64_t point_query(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags){

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key),
													qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}

	uint64_t hash = key % qf->metadata->range;

	uint64_t query;

	uint64_t ret = qf_query(qf, hash, &query, QF_NO_LOCK | QF_KEY_IS_HASH);

	returnedVal = query;

	return ret;

}

/* Point query that serializes with concurrent inserts/removes by taking the
 * same two region locks as the mutating point operations. */
__device__ uint64_t point_query_concurrent(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags){

	if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) {
		if (qf->metadata->hash_mode == QF_HASH_DEFAULT)
			key = MurmurHash64A(((void *)&key), sizeof(key),
													qf->metadata->seed) % qf->metadata->range;
		else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE)
			key = hash_64(key, BITMASK(qf->metadata->key_bits));
	}

	uint64_t hash = key % qf->metadata->range;

	uint64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits;
	//uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot;

	uint64_t lock_index = hash_bucket_index / NUM_SLOTS_TO_LOCK;

	//encode extensions outside of the lock

	while (true){

		if (try_lock_16(qf->runtimedata->locks, lock_index)){

			//this can also be a regular lock?
//if (try_lock_16(qf->runtimedata->locks, lock_index+1)){ lock_16(qf->runtimedata->locks, lock_index+1); uint64_t query; uint64_t ret = qf_query(qf, hash, &query, QF_NO_LOCK | QF_KEY_IS_HASH); __threadfence(); unlock_16(qf->runtimedata->locks, lock_index+1); unlock_16(qf->runtimedata->locks, lock_index); returnedVal = query; return ret; //} unlock_16(qf->runtimedata->locks, lock_index); } } } __global__ void point_bulk_get(QF * qf, uint64_t * hashes, uint64_t nitems, uint64_t * counter){ uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >=nitems) return; uint8_t query; //point_query(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags) if (point_query(qf, hashes[tid] % qf->metadata->range, 0, query, QF_NO_LOCK) ==0){ //on item not found increment atomicAdd((unsigned long long int *) counter, (unsigned long long int) 1); } } __global__ void point_bulk_get_nocount(QF * qf, uint64_t * hashes, uint64_t nitems){ uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >=nitems) return; uint8_t query; //point_query(QF* qf, uint64_t key, uint8_t value, uint8_t& returnedVal, uint8_t flags) point_query(qf, hashes[tid] % qf->metadata->range, 0, query, QF_NO_LOCK); } __global__ void bulk_get_cooperative(QF * qf, uint64_t * hashes, uint64_t nitems, uint64_t * counter){ uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x; uint64_t itemID = tid /32; int warpID = tid % 32; if (itemID >= qf->runtimedata->num_locks) return; uint64_t * buffer_counts = qf->runtimedata->buffer_sizes; uint64_t ** buffers = qf->runtimedata->buffers; //at the start, we sort //we are exceeding bounds by 1 //quick_sort(buffers[idx], 0, buffer_counts[idx]-1,0); //no need to sort if empty - this will cause overflow as 0-1 == max_uint // if (buffer_counts[idx] > 0) { // quick_sort(buffers[idx], 0, buffer_counts[idx]-1, 0); // //assert(assert_sorted(buffers[idx], buffer_counts[idx])); // } uint64_t my_count = buffer_counts[itemID]; for (uint64_t i =warpID; i < my_count; 
i+=32){ //int ret = qf_insert(qf, buffers[itemID][i], 0, 1, QF_NO_LOCK | QF_KEY_IS_HASH); uint8_t query; if (point_query(qf, buffers[itemID][i] % qf->metadata->range, 0, query, QF_NO_LOCK | QF_KEY_IS_HASH) ==0){ //atomicAdd((unsigned long long int *) counter, (unsigned long long int) 1); } //internal threadfence. Bad? actually seems to be fine //__threadfence(); } } __host__ uint64_t cooperative_bulk_get_wrapper(QF * qf, uint64_t * hashes, uint64_t nitems){ auto start = std::chrono::high_resolution_clock::now(); uint64_t key_block_size = 32; //start with num_locks, get counts //This is slow, but there isn't a better way to do it //we'll have to see how this affects performance uint64_t * dev_num_locks; cudaMallocManaged((void **)&dev_num_locks, sizeof(uint64_t)); get_dev_nvals<<<1,1>>>(qf, dev_num_locks); cudaDeviceSynchronize(); uint64_t num_locks = dev_num_locks[0]; cudaFree(dev_num_locks); uint64_t key_block = (nitems-1)/key_block_size + 1; //keys are hashed, now need to treat them as hashed in all further functions hash_all<<<key_block, key_block_size>>>(qf, hashes, hashes, nitems, 0); thrust::sort(thrust::device, hashes, hashes+nitems); set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nitems, hashes, 0); set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nitems, hashes); uint64_t * misses; //this is fine, should never be triggered cudaMallocManaged((void **)&misses, sizeof(uint64_t)); cudaMemset(misses, 0, sizeof(uint64_t)); cudaDeviceSynchronize(); auto midpoint = std::chrono::high_resolution_clock::now(); const int bulk_block_size = 1024; bulk_get_cooperative<<<(nitems*32-1)/bulk_block_size+1, bulk_block_size>>>(qf, hashes, nitems, misses); cudaDeviceSynchronize(); auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> sort_diff = midpoint-start; std::chrono::duration<double> diff = end-midpoint; std::cout << "sorted " << nitems << " in " << sort_diff.count() << " seconds\n"; 
std::cout << "Queried " << nitems << " in " << diff.count() << " seconds\n"; uint64_t output = misses[0]; cudaFree(misses); return output; } __host__ uint64_t point_get_wrapper(QF * qf, uint64_t * hashes, uint64_t nitems){ // uint64_t * misses; // //this is fine, should never be triggered // cudaMallocManaged((void **)&misses, sizeof(uint64_t)); // cudaMemset(misses, 0, sizeof(uint64_t)); point_bulk_get_nocount<<<(nitems-1)/512+1, 512>>>(qf, hashes, nitems); cudaDeviceSynchronize(); // uint64_t toReturn = *misses; // cudaFree(misses); // return toReturn; return 0; } __host__ uint64_t point_get_wrapper_fp(QF * qf, uint64_t * hashes, uint64_t nitems){ uint64_t * misses; //this is fine, should never be triggered cudaMallocManaged((void **)&misses, sizeof(uint64_t)); cudaMemset(misses, 0, sizeof(uint64_t)); point_bulk_get<<<(nitems-1)/512+1, 512>>>(qf, hashes, nitems, misses); cudaDeviceSynchronize(); uint64_t toReturn = *misses; cudaFree(misses); return toReturn; //return 0; } __global__ void point_bulk_insert(QF * qf, uint64_t * hashes, uint64_t nitems){ uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >=nitems) return; //#if DROP_ON_RUNEND point_insert(qf, hashes[tid], 0, 0); // #else // assert(point_insert(qf, hashes[tid], 0, 0) != QF_FULL); // #endif } __global__ void point_bulk_insert_cooperative(QF * qf, uint64_t * hashes, uint64_t nitems){ uint64_t itemID = threadIdx.x + blockIdx.x * blockDim.x; uint64_t tid = itemID / 32; int warpID = itemID % 32; if (tid >=nitems) return; uint8_t retvalue; assert(point_insert_not_exists_cooperative(qf, hashes[tid], 0, retvalue, 0, warpID) != QF_FULL); } //set a uint64_t reference to point at device memory; __global__ void get_dev_nvals(QF* qf, uint64_t * external_nvals){ uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid >= 1) return; external_nvals[0] = qf->runtimedata->num_locks; } //modified version of buffers_provided - performs an initial bulk hash, should save work over other versions //note: this 
DOES modify the given buffer - fine for all versions now //This variant performs an ititial sort that allows us to save time overall //as we avoid the atomic count-off and any sort of cross-thread communication __host__ void bulk_insert(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) { uint64_t key_block_size = 32; uint64_t key_block = (nvals -1)/key_block_size + 1; //start with num_locks, get counts //This is slow, but there isn't a better way to do it //we'll have to see how this affects performance uint64_t * dev_num_locks; cudaMallocManaged((void **)&dev_num_locks, sizeof(uint64_t)); get_dev_nvals<<<1,1>>>(qf, dev_num_locks); cudaDeviceSynchronize(); uint64_t num_locks = dev_num_locks[0]; cudaFree(dev_num_locks); //keys are hashed, now need to treat them as hashed in all further functions hash_all<<<key_block, key_block_size>>>(qf, keys, keys, nvals, flags); thrust::sort(thrust::device, keys, keys+nvals); set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys, flags); set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys); //insert_from_buffers_hashed_onepass<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, num_locks, buffers, buffer_sizes); //return; const int bulk_block_size = 32; uint64_t evenness = 0; insert_from_buffers_hashed<<<(num_locks-1)/bulk_block_size+1, bulk_block_size>>>(qf, evenness); evenness = 1; insert_from_buffers_hashed<<<(num_locks-1)/bulk_block_size+1, bulk_block_size>>>(qf, evenness); } __host__ void bulk_insert_cooperative(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) { uint64_t key_block_size = 32; uint64_t key_block = (nvals -1)/key_block_size + 1; //start with num_locks, get counts //This is slow, but there isn't a better way to do it //we'll have to see how this affects performance uint64_t * dev_num_locks; cudaMallocManaged((void **)&dev_num_locks, sizeof(uint64_t)); get_dev_nvals<<<1,1>>>(qf, dev_num_locks); cudaDeviceSynchronize(); uint64_t 
num_locks = dev_num_locks[0]; cudaFree(dev_num_locks); //keys are hashed, now need to treat them as hashed in all further functions hash_all<<<key_block, key_block_size>>>(qf, keys, keys, nvals, flags); thrust::sort(thrust::device, keys, keys+nvals); set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys, flags); set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys); //insert_from_buffers_hashed_onepass<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, num_locks, buffers, buffer_sizes); //return; uint64_t evenness = 0; insert_from_buffers_cooperative<<<(32*num_locks-1)/key_block_size+1, key_block_size>>>(qf, evenness); evenness = 1; insert_from_buffers_cooperative<<<(32*num_locks-1)/key_block_size+1, key_block_size>>>(qf, evenness); } //modified version of buffers_provided - performs an initial bulk hash, should save work over other versions //note: this DOES modify the given buffer - fine for all versions now //This variant performs an ititial sort that allows us to save time overall //as we avoid the atomic count-off and any sort of cross-thread communication __host__ void bulk_insert_reduce(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) { uint64_t key_block_size = 32; uint64_t key_block = (nvals -1)/key_block_size + 1; //start with num_locks, get counts //This is slow, but there isn't a better way to uint64_t * dev_num_locks; cudaMallocManaged((void **)&dev_num_locks, sizeof(uint64_t)); get_dev_nvals<<<1,1>>>(qf, dev_num_locks); cudaDeviceSynchronize(); uint64_t num_locks = dev_num_locks[0]; cudaFree(dev_num_locks); //keys are hashed, now need to treat them as hashed in all further functions hash_all<<<key_block, key_block_size>>>(qf, keys, keys, nvals, flags); thrust::sort(thrust::device, keys, keys+nvals); thrust::device_ptr<uint64_t> keys_ptr(keys); thrust::device_ptr<uint64_t> dupe_counts= thrust::device_malloc<uint64_t>(nvals); thrust::fill(dupe_counts, dupe_counts+nvals, 1); 
thrust::device_ptr<uint64_t> thrust_keys = thrust::device_malloc<uint64_t>(nvals); thrust::device_ptr <uint64_t> thrust_vals = thrust::device_malloc<uint64_t>(nvals); thrust::pair<thrust::device_ptr<uint64_t>,thrust::device_ptr<uint64_t>> new_end; new_end = thrust::reduce_by_key(thrust::device, keys_ptr, keys_ptr+nvals, dupe_counts, thrust_keys, thrust_vals); cudaDeviceSynchronize(); uint64_t new_nvals = new_end.first - thrust_keys; printf("New nvals %llu\n", new_nvals); printf("Error after this is pointer cast?\n"); uint64_t * new_keys = thrust::raw_pointer_cast(thrust_keys); uint64_t * new_key_counts = thrust::raw_pointer_cast(thrust_vals); cudaDeviceSynchronize(); printf("Error after this in binary?\n"); //set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, new_nvals, slots_per_lock, new_keys, num_locks, buffers, flags); set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, new_nvals, new_keys, flags); //set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, new_nvals, new_keys, num_locks, (uint64_t *) buffer_sizes, buffers); set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, new_nvals, new_keys); //insert_from_buffers_hashed_onepass<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, num_locks, buffers, buffer_sizes); //return; cudaDeviceSynchronize(); printf("Thrust buffers attached\n"); uint64_t evenness = 0; insert_from_buffers_thrust<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, evenness, new_keys,new_key_counts, new_nvals); evenness = 1; insert_from_buffers_thrust<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, evenness, new_keys, new_key_counts, new_nvals); cudaDeviceSynchronize(); printf("Insertion done.\n"); //free resources thrust::device_free(thrust_keys); thrust::device_free(thrust_vals); thrust::device_free(dupe_counts); } __host__ void bulk_delete(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags) { uint64_t key_block_size = 32; uint64_t 
key_block = (nvals -1)/key_block_size + 1; //start with num_locks, get counts //This is slow, but there isn't a better way to uint64_t * dev_num_locks; cudaMallocManaged((void **)&dev_num_locks, sizeof(uint64_t)); get_dev_nvals<<<1,1>>>(qf, dev_num_locks); cudaDeviceSynchronize(); uint64_t num_locks = dev_num_locks[0]; cudaFree(dev_num_locks); //keys are hashed, now need to treat them as hashed in all further functions hash_all<<<key_block, key_block_size>>>(qf, keys, keys, nvals, flags); thrust::sort(thrust::device, keys, keys+nvals); //set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, slots_per_lock, keys, num_locks, buffers, flags); set_buffers_binary<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys, flags); //set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys, num_locks, (uint64_t *) buffer_sizes, buffers); set_buffer_lens<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, nvals, keys); //insert_from_buffers_hashed_onepass<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, num_locks, buffers, buffer_sizes); //return; uint64_t evenness = 0; delete_from_buffers_hashed<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, evenness); evenness = 1; delete_from_buffers_hashed<<<(num_locks-1)/key_block_size+1, key_block_size>>>(qf, evenness); } __global__ void bulk_get_nocount(QF * qf, uint64_t * vals, uint64_t nvals, uint64_t key_count, uint8_t flags){ uint64_t tid = threadIdx.x+blockDim.x*blockIdx.x; if (tid >= nvals) return; uint64_t count = qf_count_key_value(qf, vals[tid], 0, 0); return; } __global__ void bulk_get_misses(QF * qf, uint64_t * vals, uint64_t nvals, uint64_t key_count, uint64_t * counter, uint8_t flags){ uint64_t tid = threadIdx.x + blockDim.x * blockIdx.x; //should never happen, but just in case if (tid >= nvals) return; uint64_t count = qf_count_key_value(qf, vals[tid], 0, 0); if (count < key_count) { atomicAdd((long long unsigned int *)counter, (long 
long unsigned int) 1); } } __global__ void bulk_get_hits_kernel(QF * qf, uint64_t * vals, bool * hits, uint64_t nvals){ uint64_t tid = threadIdx.x + blockDim.x * blockIdx.x; //should never happen, but just in case if (tid >= nvals) return; uint64_t count = qf_count_key_value(qf, vals[tid], 0, 0); if (count >= 1) { hits[tid] = 1; } } __host__ void qf_bulk_insert(QF* qf, uint64_t nvals, uint64_t* keys, uint8_t flags){ bulk_insert(qf, nvals, keys, flags); } __host__ void bulk_get_hits(QF * qf, uint64_t * vals, bool * hits, uint64_t nvals){ bulk_get_hits_kernel<<<(nvals -1)/512+1, 512>>>(qf, vals, hits, nvals); } __global__ void bulk_get_kernel(QF * qf, uint64_t * vals, uint64_t nvals, uint64_t * returns, uint8_t flags){ uint64_t tid = threadIdx.x + blockDim.x * blockIdx.x; //should never happen, but just in case if (tid >= nvals) return; returns[tid] = qf_count_key_value(qf, vals[tid], 0, flags); } __host__ void bulk_get(QF * qf, uint64_t nvals, uint64_t * vals, uint64_t * returns){ bulk_get_kernel<<<(nvals-1)/512+1, 512>>>(qf, vals, nvals, returns, QF_NO_LOCK); } __host__ uint64_t bulk_get_misses_wrapper(QF * qf, uint64_t * vals, uint64_t nvals){ uint64_t * misses; //this is fine, should never be triggered cudaMallocManaged((void **)&misses, sizeof(uint64_t)); cudaMemset(misses, 0, sizeof(uint64_t)); bulk_get_misses<<<(nvals-1)/512+1, 512>>>(qf, vals, nvals, 1, misses, QF_NO_LOCK); cudaDeviceSynchronize(); uint64_t toReturn = *misses; cudaFree(misses); return toReturn; //return 0; } //this bad boy doesn't check __host__ uint64_t bulk_get_nocount_wrapper(QF * qf, uint64_t * vals, uint64_t nvals){ bulk_get_nocount<<<(nvals-1)/512+1, 512>>>(qf, vals, nvals, 1, QF_NO_LOCK); cudaDeviceSynchronize(); return 0; //return 0; } __host__ __device__ int qf_set_count(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t flags) { if (count == 0) return 0; uint64_t cur_count = qf_count_key_value(qf, key, value, flags); int64_t delta = count - cur_count; int ret; if (delta 
== 0) ret = 0; else if (delta > 0) ret = qf_insert(qf, key, value, delta, flags); else ret = qf_remove(qf, key, value, labs(delta), flags); return ret; } __host__ __device__ int qf_remove(QF *qf, uint64_t key, uint64_t value, uint64_t count, uint8_t flags) { if (count == 0) return true; if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) { if (qf->metadata->hash_mode == QF_HASH_DEFAULT) key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range; else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE) key = hash_64(key, BITMASK(qf->metadata->key_bits)); } uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits)); return _remove(qf, hash, count, flags); } __host__ __device__ int qf_delete_key_value(QF *qf, uint64_t key, uint64_t value, uint8_t flags) { uint64_t count = qf_count_key_value(qf, key, value, flags); if (count == 0) return true; if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) { if (qf->metadata->hash_mode == QF_HASH_DEFAULT) key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range; else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE) key = hash_64(key, BITMASK(qf->metadata->key_bits)); } uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits)); return _remove(qf, hash, count, flags); } __host__ __device__ uint64_t qf_count_key_value(const QF *qf, uint64_t key, uint64_t value, uint8_t flags) { if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) { if (qf->metadata->hash_mode == QF_HASH_DEFAULT) key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range; else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE) key = hash_64(key, BITMASK(qf->metadata->key_bits)); } uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits)); uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot); int64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot; if 
(!is_occupied(qf, hash_bucket_index)) return 0; int64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index-1) + 1; if (runstart_index < hash_bucket_index) runstart_index = hash_bucket_index; /* printf("MC RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */ uint64_t current_remainder, current_count, current_end; do { current_end = decode_counter(qf, runstart_index, &current_remainder, &current_count); if (current_remainder == hash_remainder) return current_count; runstart_index = current_end + 1; } while (!is_runend(qf, current_end)); return 0; } __host__ __device__ uint64_t qf_query(const QF *qf, uint64_t key, uint64_t *value, uint8_t flags) { if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) { if (qf->metadata->hash_mode == QF_HASH_DEFAULT) key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range; else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE) key = hash_64(key, BITMASK(qf->metadata->key_bits)); } uint64_t hash = key; uint64_t hash_remainder = hash & BITMASK(qf->metadata->key_remainder_bits); int64_t hash_bucket_index = hash >> qf->metadata->key_remainder_bits; if (!is_occupied(qf, hash_bucket_index)) return 0; int64_t runstart_index = hash_bucket_index == 0 ? 
0 : run_end(qf, hash_bucket_index-1) + 1; if (runstart_index < hash_bucket_index) runstart_index = hash_bucket_index; /* printf("MC RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */ uint64_t current_remainder, current_count, current_end; do { current_end = decode_counter(qf, runstart_index, &current_remainder, &current_count); *value = current_remainder & BITMASK(qf->metadata->value_bits); current_remainder = current_remainder >> qf->metadata->value_bits; if (current_remainder == hash_remainder) { return current_count; } runstart_index = current_end + 1; } while (!is_runend(qf, current_end)); return 0; } __host__ __device__ int64_t qf_get_unique_index(const QF *qf, uint64_t key, uint64_t value, uint8_t flags) { if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) { if (qf->metadata->hash_mode == QF_HASH_DEFAULT) key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range; else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE) key = hash_64(key, BITMASK(qf->metadata->key_bits)); } uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits)); uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot); int64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot; if (!is_occupied(qf, hash_bucket_index)) return QF_DOESNT_EXIST; int64_t runstart_index = hash_bucket_index == 0 ? 
0 : run_end(qf, hash_bucket_index-1) + 1; if (runstart_index < hash_bucket_index) runstart_index = hash_bucket_index; /* printf("MC RUNSTART: %02lx RUNEND: %02lx\n", runstart_index, runend_index); */ uint64_t current_remainder, current_count, current_end; do { current_end = decode_counter(qf, runstart_index, &current_remainder, &current_count); if (current_remainder == hash_remainder) return runstart_index; runstart_index = current_end + 1; } while (!is_runend(qf, current_end)); return QF_DOESNT_EXIST; } enum qf_hashmode qf_get_hashmode(const QF *qf) { return qf->metadata->hash_mode; } uint64_t qf_get_hash_seed(const QF *qf) { return qf->metadata->seed; } __uint64_t qf_get_hash_range(const QF *qf) { return qf->metadata->range; } bool qf_is_auto_resize_enabled(const QF *qf) { if (qf->runtimedata->auto_resize == 1) return true; return false; } uint64_t qf_get_total_size_in_bytes(const QF *qf) { return qf->metadata->total_size_in_bytes; } uint64_t qf_get_nslots(const QF *qf) { return qf->metadata->nslots; } uint64_t qf_get_num_occupied_slots(const QF *qf) { pc_sync(&qf->runtimedata->pc_noccupied_slots); return qf->metadata->noccupied_slots; } uint64_t qf_get_num_key_bits(const QF *qf) { return qf->metadata->key_bits; } uint64_t qf_get_num_value_bits(const QF *qf) { return qf->metadata->value_bits; } uint64_t qf_get_num_key_remainder_bits(const QF *qf) { return qf->metadata->key_remainder_bits; } uint64_t qf_get_bits_per_slot(const QF *qf) { return qf->metadata->bits_per_slot; } uint64_t qf_get_sum_of_counts(const QF *qf) { pc_sync(&qf->runtimedata->pc_nelts); return qf->metadata->nelts; } uint64_t qf_get_num_distinct_key_value_pairs(const QF *qf) { pc_sync(&qf->runtimedata->pc_ndistinct_elts); return qf->metadata->ndistinct_elts; } void qf_sync_counters(const QF *qf) { pc_sync(&qf->runtimedata->pc_ndistinct_elts); pc_sync(&qf->runtimedata->pc_nelts); pc_sync(&qf->runtimedata->pc_noccupied_slots); } /* initialize the iterator at the run corresponding * to the position 
index */ int64_t qf_iterator_from_position(const QF *qf, QFi *qfi, uint64_t position) { if (position == 0xffffffffffffffff) { qfi->current = 0xffffffffffffffff; qfi->qf = qf; return QFI_INVALID; } assert(position < qf->metadata->nslots); if (!is_occupied(qf, position)) { uint64_t block_index = position; uint64_t idx = bitselect(get_block(qf, block_index)->occupieds[0], 0); if (idx == 64) { while(idx == 64 && block_index < qf->metadata->nblocks) { block_index++; idx = bitselect(get_block(qf, block_index)->occupieds[0], 0); } } position = block_index * QF_SLOTS_PER_BLOCK + idx; } qfi->qf = qf; qfi->num_clusters = 0; qfi->run = position; qfi->current = position == 0 ? 0 : run_end(qfi->qf, position-1) + 1; if (qfi->current < position) qfi->current = position; #ifdef LOG_CLUSTER_LENGTH qfi->c_info = (cluster_data* )calloc(qf->metadata->nslots/32, sizeof(cluster_data)); if (qfi->c_info == NULL) { perror("Couldn't allocate memory for c_info."); exit(EXIT_FAILURE); } qfi->cur_start_index = position; qfi->cur_length = 1; #endif if (qfi->current >= qf->metadata->nslots) return QFI_INVALID; return qfi->current; } int64_t qf_iterator_from_key_value(const QF *qf, QFi *qfi, uint64_t key, uint64_t value, uint8_t flags) { if (key >= qf->metadata->range) { qfi->current = 0xffffffffffffffff; qfi->qf = qf; return QFI_INVALID; } qfi->qf = qf; qfi->num_clusters = 0; if (GET_KEY_HASH(flags) != QF_KEY_IS_HASH) { if (qf->metadata->hash_mode == QF_HASH_DEFAULT) key = MurmurHash64A(((void *)&key), sizeof(key), qf->metadata->seed) % qf->metadata->range; else if (qf->metadata->hash_mode == QF_HASH_INVERTIBLE) key = hash_64(key, BITMASK(qf->metadata->key_bits)); } uint64_t hash = (key << qf->metadata->value_bits) | (value & BITMASK(qf->metadata->value_bits)); uint64_t hash_remainder = hash & BITMASK(qf->metadata->bits_per_slot); uint64_t hash_bucket_index = hash >> qf->metadata->bits_per_slot; bool flag = false; // If a run starts at "position" move the iterator to point it to the // smallest 
key greater than or equal to "hash". if (is_occupied(qf, hash_bucket_index)) { uint64_t runstart_index = hash_bucket_index == 0 ? 0 : run_end(qf, hash_bucket_index-1) + 1; if (runstart_index < hash_bucket_index) runstart_index = hash_bucket_index; uint64_t current_remainder, current_count, current_end; do { current_end = decode_counter(qf, runstart_index, &current_remainder, &current_count); if (current_remainder >= hash_remainder) { flag = true; break; } runstart_index = current_end + 1; } while (!is_runend(qf, current_end)); // found "hash" or smallest key greater than "hash" in this run. if (flag) { qfi->run = hash_bucket_index; qfi->current = runstart_index; } } // If a run doesn't start at "position" or the largest key in the run // starting at "position" is smaller than "hash" then find the start of the // next run. if (!is_occupied(qf, hash_bucket_index) || !flag) { uint64_t position = hash_bucket_index; assert(position < qf->metadata->nslots); uint64_t block_index = position / QF_SLOTS_PER_BLOCK; uint64_t idx = bitselect(get_block(qf, block_index)->occupieds[0], 0); if (idx == 64) { while(idx == 64 && block_index < qf->metadata->nblocks) { block_index++; idx = bitselect(get_block(qf, block_index)->occupieds[0], 0); } } position = block_index * QF_SLOTS_PER_BLOCK + idx; qfi->run = position; qfi->current = position == 0 ? 
0 : run_end(qfi->qf, position-1) + 1; if (qfi->current < position) qfi->current = position; } if (qfi->current >= qf->metadata->nslots) return QFI_INVALID; return qfi->current; } static int qfi_get(const QFi *qfi, uint64_t *key, uint64_t *value, uint64_t *count) { if (qfi_end(qfi)) return QFI_INVALID; uint64_t current_remainder, current_count; decode_counter(qfi->qf, qfi->current, &current_remainder, &current_count); *value = current_remainder & BITMASK(qfi->qf->metadata->value_bits); current_remainder = current_remainder >> qfi->qf->metadata->value_bits; *key = (qfi->run << qfi->qf->metadata->key_remainder_bits) | current_remainder; *count = current_count; return 0; } int qfi_get_key(const QFi *qfi, uint64_t *key, uint64_t *value, uint64_t *count) { *key = *value = *count = 0; int ret = qfi_get(qfi, key, value, count); if (ret == 0) { if (qfi->qf->metadata->hash_mode == QF_HASH_DEFAULT) { *key = 0; *value = 0; *count = 0; return QF_INVALID; } else if (qfi->qf->metadata->hash_mode == QF_HASH_INVERTIBLE) *key = hash_64i(*key, BITMASK(qfi->qf->metadata->key_bits)); } return ret; } int qfi_get_hash(const QFi *qfi, uint64_t *key, uint64_t *value, uint64_t *count) { *key = *value = *count = 0; return qfi_get(qfi, key, value, count); } int qfi_next(QFi *qfi) { if (qfi_end(qfi)) return QFI_INVALID; else { /* move to the end of the current counter*/ uint64_t current_remainder, current_count; qfi->current = decode_counter(qfi->qf, qfi->current, &current_remainder, &current_count); if (!is_runend(qfi->qf, qfi->current)) { qfi->current++; #ifdef LOG_CLUSTER_LENGTH qfi->cur_length++; #endif if (qfi_end(qfi)) return QFI_INVALID; return 0; } else { #ifdef LOG_CLUSTER_LENGTH /* save to check if the new current is the new cluster. 
*/ uint64_t old_current = qfi->current; #endif uint64_t block_index = qfi->run / QF_SLOTS_PER_BLOCK; uint64_t rank = bitrank(get_block(qfi->qf, block_index)->occupieds[0], qfi->run % QF_SLOTS_PER_BLOCK); uint64_t next_run = bitselect(get_block(qfi->qf, block_index)->occupieds[0], rank); if (next_run == 64) { rank = 0; while (next_run == 64 && block_index < qfi->qf->metadata->nblocks) { block_index++; next_run = bitselect(get_block(qfi->qf, block_index)->occupieds[0], rank); } } if (block_index == qfi->qf->metadata->nblocks) { /* set the index values to max. */ qfi->run = qfi->current = qfi->qf->metadata->xnslots; return QFI_INVALID; } qfi->run = block_index * QF_SLOTS_PER_BLOCK + next_run; qfi->current++; if (qfi->current < qfi->run) qfi->current = qfi->run; #ifdef LOG_CLUSTER_LENGTH if (qfi->current > old_current + 1) { /* new cluster. */ if (qfi->cur_length > 10) { qfi->c_info[qfi->num_clusters].start_index = qfi->cur_start_index; qfi->c_info[qfi->num_clusters].length = qfi->cur_length; qfi->num_clusters++; } qfi->cur_start_index = qfi->run; qfi->cur_length = 1; } else { qfi->cur_length++; } #endif return 0; } } } bool qfi_end(const QFi *qfi) { if (qfi->current >= qfi->qf->metadata->xnslots /*&& is_runend(qfi->qf, qfi->current)*/) return true; return false; } /* * Merge qfa and qfb into qfc */ /* * iterate over both qf (qfa and qfb) * simultaneously * for each index i * min(get_value(qfa, ia) < get_value(qfb, ib)) * insert(min, ic) * increment either ia or ib, whichever is minimum. 
*/ void qf_merge(const QF *qfa, const QF *qfb, QF *qfc) { QFi qfia, qfib; qf_iterator_from_position(qfa, &qfia, 0); qf_iterator_from_position(qfb, &qfib, 0); if (qfa->metadata->hash_mode != qfc->metadata->hash_mode && qfa->metadata->seed != qfc->metadata->seed && qfb->metadata->hash_mode != qfc->metadata->hash_mode && qfb->metadata->seed != qfc->metadata->seed) { fprintf(stderr, "Output QF and input QFs do not have the same hash mode or seed.\n"); exit(1); } uint64_t keya, valuea, counta, keyb, valueb, countb; qfi_get_hash(&qfia, &keya, &valuea, &counta); qfi_get_hash(&qfib, &keyb, &valueb, &countb); do { if (keya < keyb) { qf_insert(qfc, keya, valuea, counta, QF_NO_LOCK | QF_KEY_IS_HASH); qfi_next(&qfia); qfi_get_hash(&qfia, &keya, &valuea, &counta); } else { qf_insert(qfc, keyb, valueb, countb, QF_NO_LOCK | QF_KEY_IS_HASH); qfi_next(&qfib); qfi_get_hash(&qfib, &keyb, &valueb, &countb); } } while(!qfi_end(&qfia) && !qfi_end(&qfib)); if (!qfi_end(&qfia)) { do { qfi_get_hash(&qfia, &keya, &valuea, &counta); qf_insert(qfc, keya, valuea, counta, QF_NO_LOCK | QF_KEY_IS_HASH); } while(!qfi_next(&qfia)); } if (!qfi_end(&qfib)) { do { qfi_get_hash(&qfib, &keyb, &valueb, &countb); qf_insert(qfc, keyb, valueb, countb, QF_NO_LOCK | QF_KEY_IS_HASH); } while(!qfi_next(&qfib)); } } /* * Merge an array of qfs into the resultant QF */ void qf_multi_merge(const QF *qf_arr[], int nqf, QF *qfr) { int i; QFi qfi_arr[nqf]; int smallest_idx = 0; uint64_t smallest_key = UINT64_MAX; for (i=0; i<nqf; i++) { if (qf_arr[i]->metadata->hash_mode != qfr->metadata->hash_mode && qf_arr[i]->metadata->seed != qfr->metadata->seed) { fprintf(stderr, "Output QF and input QFs do not have the same hash mode or seed.\n"); exit(1); } qf_iterator_from_position(qf_arr[i], &qfi_arr[i], 0); } DEBUG_CQF("Merging %d CQFs\n", nqf); for (i=0; i<nqf; i++) { DEBUG_CQF("CQF %d\n", i); DEBUG_DUMP(qf_arr[i]); } while (nqf > 1) { uint64_t keys[nqf]; uint64_t values[nqf]; uint64_t counts[nqf]; for (i=0; i<nqf; i++) 
qfi_get_hash(&qfi_arr[i], &keys[i], &values[i], &counts[i]); do { smallest_key = UINT64_MAX; for (i=0; i<nqf; i++) { if (keys[i] < smallest_key) { smallest_key = keys[i]; smallest_idx = i; } } qf_insert(qfr, keys[smallest_idx], values[smallest_idx], counts[smallest_idx], QF_NO_LOCK | QF_KEY_IS_HASH); qfi_next(&qfi_arr[smallest_idx]); qfi_get_hash(&qfi_arr[smallest_idx], &keys[smallest_idx], &values[smallest_idx], &counts[smallest_idx]); } while(!qfi_end(&qfi_arr[smallest_idx])); /* remove the qf that is exhausted from the array */ if (smallest_idx < nqf-1) memmove(&qfi_arr[smallest_idx], &qfi_arr[smallest_idx+1], (nqf-smallest_idx-1)*sizeof(qfi_arr[0])); nqf--; } if (!qfi_end(&qfi_arr[0])) { uint64_t iters = 0; do { uint64_t key, value, count; qfi_get_hash(&qfi_arr[0], &key, &value, &count); qf_insert(qfr, key, value, count, QF_NO_LOCK | QF_KEY_IS_HASH); qfi_next(&qfi_arr[0]); iters++; } while(!qfi_end(&qfi_arr[0])); DEBUG_CQF("Num of iterations: %lu\n", iters); } DEBUG_CQF("%s", "Final CQF after merging.\n"); DEBUG_DUMP(qfr); return; } /* find cosine similarity between two QFs. */ uint64_t qf_inner_product(const QF *qfa, const QF *qfb) { uint64_t acc = 0; QFi qfi; const QF *qf_mem, *qf_disk; if (qfa->metadata->hash_mode != qfb->metadata->hash_mode && qfa->metadata->seed != qfb->metadata->seed) { fprintf(stderr, "Input QFs do not have the same hash mode or seed.\n"); exit(1); } // create the iterator on the larger QF. if (qfa->metadata->total_size_in_bytes > qfb->metadata->total_size_in_bytes) { qf_mem = qfb; qf_disk = qfa; } else { qf_mem = qfa; qf_disk = qfb; } qf_iterator_from_position(qf_disk, &qfi, 0); do { uint64_t key = 0, value = 0, count = 0; uint64_t count_mem; qfi_get_hash(&qfi, &key, &value, &count); if ((count_mem = qf_count_key_value(qf_mem, key, 0, QF_KEY_IS_HASH)) > 0) { acc += count*count_mem; } } while (!qfi_next(&qfi)); return acc; } /* find cosine similarity between two QFs. 
*/ void qf_intersect(const QF *qfa, const QF *qfb, QF *qfr) { QFi qfi; const QF *qf_mem, *qf_disk; if (qfa->metadata->hash_mode != qfr->metadata->hash_mode && qfa->metadata->seed != qfr->metadata->seed && qfb->metadata->hash_mode != qfr->metadata->hash_mode && qfb->metadata->seed != qfr->metadata->seed) { fprintf(stderr, "Output QF and input QFs do not have the same hash mode or seed.\n"); exit(1); } // create the iterator on the larger QF. if (qfa->metadata->total_size_in_bytes > qfb->metadata->total_size_in_bytes) { qf_mem = qfb; qf_disk = qfa; } else { qf_mem = qfa; qf_disk = qfb; } qf_iterator_from_position(qf_disk, &qfi, 0); do { uint64_t key = 0, value = 0, count = 0; qfi_get_hash(&qfi, &key, &value, &count); if (qf_count_key_value(qf_mem, key, 0, QF_KEY_IS_HASH) > 0) qf_insert(qfr, key, value, count, QF_NO_LOCK | QF_KEY_IS_HASH); } while (!qfi_next(&qfi)); }
57cb9516ea7d963e8d5b868966c2f0ae92ce1cc0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm1<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, hipblasHandle_t &handle) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? 
K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm1<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C, hipblasHandle_t &handle) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv1<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y, hipblasHandle_t &handle) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(handle, cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv1<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y, hipblasHandle_t &handle ) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(handle, cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy1<float>(const int N, const float alpha, const float* X, float* Y,hipblasHandle_t &handle) { CUBLAS_CHECK(hipblasSaxpy(handle, N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy1<double>(const int N, const double alpha, const double* X, double* Y,hipblasHandle_t &handle) { CUBLAS_CHECK(hipblasDaxpy(handle, N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal1<float>(const int N, const float alpha, float *X, hipblasHandle_t &handle) { CUBLAS_CHECK(hipblasSscal(handle, N, &alpha, X, 1)); } template <> void caffe_gpu_scal1<double>(const int N, const double alpha, double *X,hipblasHandle_t &handle) { 
CUBLAS_CHECK(hipblasDscal(handle, N, &alpha, X, 1)); } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float* X, hipStream_t str) { hipStream_t initial_stream; CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double* X, hipStream_t str) { hipStream_t initial_stream; CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_axpby1<float>(const int N, const float alpha, const float* X, const float beta, float* Y,hipblasHandle_t &handle) { caffe_gpu_scal1<float>(N, beta, Y,handle); caffe_gpu_axpy1<float>(N, alpha, X, Y,handle); } template <> void caffe_gpu_axpby1<double>(const int N, const double alpha, const double* X, const double beta, double* Y,hipblasHandle_t &handle) { caffe_gpu_scal1<double>(N, beta, Y,handle); caffe_gpu_axpy1<double>(N, alpha, X, Y,handle); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, 
double * out) { CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum1<float>(const int n, const float* x, float* y, hipblasHandle_t &handle) { CUBLAS_CHECK(hipblasSasum(handle, n, x, 1, y)); } template <> void caffe_gpu_asum1<double>(const int n, const double* x, double* y,hipblasHandle_t &handle) { CUBLAS_CHECK(hipblasDasum(handle, n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale1<float>(const int n, const float alpha, const float *x, float* y,hipblasHandle_t &handle) { CUBLAS_CHECK(hipblasScopy(handle, n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal(handle, n, &alpha, y, 1)); } template <> void caffe_gpu_scale1<double>(const int n, const double alpha, const double *x, double* y,hipblasHandle_t &handle) { CUBLAS_CHECK(hipblasDcopy(handle, n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(handle, n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // 
NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> void caffe_gpu_set1(const int N, const Dtype alpha, Dtype* Y, hipStream_t &stream) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),0 ,stream, N, alpha, Y); } template void caffe_gpu_set1<int>(const int N, const int alpha, int* Y,hipStream_t &stream); template void caffe_gpu_set1<float>(const int N, const float alpha, float* Y,hipStream_t &stream); template void caffe_gpu_set1<double>(const int N, const double alpha, double* Y,hipStream_t &stream); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <> void caffe_gpu_add_scalar1(const int N, const float alpha, float* Y,hipStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream, N, alpha, Y); } 
template <> void caffe_gpu_add_scalar1(const int N, const double alpha, double* Y,hipStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream, N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add1<float>(const int N, const float* a, const float* b, float* y,hipStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream, N, a, b, y); } template <> void caffe_gpu_add1<double>(const int N, const double* a, const double* b, double* y,hipStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream, N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void 
caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_mul1<float>(const int N, const float* a, const float* b, float* y,hipStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream, N, a, b, y); } template <> void caffe_gpu_mul1<double>(const int N, const double* a, const double* b, double* y,hipStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream, N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* 
a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div1<float>(const int N, const float* a, const float* b, float* y,hipStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream, N, a, b, y); } template <> void caffe_gpu_div1<double>(const int N, const double* a, const double* b, double* y,hipStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream, N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), 
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <typename Dtype> __global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = sqrt(a[index]); } } template <> void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), 
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_sqrt1<float>(const int N, const float* a, float* y,hipStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream, N, a, y); } template <> void caffe_gpu_sqrt1<double>(const int N, const double* a, double* y,hipStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream, N, a, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
57cb9516ea7d963e8d5b868966c2f0ae92ce1cc0.cu
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm1<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, cublasHandle_t &handle) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm1<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C, cublasHandle_t &handle) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv1<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y, cublasHandle_t &handle) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(handle, cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv1<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y, cublasHandle_t &handle ) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(handle, cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy1<float>(const int N, const float alpha, const float* X, float* Y,cublasHandle_t &handle) { CUBLAS_CHECK(cublasSaxpy(handle, N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy1<double>(const int N, const double alpha, const double* X, double* Y,cublasHandle_t &handle) { CUBLAS_CHECK(cublasDaxpy(handle, N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal1<float>(const int N, const float alpha, float *X, cublasHandle_t &handle) { CUBLAS_CHECK(cublasSscal(handle, N, &alpha, X, 1)); } template <> void caffe_gpu_scal1<double>(const int N, const double alpha, double *X,cublasHandle_t &handle) { CUBLAS_CHECK(cublasDscal(handle, N, &alpha, X, 
1)); } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float* X, cudaStream_t str) { cudaStream_t initial_stream; CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double* X, cudaStream_t str) { cudaStream_t initial_stream; CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_axpby1<float>(const int N, const float alpha, const float* X, const float beta, float* Y,cublasHandle_t &handle) { caffe_gpu_scal1<float>(N, beta, Y,handle); caffe_gpu_axpy1<float>(N, alpha, X, Y,handle); } template <> void caffe_gpu_axpby1<double>(const int N, const double alpha, const double* X, const double beta, double* Y,cublasHandle_t &handle) { caffe_gpu_scal1<double>(N, beta, Y,handle); caffe_gpu_axpy1<double>(N, alpha, X, Y,handle); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { 
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum1<float>(const int n, const float* x, float* y, cublasHandle_t &handle) { CUBLAS_CHECK(cublasSasum(handle, n, x, 1, y)); } template <> void caffe_gpu_asum1<double>(const int n, const double* x, double* y,cublasHandle_t &handle) { CUBLAS_CHECK(cublasDasum(handle, n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale1<float>(const int n, const float alpha, const float *x, float* y,cublasHandle_t &handle) { CUBLAS_CHECK(cublasScopy(handle, n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(handle, n, &alpha, y, 1)); } template <> void caffe_gpu_scale1<double>(const int n, const double alpha, const double *x, double* y,cublasHandle_t &handle) { CUBLAS_CHECK(cublasDcopy(handle, n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(handle, n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) 
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> void caffe_gpu_set1(const int N, const Dtype alpha, Dtype* Y, cudaStream_t &stream) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,0 ,stream>>>( N, alpha, Y); } template void caffe_gpu_set1<int>(const int N, const int alpha, int* Y,cudaStream_t &stream); template void caffe_gpu_set1<float>(const int N, const float alpha, float* Y,cudaStream_t &stream); template void caffe_gpu_set1<double>(const int N, const double alpha, double* Y,cudaStream_t &stream); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> void caffe_gpu_add_scalar1(const int N, const float alpha, float* Y,cudaStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,0,stream>>>( N, alpha, Y); } template <> void caffe_gpu_add_scalar1(const int N, const double alpha, double* Y,cudaStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), 
CAFFE_CUDA_NUM_THREADS,0,stream>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add1<float>(const int N, const float* a, const float* b, float* y,cudaStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,0,stream>>>( N, a, b, y); } template <> void caffe_gpu_add1<double>(const int N, const double* a, const double* b, double* y,cudaStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,0,stream>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void 
caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul1<float>(const int N, const float* a, const float* b, float* y,cudaStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,0,stream>>>( N, a, b, y); } template <> void caffe_gpu_mul1<double>(const int N, const double* a, const double* b, double* y,cudaStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,0,stream>>>( N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div1<float>(const int N, const float* a, const float* b, float* y,cudaStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,0,stream>>>( N, a, b, y); } template <> void caffe_gpu_div1<double>(const int N, const double* a, const double* b, double* y,cudaStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) 
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,0,stream>>>( N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int 
N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <typename Dtype> __global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = sqrt(a[index]); } } template <> void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_sqrt1<float>(const int N, const float* a, float* y,cudaStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,0,stream>>>( N, a, y); } template <> void caffe_gpu_sqrt1<double>(const int N, const double* a, double* y,cudaStream_t &stream) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,0,stream>>>( N, a, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { 
caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
53dbab8611518827a93a7e25c8d50f40d7add6db.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "kernels_hip.cuh" __global__ void build_binary_tree(int *x, int *child, int *root, unsigned int N){ }
53dbab8611518827a93a7e25c8d50f40d7add6db.cu
#include <stdio.h> #include "kernels.cuh" __global__ void build_binary_tree(int *x, int *child, int *root, unsigned int N){ }
1a5d99dacd50cdf5933616fa21c48fff90b4827d.hip
// !!! This is a file automatically generated by hipify!!! /** main.cu * \file main.cu * Navier-Stokes equation solver in 2-dimensions, incompressible flow, by finite difference * \author Ernest Yeung * \email ernestyalumni@gmail.com * \date 20161206 * * Compilation tips if you're not using a make file * * nvcc -std=c++11 -c ./physlib/R2grid.cpp -o R2grid.o // or * g++ -std=c++11 -c ./physlib/R2grid.cpp -o R2grid.o * * nvcc -std=c++11 -c ./physlib/dev_R2grid.cu -o dev_R2grid.o * nvcc -std=c++11 main.cu R2grid.o dev_R2grid.o o main.exe * */ /* * cf. Kyle e. Niemeyer, Chih-Jen Sung. * Accelerating reactive-flow simulations using graphics processing units. * AIAA 2013-0371 American Institute of Aeronautics and Astronautics. * http://dx.doi.org/10.5281/zenodo.44333 * * Michael Griebel, Thomas Dornsheifer, Tilman Neunhoeffer. * Numerical Simulation in Fluid Dynamics: A Practical Introduction (Monographs on Mathematical Modeling and Computation). * SIAM: Society for Industrial and Applied Mathematics (December 1997). * ISBN-13:978-0898713985 QA911.G718 1997 * * */ #include <iomanip> // std::setprecision #include <iostream> // std::cout #include <cmath> // std::sqrt, std::fmax , std::fmin #include "./physlib/R2grid.h" // Grid2d #include "./physlib/dev_R2grid.h" // Dev_Grid2d #include "./physlib/uvp.h" // compute_F, compute_G, etc. 
#include "./physlib/boundary.h" // set_BCs_host, set_BCs #include "./commonlib/checkerror.h" // checkCudaErrors #include "./commonlib/tex_anim2d.h" // GPUAnim2dTex #define GL_GLEXT_PROTOTYPES // needed for identifier glGenBuffer, glBindBuffer, glBufferData, glDeleteBuffers #include <GL/glut.h> #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> // or #include "cuda_gl_interop.h" #include <array> // std::array #include <vector> // std::vector #include <functional> // ################################################################ // ####################### Initialization ######################### // ######################## of global-scope variables and objects # // ################################################################ // ################################################################ // ####################### Initialization ######################### // ################################################################ // discretization (parameters) <==> graphical (parameters) const int L_X { 256 }; // WIDTH // I've tried values 32. 128, 32, 0.5 works; 256, 32, 0.25 works (even though 256, 64 doesn't); 512, 64, doesn't work, neither does 512,32; 512, 16 works const int L_Y { 256 }; // HEIGHT // I've tried values 32, 128, 32, 0.5 works // "real", physical parameters /** try domain size (non-dimensional) */ constexpr const float l_X = 1.0; // length (in x-direction) constexpr const float l_Y = 1.0; // height (in y-direction) // physics (on device); Euclidean (spatial) space dim3 dev_L2 { static_cast<unsigned int>(L_X), static_cast<unsigned int>(L_Y) }; Dev_Grid2d dev_grid2d( dev_L2); // physics (on host); Euclidean (spatial) space constexpr std::array<int,2> LdS { L_X, L_Y } ; constexpr std::array<float,2> ldS { l_X, l_Y }; //Grid2d grid2d{LdS, ldS}; Grid2d grid2d(LdS, ldS); // dynamics (parameters) const dim3 M_i { 64, 64 }; // number of threads per block, i.e. 
Niemeyer's BLOCK_SIZE // I've tried values 4,4 float t = 0.0 ; int cycle = 0; // iterations for SOR successive over relaxation int iter = 0; int itermax = 1000000; // I tried values such as 10000, Griebel, et. al. = 100 /* READ the parameters of the problem */ /* -------------------------------------------------- */ /** Safety factor for time step modification; safety factor for time stepsize control */ constexpr const float tau = 0.5; /** SOR relaxation parameter; omg is Griebel's notation */ constexpr const float omega = 1.7; /** Discretization mixture parameter (gamma); gamma:upwind differencing factor is Griebel's notation */ constexpr const float gamma_mix_param = 0.9; /** Reynolds number */ constexpr const float Re_num = 1000.0; // SOR iteration tolerance const float tol = 0.001; // Griebel, et. al., and Niemeyer has this at 0.001 // time range const float time_start = 0.0; //const float time_end = 0.25; // L_X=L_Y=128, M_i=32, t_f=0.5 works // initial time step size float deltat = 0.02; // I've tried values 0.002 //////////////////////////////////////// // block and grid dimensions // boundary conditions kernel dim3 block_bcs (M_i.x, 1); dim3 grid_bcs (grid2d.Ld[0] / M_i.x, 1); // pressure kernel dim3 block_pr (M_i.y, 1); dim3 grid_pr (grid2d.Ld[1] / (2 * M_i.y), grid2d.Ld[0]); // block and grid dimensions for F dim3 block_F (M_i.y, 1); dim3 grid_F (grid2d.Ld[1] / M_i.y, grid2d.Ld[0]); // block and grid dimensions for G dim3 block_G (M_i.y, 1); dim3 grid_G (grid2d.Ld[1] / M_i.y, grid2d.Ld[0]); // horizontal pressure boundary conditions dim3 block_hpbc (M_i.x, 1); dim3 grid_hpbc (grid2d.Ld[0] / (2 * M_i.x), 1); // vertical pressure boundary conditions dim3 block_vpbc (M_i.y, 1); dim3 grid_vpbc (grid2d.Ld[1] / (2 * M_i.y), 1); // "internal" cells only, so-called fluid cells from Griebel, et. al. 
dim3 inter_gridSize ( (grid2d.Ld[0] + M_i.x-1)/M_i.x , (grid2d.Ld[1]+M_i.y-1)/M_i.y ) ; //////////////////////////////////////// // residual variable //constexpr const int size_res = grid_pr.x * grid_pr.y; doesn't work in std::array, expression must have a constant value constexpr const int size_res = L_Y/2 * L_X; std::array<float, size_res> res_array; /* delt satisfying CFL conditions */ /* ------------------------------ */ float max_u = 1.0e-10; float max_v = 1.0e-10; // variables to store maximum velocities constexpr const int size_max = L_Y/2 * L_X ; std::array<float, size_max> max_u_array; std::array<float, size_max> max_v_array; // pressure sum std::array<float, size_res> pres_sum_array; //////////////////////////////////////// // allocate and transfer device memory float* pres_sum_d; float* res_d; float* max_u_d; float* max_v_d; // time-step size based on grid and Reynolds number float dt_Re = 0.5 * Re_num / ((1.0 / (grid2d.hd[0] * grid2d.hd[0])) + (1.0 / (grid2d.hd[1] * grid2d.hd[1]))); /* ----------------------------------------------------------------- */ // graphics /* ----------------------------------------------------------------- */ const int iters_per_render { 100 }; GPUAnim2dTex bitmap( L_X, L_Y ); GPUAnim2dTex* testGPUAnim2dTex = &bitmap; void make_render(dim3 Ld_in, int iters_per_render_in, GPUAnim2dTex* texmap ) { uchar4 *d_out = 0; hipGraphicsMapResources(1, &texmap->cuda_pixbufferObj_resource, 0); hipGraphicsResourceGetMappedPointer((void **)&d_out, NULL, texmap->cuda_pixbufferObj_resource); for (int i = 0; i < iters_per_render_in; ++i) { // ################################################################ // ####################### ########################## // ####################### MAIN LOOP ########################## // ####################### ########################## // ################################################################ /* t i m e l o o p */ /* ------------------ */ /* time loop step */ // calculate time step based on 
stability and CFL deltat = std::fmin((grid2d.hd[0] / max_u), (grid2d.hd[1] / max_v)); deltat = tau * std::fmin(dt_Re, deltat); // calculate F and G hipLaunchKernelGGL(( calculate_F) , dim3(grid_F), dim3(block_F), 0, 0, dev_grid2d.u, dev_grid2d.v, dev_grid2d.F, gamma_mix_param, Re_num, 0.0, deltat, grid2d.hd[0], grid2d.hd[1], grid2d.Ld[0], grid2d.Ld[1], M_i.x); hipLaunchKernelGGL(( calculate_G) , dim3(grid_G), dim3(block_G), 0, 0, dev_grid2d.u, dev_grid2d.v, dev_grid2d.G, gamma_mix_param, Re_num, 0.0, deltat, grid2d.hd[0], grid2d.hd[1], grid2d.Ld[0], grid2d.Ld[1], M_i.x); // get L2 norm of initial pressure hipLaunchKernelGGL(( sum_pressure) , dim3(grid_pr), dim3(block_pr), 0, 0, dev_grid2d.pres_red, dev_grid2d.pres_black, pres_sum_d, grid2d.Ld[0], grid2d.Ld[1], M_i.x); hipMemcpy (pres_sum_array.data(), pres_sum_d, size_res * sizeof(float), hipMemcpyDeviceToHost); float p0_norm = 0.0; #pragma unroll for (int i = 0; i < size_res; ++i) { p0_norm += pres_sum_array[i]; } p0_norm = sqrt(p0_norm / ((float)(grid2d.NFLAT()))); if (p0_norm < 0.0001) { p0_norm = 1.0; } // ensure all kernels are finished hipDeviceSynchronize(); float norm_L2; // calculate new pressure // red-black Gauss-Seidel with SOR iteration loop for (iter = 1; iter <= itermax; ++iter) { // set pressure boundary conditions hipLaunchKernelGGL(( set_horz_pres_BCs) , dim3(grid_hpbc), dim3(block_hpbc), 0, 0, dev_grid2d.pres_red, dev_grid2d.pres_black, grid2d.Ld[0], grid2d.Ld[1] ); hipLaunchKernelGGL(( set_vert_pres_BCs) , dim3(grid_vpbc), dim3(block_hpbc), 0, 0, dev_grid2d.pres_red, dev_grid2d.pres_black, grid2d.Ld[0], grid2d.Ld[1] ); // ensure kernel finished hipDeviceSynchronize(); // update red cells hipLaunchKernelGGL(( red_kernel) , dim3(grid_pr), dim3(block_pr), 0, 0, dev_grid2d.F, dev_grid2d.G, dev_grid2d.pres_black, dev_grid2d.pres_red, omega, deltat, grid2d.hd[0], grid2d.hd[1], grid2d.Ld[0], grid2d.Ld[1], M_i.x); // ensure red kernel finished hipDeviceSynchronize(); // update black cells 
hipLaunchKernelGGL(( black_kernel) , dim3(grid_pr), dim3(block_pr), 0, 0, dev_grid2d.F, dev_grid2d.G, dev_grid2d.pres_red, dev_grid2d.pres_black, omega, deltat, grid2d.hd[0], grid2d.hd[1], grid2d.Ld[0], grid2d.Ld[1], M_i.x); // ensure red kernel finished hipDeviceSynchronize(); // calculate residual values hipLaunchKernelGGL(( calc_residual) , dim3(grid_pr), dim3(block_pr), 0, 0, dev_grid2d.F, dev_grid2d.G, dev_grid2d.pres_red, dev_grid2d.pres_black, res_d, deltat, grid2d.hd[0], grid2d.hd[1], grid2d.Ld[0], grid2d.Ld[1], M_i.x); // transfer residual value(s) back to CPU hipMemcpy (res_array.data(), res_d, size_res * sizeof(float), hipMemcpyDeviceToHost); norm_L2 = 0.0; #pragma unroll for (int i = 0; i < size_res; ++i) { norm_L2 += res_array[i]; } // calculate residual norm_L2 = sqrt(norm_L2 / ((float)( grid2d.NFLAT() ))) / p0_norm; // if tolerance has been reached, end SOR iterations if (norm_L2 < tol) { break; } } // end for std::cout << "Time = " << t+deltat << ", deltat = " << deltat << ", iter = " << iter << ", res (i.e. 
norm_L2) = " << norm_L2 << std::endl; // calculate new velocities and transfer maximums back hipLaunchKernelGGL(( calculate_u) , dim3(grid_pr), dim3(block_pr), 0, 0, dev_grid2d.F, dev_grid2d.pres_red, dev_grid2d.pres_black, dev_grid2d.u, max_u_d, deltat, grid2d.hd[0], grid2d.hd[1], grid2d.Ld[0], grid2d.Ld[1], M_i.x); hipMemcpy (max_u_array.data(), max_u_d, size_max * sizeof(float), hipMemcpyDeviceToHost); hipLaunchKernelGGL(( calculate_v) , dim3(grid_pr), dim3(block_pr), 0, 0, dev_grid2d.G, dev_grid2d.pres_red, dev_grid2d.pres_black, dev_grid2d.v, max_v_d, deltat, grid2d.hd[0], grid2d.hd[1], grid2d.Ld[0], grid2d.Ld[1], M_i.x); hipMemcpy (max_v_array.data(), max_v_d, size_max * sizeof(float), hipMemcpyDeviceToHost); // get maximum u- and v- velocities max_v = 1.0e-10; max_u = 1.0e-10; #pragma unroll for (int i = 0; i < size_max; ++i) { float test_u = max_u_array[i]; max_u = std::fmax(max_u, test_u); float test_v = max_v_array[i]; max_v = std::fmax(max_v, test_v); } // set velocity boundary conditions hipLaunchKernelGGL(( set_BCs) , dim3(grid_bcs), dim3(block_bcs), 0, 0, dev_grid2d.u, dev_grid2d.v, grid2d.Ld[0], grid2d.Ld[1]); hipDeviceSynchronize(); // increase time t += deltat; hipLaunchKernelGGL(( float_to_char), dim3(inter_gridSize), dim3(M_i), 0, 0, d_out, dev_grid2d.u, grid2d.Ld[0], grid2d.Ld[1] ); } // for loop, iters per render, END hipGraphicsUnmapResources(1, &texmap->cuda_pixbufferObj_resource, 0 ); hipDeviceSynchronize(); } // END make render std::function<void()> render = std::bind( make_render, dev_L2, iters_per_render, testGPUAnim2dTex); std::function<void()> draw_texture = std::bind( make_draw_texture, L_X, L_Y); void display() { render() ; draw_texture(); glutSwapBuffers(); } int main( int argc, char *argv[] ) { // set initial BCs set_BCs_host (grid2d.u , grid2d.v, grid2d.Ld[0], grid2d.Ld[1]); // get max velocity for initial values (including BCs) #pragma unroll for (int col = 0; col < grid2d.Ld[0] + 2; ++col) { #pragma unroll for (int row = 1; row < 
grid2d.Ld[1] + 2; ++row) { max_u = fmax(max_u, fabs( grid2d.u[flatten(col, row, grid2d.Ld[1]+2)] )); } } #pragma unroll for (int col = 1; col < grid2d.Ld[0] + 2; ++col) { #pragma unroll for (int row = 0; row < grid2d.Ld[1] + 2; ++row) { max_v = fmax(max_v, fabs( grid2d.v[ flatten(col, row, grid2d.Ld[1] + 2) ] )); } } hipMalloc ((void**) &pres_sum_d, size_res * sizeof(float)); hipMalloc ((void**) &res_d, size_res * sizeof(float)); hipMalloc ((void**) &max_u_d, size_max * sizeof(float)); hipMalloc ((void**) &max_v_d, size_max * sizeof(float)); // copy to device memory hipMemcpy (dev_grid2d.u, grid2d.u, grid2d.staggered_NFLAT() * sizeof(float), hipMemcpyHostToDevice); hipMemcpy (dev_grid2d.F, grid2d.F, grid2d.staggered_NFLAT() * sizeof(float), hipMemcpyHostToDevice); hipMemcpy (dev_grid2d.v, grid2d.v, grid2d.staggered_NFLAT() * sizeof(float), hipMemcpyHostToDevice); hipMemcpy (dev_grid2d.G, grid2d.G, grid2d.staggered_NFLAT() * sizeof(float), hipMemcpyHostToDevice); hipMemcpy (dev_grid2d.pres_red, grid2d.pres_red, (grid2d.Ld[0]/2+2)*(grid2d.Ld[1]+2) * sizeof(float), hipMemcpyHostToDevice); hipMemcpy (dev_grid2d.pres_black, grid2d.pres_black, (grid2d.Ld[0]/2+2 )*(grid2d.Ld[1]+2)* sizeof(float), hipMemcpyHostToDevice); //////////////////////////////////////// float t = time_start; // graphics run testGPUAnim2dTex->initGLUT(&argc, argv); glutKeyboardFunc(keyboard_func); glutMouseFunc(mouse_func); glutIdleFunc(idle); glutDisplayFunc(display); testGPUAnim2dTex->initPixelBuffer(); glutMainLoop(); // free device memory checkCudaErrors( hipFree( dev_grid2d.u )); checkCudaErrors( hipFree( dev_grid2d.v )); checkCudaErrors( hipFree( dev_grid2d.F )); checkCudaErrors( hipFree( dev_grid2d.G )); checkCudaErrors( hipFree( dev_grid2d.pres_red )); checkCudaErrors( hipFree( dev_grid2d.pres_black )); checkCudaErrors( hipFree( max_u_d )); checkCudaErrors( hipFree( max_v_d )); checkCudaErrors( hipFree( pres_sum_d )); checkCudaErrors( hipFree( res_d )); hipDeviceReset(); return 0; } // END 
of main
1a5d99dacd50cdf5933616fa21c48fff90b4827d.cu
/** main.cu * \file main.cu * Navier-Stokes equation solver in 2-dimensions, incompressible flow, by finite difference * \author Ernest Yeung * \email ernestyalumni@gmail.com * \date 20161206 * * Compilation tips if you're not using a make file * * nvcc -std=c++11 -c ./physlib/R2grid.cpp -o R2grid.o // or * g++ -std=c++11 -c ./physlib/R2grid.cpp -o R2grid.o * * nvcc -std=c++11 -c ./physlib/dev_R2grid.cu -o dev_R2grid.o * nvcc -std=c++11 main.cu R2grid.o dev_R2grid.o o main.exe * */ /* * cf. Kyle e. Niemeyer, Chih-Jen Sung. * Accelerating reactive-flow simulations using graphics processing units. * AIAA 2013-0371 American Institute of Aeronautics and Astronautics. * http://dx.doi.org/10.5281/zenodo.44333 * * Michael Griebel, Thomas Dornsheifer, Tilman Neunhoeffer. * Numerical Simulation in Fluid Dynamics: A Practical Introduction (Monographs on Mathematical Modeling and Computation). * SIAM: Society for Industrial and Applied Mathematics (December 1997). * ISBN-13:978-0898713985 QA911.G718 1997 * * */ #include <iomanip> // std::setprecision #include <iostream> // std::cout #include <cmath> // std::sqrt, std::fmax , std::fmin #include "./physlib/R2grid.h" // Grid2d #include "./physlib/dev_R2grid.h" // Dev_Grid2d #include "./physlib/uvp.h" // compute_F, compute_G, etc. 
#include "./physlib/boundary.h" // set_BCs_host, set_BCs #include "./commonlib/checkerror.h" // checkCudaErrors #include "./commonlib/tex_anim2d.h" // GPUAnim2dTex #define GL_GLEXT_PROTOTYPES // needed for identifier glGenBuffer, glBindBuffer, glBufferData, glDeleteBuffers #include <GL/glut.h> #include <cuda_runtime.h> #include <cuda_gl_interop.h> // or #include "cuda_gl_interop.h" #include <array> // std::array #include <vector> // std::vector #include <functional> // ################################################################ // ####################### Initialization ######################### // ######################## of global-scope variables and objects # // ################################################################ // ################################################################ // ####################### Initialization ######################### // ################################################################ // discretization (parameters) <==> graphical (parameters) const int L_X { 256 }; // WIDTH // I've tried values 32. 128, 32, 0.5 works; 256, 32, 0.25 works (even though 256, 64 doesn't); 512, 64, doesn't work, neither does 512,32; 512, 16 works const int L_Y { 256 }; // HEIGHT // I've tried values 32, 128, 32, 0.5 works // "real", physical parameters /** try domain size (non-dimensional) */ constexpr const float l_X = 1.0; // length (in x-direction) constexpr const float l_Y = 1.0; // height (in y-direction) // physics (on device); Euclidean (spatial) space dim3 dev_L2 { static_cast<unsigned int>(L_X), static_cast<unsigned int>(L_Y) }; Dev_Grid2d dev_grid2d( dev_L2); // physics (on host); Euclidean (spatial) space constexpr std::array<int,2> LdS { L_X, L_Y } ; constexpr std::array<float,2> ldS { l_X, l_Y }; //Grid2d grid2d{LdS, ldS}; Grid2d grid2d(LdS, ldS); // dynamics (parameters) const dim3 M_i { 64, 64 }; // number of threads per block, i.e. 
Niemeyer's BLOCK_SIZE // I've tried values 4,4 float t = 0.0 ; int cycle = 0; // iterations for SOR successive over relaxation int iter = 0; int itermax = 1000000; // I tried values such as 10000, Griebel, et. al. = 100 /* READ the parameters of the problem */ /* -------------------------------------------------- */ /** Safety factor for time step modification; safety factor for time stepsize control */ constexpr const float tau = 0.5; /** SOR relaxation parameter; omg is Griebel's notation */ constexpr const float omega = 1.7; /** Discretization mixture parameter (gamma); gamma:upwind differencing factor is Griebel's notation */ constexpr const float gamma_mix_param = 0.9; /** Reynolds number */ constexpr const float Re_num = 1000.0; // SOR iteration tolerance const float tol = 0.001; // Griebel, et. al., and Niemeyer has this at 0.001 // time range const float time_start = 0.0; //const float time_end = 0.25; // L_X=L_Y=128, M_i=32, t_f=0.5 works // initial time step size float deltat = 0.02; // I've tried values 0.002 //////////////////////////////////////// // block and grid dimensions // boundary conditions kernel dim3 block_bcs (M_i.x, 1); dim3 grid_bcs (grid2d.Ld[0] / M_i.x, 1); // pressure kernel dim3 block_pr (M_i.y, 1); dim3 grid_pr (grid2d.Ld[1] / (2 * M_i.y), grid2d.Ld[0]); // block and grid dimensions for F dim3 block_F (M_i.y, 1); dim3 grid_F (grid2d.Ld[1] / M_i.y, grid2d.Ld[0]); // block and grid dimensions for G dim3 block_G (M_i.y, 1); dim3 grid_G (grid2d.Ld[1] / M_i.y, grid2d.Ld[0]); // horizontal pressure boundary conditions dim3 block_hpbc (M_i.x, 1); dim3 grid_hpbc (grid2d.Ld[0] / (2 * M_i.x), 1); // vertical pressure boundary conditions dim3 block_vpbc (M_i.y, 1); dim3 grid_vpbc (grid2d.Ld[1] / (2 * M_i.y), 1); // "internal" cells only, so-called fluid cells from Griebel, et. al. 
dim3 inter_gridSize ( (grid2d.Ld[0] + M_i.x-1)/M_i.x , (grid2d.Ld[1]+M_i.y-1)/M_i.y ) ; //////////////////////////////////////// // residual variable //constexpr const int size_res = grid_pr.x * grid_pr.y; doesn't work in std::array, expression must have a constant value constexpr const int size_res = L_Y/2 * L_X; std::array<float, size_res> res_array; /* delt satisfying CFL conditions */ /* ------------------------------ */ float max_u = 1.0e-10; float max_v = 1.0e-10; // variables to store maximum velocities constexpr const int size_max = L_Y/2 * L_X ; std::array<float, size_max> max_u_array; std::array<float, size_max> max_v_array; // pressure sum std::array<float, size_res> pres_sum_array; //////////////////////////////////////// // allocate and transfer device memory float* pres_sum_d; float* res_d; float* max_u_d; float* max_v_d; // time-step size based on grid and Reynolds number float dt_Re = 0.5 * Re_num / ((1.0 / (grid2d.hd[0] * grid2d.hd[0])) + (1.0 / (grid2d.hd[1] * grid2d.hd[1]))); /* ----------------------------------------------------------------- */ // graphics /* ----------------------------------------------------------------- */ const int iters_per_render { 100 }; GPUAnim2dTex bitmap( L_X, L_Y ); GPUAnim2dTex* testGPUAnim2dTex = &bitmap; void make_render(dim3 Ld_in, int iters_per_render_in, GPUAnim2dTex* texmap ) { uchar4 *d_out = 0; cudaGraphicsMapResources(1, &texmap->cuda_pixbufferObj_resource, 0); cudaGraphicsResourceGetMappedPointer((void **)&d_out, NULL, texmap->cuda_pixbufferObj_resource); for (int i = 0; i < iters_per_render_in; ++i) { // ################################################################ // ####################### ########################## // ####################### MAIN LOOP ########################## // ####################### ########################## // ################################################################ /* t i m e l o o p */ /* ------------------ */ /* time loop step */ // calculate time step based on 
stability and CFL deltat = std::fmin((grid2d.hd[0] / max_u), (grid2d.hd[1] / max_v)); deltat = tau * std::fmin(dt_Re, deltat); // calculate F and G calculate_F <<<grid_F, block_F>>> ( dev_grid2d.u, dev_grid2d.v, dev_grid2d.F, gamma_mix_param, Re_num, 0.0, deltat, grid2d.hd[0], grid2d.hd[1], grid2d.Ld[0], grid2d.Ld[1], M_i.x); calculate_G <<<grid_G, block_G>>> (dev_grid2d.u, dev_grid2d.v, dev_grid2d.G, gamma_mix_param, Re_num, 0.0, deltat, grid2d.hd[0], grid2d.hd[1], grid2d.Ld[0], grid2d.Ld[1], M_i.x); // get L2 norm of initial pressure sum_pressure <<<grid_pr, block_pr>>> (dev_grid2d.pres_red, dev_grid2d.pres_black, pres_sum_d, grid2d.Ld[0], grid2d.Ld[1], M_i.x); cudaMemcpy (pres_sum_array.data(), pres_sum_d, size_res * sizeof(float), cudaMemcpyDeviceToHost); float p0_norm = 0.0; #pragma unroll for (int i = 0; i < size_res; ++i) { p0_norm += pres_sum_array[i]; } p0_norm = sqrt(p0_norm / ((float)(grid2d.NFLAT()))); if (p0_norm < 0.0001) { p0_norm = 1.0; } // ensure all kernels are finished cudaDeviceSynchronize(); float norm_L2; // calculate new pressure // red-black Gauss-Seidel with SOR iteration loop for (iter = 1; iter <= itermax; ++iter) { // set pressure boundary conditions set_horz_pres_BCs <<<grid_hpbc, block_hpbc>>> (dev_grid2d.pres_red, dev_grid2d.pres_black, grid2d.Ld[0], grid2d.Ld[1] ); set_vert_pres_BCs <<<grid_vpbc, block_hpbc>>> (dev_grid2d.pres_red, dev_grid2d.pres_black, grid2d.Ld[0], grid2d.Ld[1] ); // ensure kernel finished cudaDeviceSynchronize(); // update red cells red_kernel <<<grid_pr, block_pr>>> (dev_grid2d.F, dev_grid2d.G, dev_grid2d.pres_black, dev_grid2d.pres_red, omega, deltat, grid2d.hd[0], grid2d.hd[1], grid2d.Ld[0], grid2d.Ld[1], M_i.x); // ensure red kernel finished cudaDeviceSynchronize(); // update black cells black_kernel <<<grid_pr, block_pr>>> (dev_grid2d.F, dev_grid2d.G, dev_grid2d.pres_red, dev_grid2d.pres_black, omega, deltat, grid2d.hd[0], grid2d.hd[1], grid2d.Ld[0], grid2d.Ld[1], M_i.x); // ensure red kernel finished 
cudaDeviceSynchronize(); // calculate residual values calc_residual <<<grid_pr, block_pr>>> (dev_grid2d.F, dev_grid2d.G, dev_grid2d.pres_red, dev_grid2d.pres_black, res_d, deltat, grid2d.hd[0], grid2d.hd[1], grid2d.Ld[0], grid2d.Ld[1], M_i.x); // transfer residual value(s) back to CPU cudaMemcpy (res_array.data(), res_d, size_res * sizeof(float), cudaMemcpyDeviceToHost); norm_L2 = 0.0; #pragma unroll for (int i = 0; i < size_res; ++i) { norm_L2 += res_array[i]; } // calculate residual norm_L2 = sqrt(norm_L2 / ((float)( grid2d.NFLAT() ))) / p0_norm; // if tolerance has been reached, end SOR iterations if (norm_L2 < tol) { break; } } // end for std::cout << "Time = " << t+deltat << ", deltat = " << deltat << ", iter = " << iter << ", res (i.e. norm_L2) = " << norm_L2 << std::endl; // calculate new velocities and transfer maximums back calculate_u <<<grid_pr, block_pr>>> ( dev_grid2d.F, dev_grid2d.pres_red, dev_grid2d.pres_black, dev_grid2d.u, max_u_d, deltat, grid2d.hd[0], grid2d.hd[1], grid2d.Ld[0], grid2d.Ld[1], M_i.x); cudaMemcpy (max_u_array.data(), max_u_d, size_max * sizeof(float), cudaMemcpyDeviceToHost); calculate_v <<<grid_pr, block_pr>>> (dev_grid2d.G, dev_grid2d.pres_red, dev_grid2d.pres_black, dev_grid2d.v, max_v_d, deltat, grid2d.hd[0], grid2d.hd[1], grid2d.Ld[0], grid2d.Ld[1], M_i.x); cudaMemcpy (max_v_array.data(), max_v_d, size_max * sizeof(float), cudaMemcpyDeviceToHost); // get maximum u- and v- velocities max_v = 1.0e-10; max_u = 1.0e-10; #pragma unroll for (int i = 0; i < size_max; ++i) { float test_u = max_u_array[i]; max_u = std::fmax(max_u, test_u); float test_v = max_v_array[i]; max_v = std::fmax(max_v, test_v); } // set velocity boundary conditions set_BCs <<<grid_bcs, block_bcs>>> (dev_grid2d.u, dev_grid2d.v, grid2d.Ld[0], grid2d.Ld[1]); cudaDeviceSynchronize(); // increase time t += deltat; float_to_char<<<inter_gridSize, M_i>>>( d_out, dev_grid2d.u, grid2d.Ld[0], grid2d.Ld[1] ); } // for loop, iters per render, END 
cudaGraphicsUnmapResources(1, &texmap->cuda_pixbufferObj_resource, 0 ); cudaDeviceSynchronize(); } // END make render std::function<void()> render = std::bind( make_render, dev_L2, iters_per_render, testGPUAnim2dTex); std::function<void()> draw_texture = std::bind( make_draw_texture, L_X, L_Y); void display() { render() ; draw_texture(); glutSwapBuffers(); } int main( int argc, char *argv[] ) { // set initial BCs set_BCs_host (grid2d.u , grid2d.v, grid2d.Ld[0], grid2d.Ld[1]); // get max velocity for initial values (including BCs) #pragma unroll for (int col = 0; col < grid2d.Ld[0] + 2; ++col) { #pragma unroll for (int row = 1; row < grid2d.Ld[1] + 2; ++row) { max_u = fmax(max_u, fabs( grid2d.u[flatten(col, row, grid2d.Ld[1]+2)] )); } } #pragma unroll for (int col = 1; col < grid2d.Ld[0] + 2; ++col) { #pragma unroll for (int row = 0; row < grid2d.Ld[1] + 2; ++row) { max_v = fmax(max_v, fabs( grid2d.v[ flatten(col, row, grid2d.Ld[1] + 2) ] )); } } cudaMalloc ((void**) &pres_sum_d, size_res * sizeof(float)); cudaMalloc ((void**) &res_d, size_res * sizeof(float)); cudaMalloc ((void**) &max_u_d, size_max * sizeof(float)); cudaMalloc ((void**) &max_v_d, size_max * sizeof(float)); // copy to device memory cudaMemcpy (dev_grid2d.u, grid2d.u, grid2d.staggered_NFLAT() * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy (dev_grid2d.F, grid2d.F, grid2d.staggered_NFLAT() * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy (dev_grid2d.v, grid2d.v, grid2d.staggered_NFLAT() * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy (dev_grid2d.G, grid2d.G, grid2d.staggered_NFLAT() * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy (dev_grid2d.pres_red, grid2d.pres_red, (grid2d.Ld[0]/2+2)*(grid2d.Ld[1]+2) * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy (dev_grid2d.pres_black, grid2d.pres_black, (grid2d.Ld[0]/2+2 )*(grid2d.Ld[1]+2)* sizeof(float), cudaMemcpyHostToDevice); //////////////////////////////////////// float t = time_start; // graphics run 
testGPUAnim2dTex->initGLUT(&argc, argv); glutKeyboardFunc(keyboard_func); glutMouseFunc(mouse_func); glutIdleFunc(idle); glutDisplayFunc(display); testGPUAnim2dTex->initPixelBuffer(); glutMainLoop(); // free device memory checkCudaErrors( cudaFree( dev_grid2d.u )); checkCudaErrors( cudaFree( dev_grid2d.v )); checkCudaErrors( cudaFree( dev_grid2d.F )); checkCudaErrors( cudaFree( dev_grid2d.G )); checkCudaErrors( cudaFree( dev_grid2d.pres_red )); checkCudaErrors( cudaFree( dev_grid2d.pres_black )); checkCudaErrors( cudaFree( max_u_d )); checkCudaErrors( cudaFree( max_v_d )); checkCudaErrors( cudaFree( pres_sum_d )); checkCudaErrors( cudaFree( res_d )); cudaDeviceReset(); return 0; } // END of main
ea69add02d34ff21aac097e1c14642b25c172372.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_calc_dt_kernel_get; int xdim0_calc_dt_kernel_get_h = -1; __constant__ int ydim0_calc_dt_kernel_get; int ydim0_calc_dt_kernel_get_h = -1; __constant__ int xdim1_calc_dt_kernel_get; int xdim1_calc_dt_kernel_get_h = -1; __constant__ int ydim1_calc_dt_kernel_get; int ydim1_calc_dt_kernel_get_h = -1; __constant__ int xdim4_calc_dt_kernel_get; int xdim4_calc_dt_kernel_get_h = -1; __constant__ int ydim4_calc_dt_kernel_get; int ydim4_calc_dt_kernel_get_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC4 #define OPS_ACC0(x, y, z) \ (x + xdim0_calc_dt_kernel_get * (y) + \ xdim0_calc_dt_kernel_get * ydim0_calc_dt_kernel_get * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_calc_dt_kernel_get * (y) + \ xdim1_calc_dt_kernel_get * ydim1_calc_dt_kernel_get * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_calc_dt_kernel_get * (y) + \ xdim4_calc_dt_kernel_get * ydim4_calc_dt_kernel_get * (z)) // user function __device__ void calc_dt_kernel_get_gpu(const double *cellx, const double *celly, double *xl_pos, double *yl_pos, const double *cellz, double *zl_pos) { *xl_pos = cellx[OPS_ACC0(0, 0, 0)]; *yl_pos = celly[OPS_ACC1(0, 0, 0)]; *zl_pos = cellz[OPS_ACC4(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC4 __global__ void ops_calc_dt_kernel_get(const double *__restrict arg0, const double *__restrict arg1, double *__restrict arg2, double *__restrict arg3, const double *__restrict arg4, double *__restrict arg5, int size0, int size1, int size2) { double arg2_l[1]; double arg3_l[1]; double arg5_l[1]; for (int d = 0; d < 1; d++) arg2_l[d] = ZERO_double; for (int d = 0; d < 1; d++) arg3_l[d] = ZERO_double; for (int d = 0; d < 1; d++) arg5_l[d] = ZERO_double; int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 0 
* 1 * xdim0_calc_dt_kernel_get + idx_z * 0 * 1 * xdim0_calc_dt_kernel_get * ydim0_calc_dt_kernel_get; arg1 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim1_calc_dt_kernel_get + idx_z * 0 * 1 * xdim1_calc_dt_kernel_get * ydim1_calc_dt_kernel_get; arg4 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim4_calc_dt_kernel_get + idx_z * 1 * 1 * xdim4_calc_dt_kernel_get * ydim4_calc_dt_kernel_get; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { calc_dt_kernel_get_gpu(arg0, arg1, arg2_l, arg3_l, arg4, arg5_l); } for (int d = 0; d < 1; d++) ops_reduction_cuda<OPS_INC>(&arg2[d + (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y) * 1], arg2_l[d]); for (int d = 0; d < 1; d++) ops_reduction_cuda<OPS_INC>(&arg3[d + (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y) * 1], arg3_l[d]); for (int d = 0; d < 1; d++) ops_reduction_cuda<OPS_INC>(&arg5[d + (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y) * 1], arg5_l[d]); } // host stub function void ops_par_loop_calc_dt_kernel_get(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { // Timing double t1, t2, c1, c2; ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 6, range, 39)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(39, "calc_dt_kernel_get"); OPS_kernels[39].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - 
sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != xdim0_calc_dt_kernel_get_h || ydim0 != ydim0_calc_dt_kernel_get_h || xdim1 != xdim1_calc_dt_kernel_get_h || ydim1 != ydim1_calc_dt_kernel_get_h || xdim4 != xdim4_calc_dt_kernel_get_h || ydim4 != ydim4_calc_dt_kernel_get_h) { hipMemcpyToSymbol(xdim0_calc_dt_kernel_get, &xdim0, sizeof(int)); xdim0_calc_dt_kernel_get_h = xdim0; hipMemcpyToSymbol(ydim0_calc_dt_kernel_get, &ydim0, sizeof(int)); ydim0_calc_dt_kernel_get_h = ydim0; hipMemcpyToSymbol(xdim1_calc_dt_kernel_get, &xdim1, sizeof(int)); xdim1_calc_dt_kernel_get_h = xdim1; hipMemcpyToSymbol(ydim1_calc_dt_kernel_get, &ydim1, sizeof(int)); ydim1_calc_dt_kernel_get_h = ydim1; hipMemcpyToSymbol(xdim4_calc_dt_kernel_get, &xdim4, sizeof(int)); xdim4_calc_dt_kernel_get_h = xdim4; hipMemcpyToSymbol(ydim4_calc_dt_kernel_get, &ydim4, sizeof(int)); ydim4_calc_dt_kernel_get_h = ydim4; } #ifdef OPS_MPI double *arg2h = (double *)(((ops_reduction)args[2].data)->data + ((ops_reduction)args[2].data)->size * block->index); #else double *arg2h = (double *)(((ops_reduction)args[2].data)->data); #endif #ifdef OPS_MPI double *arg3h = (double *)(((ops_reduction)args[3].data)->data + ((ops_reduction)args[3].data)->size * block->index); #else double *arg3h = (double *)(((ops_reduction)args[3].data)->data); #endif #ifdef OPS_MPI double *arg5h = (double 
*)(((ops_reduction)args[5].data)->data + ((ops_reduction)args[5].data)->size * block->index); #else double *arg5h = (double *)(((ops_reduction)args[5].data)->data); #endif dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int nblocks = ((x_size - 1) / OPS_block_size_x + 1) * ((y_size - 1) / OPS_block_size_y + 1) * z_size; int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double)); reduct_size = MAX(reduct_size, sizeof(double) * 1); reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double)); reduct_size = MAX(reduct_size, sizeof(double) * 1); reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double)); reduct_size = MAX(reduct_size, sizeof(double) * 1); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg2.data = OPS_reduct_h + reduct_bytes; arg2.data_d = OPS_reduct_d + reduct_bytes; for (int b = 0; b < maxblocks; b++) for (int d = 0; d < 1; d++) ((double *)arg2.data)[d + b * 1] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double)); arg3.data = OPS_reduct_h + reduct_bytes; arg3.data_d = OPS_reduct_d + reduct_bytes; for (int b = 0; b < maxblocks; b++) for (int d = 0; d < 1; d++) ((double *)arg3.data)[d + b * 1] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double)); arg5.data = OPS_reduct_h + reduct_bytes; arg5.data_d = OPS_reduct_d + reduct_bytes; for (int b = 0; b < maxblocks; b++) for (int d = 0; d < 1; d++) ((double *)arg5.data)[d + b * 1] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double)); mvReductArraysToDevice(reduct_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat4 = args[4].dat->elem_size; char *p_a[6]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) 
d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; ops_H_D_exchanges_device(args, 6); ops_halo_exchanges(args, 6, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[39].mpi_time += t2 - t1; } int nshared = 0; int nthread = OPS_block_size_x * OPS_block_size_y; nshared = MAX(nshared, sizeof(double) * 1); nshared = MAX(nshared, sizeof(double) * 1); nshared = MAX(nshared, sizeof(double) * 1); nshared = 
MAX(nshared * nthread, reduct_size * nthread); // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_calc_dt_kernel_get), dim3(grid), dim3(tblock), nshared, 0, (double *)p_a[0], (double *)p_a[1], (double *)arg2.data_d, (double *)arg3.data_d, (double *)p_a[4], (double *)arg5.data_d, x_size, y_size, z_size); mvReductArraysToHost(reduct_bytes); for (int b = 0; b < maxblocks; b++) { for (int d = 0; d < 1; d++) { arg2h[d] = arg2h[d] + ((double *)arg2.data)[d + b * 1]; } } arg2.data = (char *)arg2h; for (int b = 0; b < maxblocks; b++) { for (int d = 0; d < 1; d++) { arg3h[d] = arg3h[d] + ((double *)arg3.data)[d + b * 1]; } } arg3.data = (char *)arg3h; for (int b = 0; b < maxblocks; b++) { for (int d = 0; d < 1; d++) { arg5h[d] = arg5h[d] + ((double *)arg5.data)[d + b * 1]; } } arg5.data = (char *)arg5h; if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[39].time += t1 - t2; } ops_set_dirtybit_device(args, 6); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[39].mpi_time += t2 - t1; OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg4); } }
ea69add02d34ff21aac097e1c14642b25c172372.cu
// // auto-generated by ops.py // __constant__ int xdim0_calc_dt_kernel_get; int xdim0_calc_dt_kernel_get_h = -1; __constant__ int ydim0_calc_dt_kernel_get; int ydim0_calc_dt_kernel_get_h = -1; __constant__ int xdim1_calc_dt_kernel_get; int xdim1_calc_dt_kernel_get_h = -1; __constant__ int ydim1_calc_dt_kernel_get; int ydim1_calc_dt_kernel_get_h = -1; __constant__ int xdim4_calc_dt_kernel_get; int xdim4_calc_dt_kernel_get_h = -1; __constant__ int ydim4_calc_dt_kernel_get; int ydim4_calc_dt_kernel_get_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC4 #define OPS_ACC0(x, y, z) \ (x + xdim0_calc_dt_kernel_get * (y) + \ xdim0_calc_dt_kernel_get * ydim0_calc_dt_kernel_get * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_calc_dt_kernel_get * (y) + \ xdim1_calc_dt_kernel_get * ydim1_calc_dt_kernel_get * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_calc_dt_kernel_get * (y) + \ xdim4_calc_dt_kernel_get * ydim4_calc_dt_kernel_get * (z)) // user function __device__ void calc_dt_kernel_get_gpu(const double *cellx, const double *celly, double *xl_pos, double *yl_pos, const double *cellz, double *zl_pos) { *xl_pos = cellx[OPS_ACC0(0, 0, 0)]; *yl_pos = celly[OPS_ACC1(0, 0, 0)]; *zl_pos = cellz[OPS_ACC4(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC4 __global__ void ops_calc_dt_kernel_get(const double *__restrict arg0, const double *__restrict arg1, double *__restrict arg2, double *__restrict arg3, const double *__restrict arg4, double *__restrict arg5, int size0, int size1, int size2) { double arg2_l[1]; double arg3_l[1]; double arg5_l[1]; for (int d = 0; d < 1; d++) arg2_l[d] = ZERO_double; for (int d = 0; d < 1; d++) arg3_l[d] = ZERO_double; for (int d = 0; d < 1; d++) arg5_l[d] = ZERO_double; int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim0_calc_dt_kernel_get + idx_z * 0 * 1 * xdim0_calc_dt_kernel_get * 
ydim0_calc_dt_kernel_get; arg1 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim1_calc_dt_kernel_get + idx_z * 0 * 1 * xdim1_calc_dt_kernel_get * ydim1_calc_dt_kernel_get; arg4 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim4_calc_dt_kernel_get + idx_z * 1 * 1 * xdim4_calc_dt_kernel_get * ydim4_calc_dt_kernel_get; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { calc_dt_kernel_get_gpu(arg0, arg1, arg2_l, arg3_l, arg4, arg5_l); } for (int d = 0; d < 1; d++) ops_reduction_cuda<OPS_INC>(&arg2[d + (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y) * 1], arg2_l[d]); for (int d = 0; d < 1; d++) ops_reduction_cuda<OPS_INC>(&arg3[d + (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y) * 1], arg3_l[d]); for (int d = 0; d < 1; d++) ops_reduction_cuda<OPS_INC>(&arg5[d + (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y) * 1], arg5_l[d]); } // host stub function void ops_par_loop_calc_dt_kernel_get(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { // Timing double t1, t2, c1, c2; ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 6, range, 39)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(39, "calc_dt_kernel_get"); OPS_kernels[39].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == 
MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != xdim0_calc_dt_kernel_get_h || ydim0 != ydim0_calc_dt_kernel_get_h || xdim1 != xdim1_calc_dt_kernel_get_h || ydim1 != ydim1_calc_dt_kernel_get_h || xdim4 != xdim4_calc_dt_kernel_get_h || ydim4 != ydim4_calc_dt_kernel_get_h) { cudaMemcpyToSymbol(xdim0_calc_dt_kernel_get, &xdim0, sizeof(int)); xdim0_calc_dt_kernel_get_h = xdim0; cudaMemcpyToSymbol(ydim0_calc_dt_kernel_get, &ydim0, sizeof(int)); ydim0_calc_dt_kernel_get_h = ydim0; cudaMemcpyToSymbol(xdim1_calc_dt_kernel_get, &xdim1, sizeof(int)); xdim1_calc_dt_kernel_get_h = xdim1; cudaMemcpyToSymbol(ydim1_calc_dt_kernel_get, &ydim1, sizeof(int)); ydim1_calc_dt_kernel_get_h = ydim1; cudaMemcpyToSymbol(xdim4_calc_dt_kernel_get, &xdim4, sizeof(int)); xdim4_calc_dt_kernel_get_h = xdim4; cudaMemcpyToSymbol(ydim4_calc_dt_kernel_get, &ydim4, sizeof(int)); ydim4_calc_dt_kernel_get_h = ydim4; } #ifdef OPS_MPI double *arg2h = (double *)(((ops_reduction)args[2].data)->data + ((ops_reduction)args[2].data)->size * block->index); #else double *arg2h = (double *)(((ops_reduction)args[2].data)->data); #endif #ifdef OPS_MPI double *arg3h = (double *)(((ops_reduction)args[3].data)->data + ((ops_reduction)args[3].data)->size * block->index); #else double *arg3h = (double *)(((ops_reduction)args[3].data)->data); #endif #ifdef OPS_MPI double *arg5h = (double *)(((ops_reduction)args[5].data)->data + ((ops_reduction)args[5].data)->size * block->index); #else double 
*arg5h = (double *)(((ops_reduction)args[5].data)->data); #endif dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int nblocks = ((x_size - 1) / OPS_block_size_x + 1) * ((y_size - 1) / OPS_block_size_y + 1) * z_size; int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double)); reduct_size = MAX(reduct_size, sizeof(double) * 1); reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double)); reduct_size = MAX(reduct_size, sizeof(double) * 1); reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double)); reduct_size = MAX(reduct_size, sizeof(double) * 1); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg2.data = OPS_reduct_h + reduct_bytes; arg2.data_d = OPS_reduct_d + reduct_bytes; for (int b = 0; b < maxblocks; b++) for (int d = 0; d < 1; d++) ((double *)arg2.data)[d + b * 1] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double)); arg3.data = OPS_reduct_h + reduct_bytes; arg3.data_d = OPS_reduct_d + reduct_bytes; for (int b = 0; b < maxblocks; b++) for (int d = 0; d < 1; d++) ((double *)arg3.data)[d + b * 1] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double)); arg5.data = OPS_reduct_h + reduct_bytes; arg5.data_d = OPS_reduct_d + reduct_bytes; for (int b = 0; b < maxblocks; b++) for (int d = 0; d < 1; d++) ((double *)arg5.data)[d + b * 1] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks * 1 * sizeof(double)); mvReductArraysToDevice(reduct_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat4 = args[4].dat->elem_size; char *p_a[6]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - 
args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; ops_H_D_exchanges_device(args, 6); ops_halo_exchanges(args, 6, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[39].mpi_time += t2 - t1; } int nshared = 0; int nthread = OPS_block_size_x * OPS_block_size_y; nshared = MAX(nshared, sizeof(double) * 1); nshared = MAX(nshared, sizeof(double) * 1); nshared = MAX(nshared, sizeof(double) * 1); nshared = MAX(nshared * nthread, reduct_size * nthread); // call kernel wrapper function, passing in pointers to data 
ops_calc_dt_kernel_get<<<grid, tblock, nshared>>>( (double *)p_a[0], (double *)p_a[1], (double *)arg2.data_d, (double *)arg3.data_d, (double *)p_a[4], (double *)arg5.data_d, x_size, y_size, z_size); mvReductArraysToHost(reduct_bytes); for (int b = 0; b < maxblocks; b++) { for (int d = 0; d < 1; d++) { arg2h[d] = arg2h[d] + ((double *)arg2.data)[d + b * 1]; } } arg2.data = (char *)arg2h; for (int b = 0; b < maxblocks; b++) { for (int d = 0; d < 1; d++) { arg3h[d] = arg3h[d] + ((double *)arg3.data)[d + b * 1]; } } arg3.data = (char *)arg3h; for (int b = 0; b < maxblocks; b++) { for (int d = 0; d < 1; d++) { arg5h[d] = arg5h[d] + ((double *)arg5.data)[d + b * 1]; } } arg5.data = (char *)arg5h; if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[39].time += t1 - t2; } ops_set_dirtybit_device(args, 6); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[39].mpi_time += t2 - t1; OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg4); } }
058b0405d7224b85959789a82a468985cff3cd27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Udacity HW5 Histogramming for Speed The goal of this assignment is compute a histogram as fast as possible. We have simplified the problem as much as possible to allow you to focus solely on the histogramming algorithm. The input values that you need to histogram are already the exact bins that need to be updated. This is unlike in HW3 where you needed to compute the range of the data and then do: bin = (val - valMin) / valRange to determine the bin. Here the bin is just: bin = val so the serial histogram calculation looks like: for (i = 0; i < numElems; ++i) histo[val[i]]++; That's it! Your job is to make it run as fast as possible! The values are normally distributed - you may take advantage of this fact in your implementation. */ #include "utils.h" #include "reference_calc.cpp" #define BLOCK_SIZE_MAX 512 // i.e. maximum number of threads per block #define GRID_SIZE_MAX 512 // i.e. maximum number of blocks #define NUMBER_OF_ELEMS_PER_THREAD 16 // Number of elements (values) to be processed per thread __global__ void histogramKernel(const unsigned int* const d_In, //INPUT: values unsigned int* const d_Out, //OUPUT: histogram int numVals, unsigned int valsOffset, unsigned int numBins) { extern __shared__ unsigned int s_histogramKernel_Out[]; int threadsPerBlock = blockDim.x * blockDim.y; int threadsPerGrid = threadsPerBlock * gridDim.x * gridDim.y; int blockId = blockIdx.x + (blockIdx.y * gridDim.x); int threadId = threadIdx.x + (threadIdx.y * blockDim.x); for ( int i = 0; i < (numBins / threadsPerBlock); i++) { int _index = i * threadsPerBlock + threadId; if (_index < numBins) { s_histogramKernel_Out[_index] = 0; } } __syncthreads(); int myId = (blockId * threadsPerBlock) + threadId; for ( int _step = 0; _step < NUMBER_OF_ELEMS_PER_THREAD; _step++) { int _myTrueId = myId + _step * threadsPerGrid; if ( (_myTrueId + valsOffset) >= numVals ) { break; } else { unsigned int _in = 
d_In[_myTrueId]; atomicAdd(&(s_histogramKernel_Out[_in]), 1); } } __syncthreads(); for ( int i = 0; i < (numBins / threadsPerBlock); i++) { int _index = i * threadsPerBlock + threadId; if (_index < numBins) { atomicAdd(&(d_Out[_index]), s_histogramKernel_Out[_index]); } } } void computeHistogram(const unsigned int* const d_In, //INPUT: values unsigned int* const d_Out, //OUTPUT: histogram const unsigned int numBins, const unsigned int numElems) { unsigned int _numElemsProcessed = 0; dim3 _block(BLOCK_SIZE_MAX); while (_numElemsProcessed < numElems) { int numElemGroupsLeft = (numElems - _numElemsProcessed - 1) / NUMBER_OF_ELEMS_PER_THREAD + 1; int _gridSize = (numElemGroupsLeft - 1) / BLOCK_SIZE_MAX + 1; _gridSize = _gridSize < GRID_SIZE_MAX ? _gridSize : GRID_SIZE_MAX; dim3 _grid(_gridSize); // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( histogramKernel), dim3(_grid), dim3(_block), (numBins * sizeof(unsigned int)), 0, &d_In[_numElemsProcessed], d_Out, numElems, _numElemsProcessed, numBins); _numElemsProcessed += _gridSize * BLOCK_SIZE_MAX * NUMBER_OF_ELEMS_PER_THREAD; } //if you want to use/launch more than one kernel, //feel free hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); /* delete[] h_vals; delete[] h_histo; delete[] your_histo;*/ }
058b0405d7224b85959789a82a468985cff3cd27.cu
/* Udacity HW5 Histogramming for Speed The goal of this assignment is compute a histogram as fast as possible. We have simplified the problem as much as possible to allow you to focus solely on the histogramming algorithm. The input values that you need to histogram are already the exact bins that need to be updated. This is unlike in HW3 where you needed to compute the range of the data and then do: bin = (val - valMin) / valRange to determine the bin. Here the bin is just: bin = val so the serial histogram calculation looks like: for (i = 0; i < numElems; ++i) histo[val[i]]++; That's it! Your job is to make it run as fast as possible! The values are normally distributed - you may take advantage of this fact in your implementation. */ #include "utils.h" #include "reference_calc.cpp" #define BLOCK_SIZE_MAX 512 // i.e. maximum number of threads per block #define GRID_SIZE_MAX 512 // i.e. maximum number of blocks #define NUMBER_OF_ELEMS_PER_THREAD 16 // Number of elements (values) to be processed per thread __global__ void histogramKernel(const unsigned int* const d_In, //INPUT: values unsigned int* const d_Out, //OUPUT: histogram int numVals, unsigned int valsOffset, unsigned int numBins) { extern __shared__ unsigned int s_histogramKernel_Out[]; int threadsPerBlock = blockDim.x * blockDim.y; int threadsPerGrid = threadsPerBlock * gridDim.x * gridDim.y; int blockId = blockIdx.x + (blockIdx.y * gridDim.x); int threadId = threadIdx.x + (threadIdx.y * blockDim.x); for ( int i = 0; i < (numBins / threadsPerBlock); i++) { int _index = i * threadsPerBlock + threadId; if (_index < numBins) { s_histogramKernel_Out[_index] = 0; } } __syncthreads(); int myId = (blockId * threadsPerBlock) + threadId; for ( int _step = 0; _step < NUMBER_OF_ELEMS_PER_THREAD; _step++) { int _myTrueId = myId + _step * threadsPerGrid; if ( (_myTrueId + valsOffset) >= numVals ) { break; } else { unsigned int _in = d_In[_myTrueId]; atomicAdd(&(s_histogramKernel_Out[_in]), 1); } } __syncthreads(); for 
( int i = 0; i < (numBins / threadsPerBlock); i++) { int _index = i * threadsPerBlock + threadId; if (_index < numBins) { atomicAdd(&(d_Out[_index]), s_histogramKernel_Out[_index]); } } } void computeHistogram(const unsigned int* const d_In, //INPUT: values unsigned int* const d_Out, //OUTPUT: histogram const unsigned int numBins, const unsigned int numElems) { unsigned int _numElemsProcessed = 0; dim3 _block(BLOCK_SIZE_MAX); while (_numElemsProcessed < numElems) { int numElemGroupsLeft = (numElems - _numElemsProcessed - 1) / NUMBER_OF_ELEMS_PER_THREAD + 1; int _gridSize = (numElemGroupsLeft - 1) / BLOCK_SIZE_MAX + 1; _gridSize = _gridSize < GRID_SIZE_MAX ? _gridSize : GRID_SIZE_MAX; dim3 _grid(_gridSize); // Launch a kernel on the GPU with one thread for each element. histogramKernel<<<_grid, _block, (numBins * sizeof(unsigned int))>>> (&d_In[_numElemsProcessed], d_Out, numElems, _numElemsProcessed, numBins); _numElemsProcessed += _gridSize * BLOCK_SIZE_MAX * NUMBER_OF_ELEMS_PER_THREAD; } //if you want to use/launch more than one kernel, //feel free cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); /* delete[] h_vals; delete[] h_histo; delete[] your_histo;*/ }
d25a1184e7fc69551b32f0ed636b6c461facaef0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include "kernel_hip.cuh"

// Downsamples an 8-bit image by 2x: each output pixel is the mean of a 4x4
// input window anchored at (2*outputX, 2*outputY), so neighbouring windows
// overlap by two pixels. One thread per output pixel, 32x32 blocks.
// NOTE(review): for the last output row/column the window reads up to input
// coordinate 2*(outputDim-1)+3; if inputWidth/inputHeight are exactly twice
// the output dimensions this reads past the input buffer — confirm the
// caller pads the input or guarantees a larger input.
__global__ void kernel_impl(
    const uint8_t* input,
    const size_t inputWidth,
    uint8_t* output,
    const size_t outputWidth)
{
    int outputX = blockIdx.x * 32 + threadIdx.x;
    int outputY = blockIdx.y * 32 + threadIdx.y;
    int inputXBasic = outputX * 2;
    int inputYBasic = outputY * 2;
    float result = 0.0f;
    for (int x = 0; x < 4; x++) {
        for (int y = 0; y < 4; y++) {
            int inputX = inputXBasic + x;
            int inputY = inputYBasic + y;
            int inputIndex = inputY * inputWidth + inputX;
            result += input[inputIndex];
        }
    }
    // Mean of the 16 sampled pixels, truncated back to 8 bits.
    output[outputY * outputWidth + outputX] = static_cast<uint8_t>(result / 16.0f);
}

// Host wrapper: allocates device buffers, uploads the input, runs the
// downsampling kernel and copies the result back.
// Assumes outputWidth and outputHeight are multiples of 32 (no tail blocks).
// NOTE(review): the host->device copy of `output` looks unnecessary — every
// output pixel within the grid is overwritten by the kernel.
void kernel(
    const std::size_t inputWidth,
    const std::size_t inputHeight,
    const uint8_t* input,
    const std::size_t outputWidth,
    const std::size_t outputHeight,
    uint8_t* output)
{
    dim3 grid(outputWidth / 32, outputHeight / 32);
    dim3 block(32, 32);
    const std::size_t inputSize = inputWidth * inputHeight;
    const std::size_t outputSize = outputWidth * outputHeight;
    uint8_t* inputGpu;
    uint8_t* outputGpu;
    gpuCheckError(hipMalloc(reinterpret_cast<void**>(&inputGpu), inputSize * sizeof(uint8_t)));
    gpuCheckError(hipMalloc(reinterpret_cast<void**>(&outputGpu), outputSize * sizeof(uint8_t)));
    gpuCheckError(hipMemcpy(inputGpu, input, inputSize * sizeof(uint8_t), hipMemcpyHostToDevice));
    gpuCheckError(hipMemcpy(outputGpu, output, outputSize * sizeof(uint8_t), hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( kernel_impl) , dim3(grid), dim3(block) , 0, 0, inputGpu, inputWidth, outputGpu, outputWidth);
    // Blocking copy on the default stream also synchronizes with the kernel.
    gpuCheckError(hipMemcpy(output, outputGpu, outputSize * sizeof(uint8_t), hipMemcpyDeviceToHost));
    gpuCheckError(hipFree(inputGpu));
    gpuCheckError(hipFree(outputGpu));
}
d25a1184e7fc69551b32f0ed636b6c461facaef0.cu
#include <stdint.h>
#include "kernel.cuh"

// Downsamples an 8-bit image by 2x: each output pixel is the mean of a 4x4
// input window anchored at (2*outputX, 2*outputY), so neighbouring windows
// overlap by two pixels. One thread per output pixel, 32x32 blocks.
// NOTE(review): for the last output row/column the window reads up to input
// coordinate 2*(outputDim-1)+3; if inputWidth/inputHeight are exactly twice
// the output dimensions this reads past the input buffer — confirm the
// caller pads the input or guarantees a larger input.
__global__ void kernel_impl(
    const uint8_t* input,
    const size_t inputWidth,
    uint8_t* output,
    const size_t outputWidth)
{
    int outputX = blockIdx.x * 32 + threadIdx.x;
    int outputY = blockIdx.y * 32 + threadIdx.y;
    int inputXBasic = outputX * 2;
    int inputYBasic = outputY * 2;
    float result = 0.0f;
    for (int x = 0; x < 4; x++) {
        for (int y = 0; y < 4; y++) {
            int inputX = inputXBasic + x;
            int inputY = inputYBasic + y;
            int inputIndex = inputY * inputWidth + inputX;
            result += input[inputIndex];
        }
    }
    // Mean of the 16 sampled pixels, truncated back to 8 bits.
    output[outputY * outputWidth + outputX] = static_cast<uint8_t>(result / 16.0f);
}

// Host wrapper: allocates device buffers, uploads the input, runs the
// downsampling kernel and copies the result back.
// Assumes outputWidth and outputHeight are multiples of 32 (no tail blocks).
// NOTE(review): the host->device copy of `output` looks unnecessary — every
// output pixel within the grid is overwritten by the kernel.
void kernel(
    const std::size_t inputWidth,
    const std::size_t inputHeight,
    const uint8_t* input,
    const std::size_t outputWidth,
    const std::size_t outputHeight,
    uint8_t* output)
{
    dim3 grid(outputWidth / 32, outputHeight / 32);
    dim3 block(32, 32);
    const std::size_t inputSize = inputWidth * inputHeight;
    const std::size_t outputSize = outputWidth * outputHeight;
    uint8_t* inputGpu;
    uint8_t* outputGpu;
    gpuCheckError(cudaMalloc(reinterpret_cast<void**>(&inputGpu), inputSize * sizeof(uint8_t)));
    gpuCheckError(cudaMalloc(reinterpret_cast<void**>(&outputGpu), outputSize * sizeof(uint8_t)));
    gpuCheckError(cudaMemcpy(inputGpu, input, inputSize * sizeof(uint8_t), cudaMemcpyHostToDevice));
    gpuCheckError(cudaMemcpy(outputGpu, output, outputSize * sizeof(uint8_t), cudaMemcpyHostToDevice));
    kernel_impl <<< grid, block >>> (inputGpu, inputWidth, outputGpu, outputWidth);
    // Blocking copy on the default stream also synchronizes with the kernel.
    gpuCheckError(cudaMemcpy(output, outputGpu, outputSize * sizeof(uint8_t), cudaMemcpyDeviceToHost));
    gpuCheckError(cudaFree(inputGpu));
    gpuCheckError(cudaFree(outputGpu));
}
13246557fa37ebef886650671a2c36de9866bc97.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/index_sample_grad_kernel.h"

#include <algorithm>
#include <vector>

#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace phi {

namespace {
// Upper bounds for the 2-D launch configuration computed below.
#define PREDEFINED_BLOCK_SIZE_X 512
#define PREDEFINED_BLOCK_SIZE 1024
#define MIN(a, b) ((a) < (b) ? (a) : (b))
}  // namespace

// Scatters out_grad back into in_grad: for each batch row j, the input
// column named by index[j, i] receives a gradient contribution.
// Grid-strided in both dimensions: index_i walks the index columns,
// index_j walks the batch rows.
// same_data_in_row == true means the same input column may be sampled
// more than once within a row, so the scatter must be atomic.
// NOTE(review): the atomic branch reads out_grad[sample_idx] while the
// non-atomic branch reads out_grad[index_idx]; confirm this asymmetry is
// intended rather than a copy-paste slip.
template <typename T, typename IndexT = int>
__global__ void IndexSampleGrad(const IndexT* index,
                                T* in_grad,
                                const T* out_grad,
                                size_t index_length,
                                size_t input_length,
                                size_t batch_size,
                                bool same_data_in_row = true) {
  unsigned int index_i = blockDim.x * blockIdx.x + threadIdx.x;
  unsigned int index_j = blockDim.y * blockIdx.y + threadIdx.y;
  for (; index_j < batch_size; index_j += blockDim.y * gridDim.y) {
    index_i = blockDim.x * blockIdx.x + threadIdx.x;
    for (; index_i < index_length; index_i += blockDim.x * gridDim.x) {
      unsigned int index_idx = index_j * index_length + index_i;  // flat [j, i] into index/out_grad
      unsigned int in_idx = index_j * input_length + index_i;
      IndexT sample_idx = index[index_idx];
      if (same_data_in_row) {
        // in_idx - index_i is the start of row j of in_grad.
        paddle::platform::CudaAtomicAdd(
            &(in_grad[in_idx - index_i + sample_idx]), out_grad[sample_idx]);
      } else {
        in_grad[in_idx - index_i + sample_idx] = out_grad[index_idx];
      }
    }
  }
}

// Gradient of index_sample: zero-fills x_grad, then scatters out_grad into
// it according to `index` (which must be int32 or int64).
template <typename T, typename Context>
void IndexSampleGradKernel(const Context& ctx,
                           const DenseTensor& x,
                           const DenseTensor& index,
                           const DenseTensor& out_grad,
                           DenseTensor* x_grad) {
  const T* output_grad_data = out_grad.data<T>();
  T* input_grad_data = ctx.template Alloc<T>(x_grad);
  auto index_type = index.dtype();
  bool index_type_match =
      index_type == DataType::INT32 || index_type == DataType::INT64;
  PADDLE_ENFORCE_EQ(
      index_type_match,
      true,
      errors::InvalidArgument(
          "Input(Index) holds the wrong type, it holds %s, but "
          "desires to be %s or %s",
          paddle::framework::DataTypeToString(
              paddle::framework::TransToProtoVarType(index_type)),
          paddle::framework::DataTypeToString(
              paddle::framework::TransToProtoVarType(DataType::INT32)),
          paddle::framework::DataTypeToString(
              paddle::framework::TransToProtoVarType((DataType::INT64)))));
  auto stream = reinterpret_cast<const phi::GPUContext&>(ctx).stream();
  auto input_num = x.numel();
  auto input_dim = x.dims();
  auto index_dim = index.dims();
  size_t batch_size = index_dim[0];
  size_t input_length = input_dim[1];
  size_t index_length = index_dim[1];
  // A single-column index cannot hit the same input slot twice in one row,
  // so the non-atomic scatter path is safe in that case.
  bool same_data_in_index_row = index_length == 1 ? false : true;

  // Block shape: width covers the index columns (power of two, capped at
  // PREDEFINED_BLOCK_SIZE_X); height fills the remaining 1024-thread budget.
  auto block_width = paddle::platform::RoundToPowerOfTwo(index_length);
  block_width = MIN(block_width, PREDEFINED_BLOCK_SIZE_X);
  auto block_height =
      paddle::platform::RoundToPowerOfTwo(index_length * batch_size) /
      block_width;
  block_height = MIN(block_height, PREDEFINED_BLOCK_SIZE / block_width);
  dim3 block_dim(block_width, block_height);
  dim3 grid_dim((index_length + block_dim.x - 1) / block_dim.x,
                (batch_size + block_dim.y - 1) / block_dim.y);
  paddle::platform::LimitGridDim(ctx, &grid_dim);

  // x_grad accumulates contributions, so it must start at zero.
  phi::funcs::SetConstant<Context, T> set_zero;
  set_zero(ctx, x_grad, static_cast<T>(0));

  if (index_type == DataType::INT64) {
    const int64_t* index_data = index.data<int64_t>();
    hipLaunchKernelGGL(( IndexSampleGrad<T, int64_t>), dim3(grid_dim), dim3(block_dim), 0, stream,
        index_data,
        input_grad_data,
        output_grad_data,
        index_length,
        input_length,
        batch_size,
        same_data_in_index_row);
  } else if (index_type == DataType::INT32) {
    const int* index_data = index.data<int>();
    hipLaunchKernelGGL(( IndexSampleGrad<T, int>), dim3(grid_dim), dim3(block_dim), 0, stream,
        index_data,
        input_grad_data,
        output_grad_data,
        index_length,
        input_length,
        batch_size,
        same_data_in_index_row);
  }
}

}  // namespace phi

PD_REGISTER_KERNEL(index_sample_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::IndexSampleGradKernel,
                   float,
                   double,
                   int,
                   int64_t) {}
13246557fa37ebef886650671a2c36de9866bc97.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/index_sample_grad_kernel.h"

#include <algorithm>
#include <vector>

#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace phi {

namespace {
// Upper bounds for the 2-D launch configuration computed below.
#define PREDEFINED_BLOCK_SIZE_X 512
#define PREDEFINED_BLOCK_SIZE 1024
#define MIN(a, b) ((a) < (b) ? (a) : (b))
}  // namespace

// Scatters out_grad back into in_grad: for each batch row j, the input
// column named by index[j, i] receives a gradient contribution.
// Grid-strided in both dimensions: index_i walks the index columns,
// index_j walks the batch rows.
// same_data_in_row == true means the same input column may be sampled
// more than once within a row, so the scatter must be atomic.
// NOTE(review): the atomic branch reads out_grad[sample_idx] while the
// non-atomic branch reads out_grad[index_idx]; confirm this asymmetry is
// intended rather than a copy-paste slip.
template <typename T, typename IndexT = int>
__global__ void IndexSampleGrad(const IndexT* index,
                                T* in_grad,
                                const T* out_grad,
                                size_t index_length,
                                size_t input_length,
                                size_t batch_size,
                                bool same_data_in_row = true) {
  unsigned int index_i = blockDim.x * blockIdx.x + threadIdx.x;
  unsigned int index_j = blockDim.y * blockIdx.y + threadIdx.y;
  for (; index_j < batch_size; index_j += blockDim.y * gridDim.y) {
    index_i = blockDim.x * blockIdx.x + threadIdx.x;
    for (; index_i < index_length; index_i += blockDim.x * gridDim.x) {
      unsigned int index_idx = index_j * index_length + index_i;  // flat [j, i] into index/out_grad
      unsigned int in_idx = index_j * input_length + index_i;
      IndexT sample_idx = index[index_idx];
      if (same_data_in_row) {
        // in_idx - index_i is the start of row j of in_grad.
        paddle::platform::CudaAtomicAdd(
            &(in_grad[in_idx - index_i + sample_idx]), out_grad[sample_idx]);
      } else {
        in_grad[in_idx - index_i + sample_idx] = out_grad[index_idx];
      }
    }
  }
}

// Gradient of index_sample: zero-fills x_grad, then scatters out_grad into
// it according to `index` (which must be int32 or int64).
template <typename T, typename Context>
void IndexSampleGradKernel(const Context& ctx,
                           const DenseTensor& x,
                           const DenseTensor& index,
                           const DenseTensor& out_grad,
                           DenseTensor* x_grad) {
  const T* output_grad_data = out_grad.data<T>();
  T* input_grad_data = ctx.template Alloc<T>(x_grad);
  auto index_type = index.dtype();
  bool index_type_match =
      index_type == DataType::INT32 || index_type == DataType::INT64;
  PADDLE_ENFORCE_EQ(
      index_type_match,
      true,
      errors::InvalidArgument(
          "Input(Index) holds the wrong type, it holds %s, but "
          "desires to be %s or %s",
          paddle::framework::DataTypeToString(
              paddle::framework::TransToProtoVarType(index_type)),
          paddle::framework::DataTypeToString(
              paddle::framework::TransToProtoVarType(DataType::INT32)),
          paddle::framework::DataTypeToString(
              paddle::framework::TransToProtoVarType((DataType::INT64)))));
  auto stream = reinterpret_cast<const phi::GPUContext&>(ctx).stream();
  auto input_num = x.numel();
  auto input_dim = x.dims();
  auto index_dim = index.dims();
  size_t batch_size = index_dim[0];
  size_t input_length = input_dim[1];
  size_t index_length = index_dim[1];
  // A single-column index cannot hit the same input slot twice in one row,
  // so the non-atomic scatter path is safe in that case.
  bool same_data_in_index_row = index_length == 1 ? false : true;

  // Block shape: width covers the index columns (power of two, capped at
  // PREDEFINED_BLOCK_SIZE_X); height fills the remaining 1024-thread budget.
  auto block_width = paddle::platform::RoundToPowerOfTwo(index_length);
  block_width = MIN(block_width, PREDEFINED_BLOCK_SIZE_X);
  auto block_height =
      paddle::platform::RoundToPowerOfTwo(index_length * batch_size) /
      block_width;
  block_height = MIN(block_height, PREDEFINED_BLOCK_SIZE / block_width);
  dim3 block_dim(block_width, block_height);
  dim3 grid_dim((index_length + block_dim.x - 1) / block_dim.x,
                (batch_size + block_dim.y - 1) / block_dim.y);
  paddle::platform::LimitGridDim(ctx, &grid_dim);

  // x_grad accumulates contributions, so it must start at zero.
  phi::funcs::SetConstant<Context, T> set_zero;
  set_zero(ctx, x_grad, static_cast<T>(0));

  if (index_type == DataType::INT64) {
    const int64_t* index_data = index.data<int64_t>();
    IndexSampleGrad<T, int64_t><<<grid_dim, block_dim, 0, stream>>>(
        index_data,
        input_grad_data,
        output_grad_data,
        index_length,
        input_length,
        batch_size,
        same_data_in_index_row);
  } else if (index_type == DataType::INT32) {
    const int* index_data = index.data<int>();
    IndexSampleGrad<T, int><<<grid_dim, block_dim, 0, stream>>>(
        index_data,
        input_grad_data,
        output_grad_data,
        index_length,
        input_length,
        batch_size,
        same_data_in_index_row);
  }
}

}  // namespace phi

PD_REGISTER_KERNEL(index_sample_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::IndexSampleGradKernel,
                   float,
                   double,
                   int,
                   int64_t) {}
476035e5fb118e0e7897a0d1a50b3a4c9288b251.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h"  // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>

#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
#include "../include/ContAcq-IntClk.h"

// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;

// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);

////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions

// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
  if(hipSuccess != err){
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
    exit(-1);
  }
}

// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
  hipError_t err = hipGetLastError();
  if (hipSuccess != err){
    fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
    exit(-1);
  }
}
// end of CUDA Helper Functions

// Power microbenchmark kernel: lanes 0..23 of every warp spin on a chain of
// dependent float additions for ITERATIONS rounds; the result is written to
// C so the compiler cannot eliminate the loop.
// FIX: Value1..Value3 were read before ever being written (undefined
// behaviour); they now start at zero, preserving the instruction mix.
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  //Do Some Computation
  float Value1 = 0.0f;
  float Value2 = 0.0f;
  float Value3 = 0.0f;
  float Value;
  float I1=A[i];
  float I2=B[i];
  // Excessive Addition access
  if(((i%32)<=23)) {
    for(unsigned k=0; k<ITERATIONS;k++) {
      Value1=I1+I2;
      Value3=I1-I2;
      Value1+=Value2;
      Value1+=Value2;
      Value2=Value3-Value1;
      Value1=Value2+Value3;
    }
  }
  __syncthreads();
  Value=Value1;
  C[i]=Value+Value2;
}

int main()
{
  printf("Power Microbenchmarks\n");
  int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;  // one element per launched thread
  size_t size = N * sizeof(float);

  // Allocate input vectors h_A and h_B in host memory
  h_A = (float*)malloc(size);
  if (h_A == 0) CleanupResources();
  h_B = (float*)malloc(size);
  if (h_B == 0) CleanupResources();
  h_C = (float*)malloc(size);
  if (h_C == 0) CleanupResources();

  // Initialize input vectors
  RandomInit(h_A, N);
  RandomInit(h_B, N);

  // Allocate vectors in device memory
  printf("before\n");
  checkCudaErrors( hipMalloc((void**)&d_A, size) );
  checkCudaErrors( hipMalloc((void**)&d_B, size) );
  checkCudaErrors( hipMalloc((void**)&d_C, size) );
  printf("after\n");

  // Copy vectors from host memory to device memory
  checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
  checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );

  //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
  dim3 dimGrid(NUM_OF_BLOCKS,1);
  dim3 dimBlock(THREADS_PER_BLOCK,1);
  dim3 dimGrid2(1,1);
  dim3 dimBlock2(1,1);

  // Time the kernel while the DAQ samples power.
  CUT_SAFE_CALL(cutCreateTimer(&my_timer));
  TaskHandle taskhandle = LaunchDAQ();
  CUT_SAFE_CALL(cutStartTimer(my_timer));
  printf("execution time = %f\n", cutGetTimerValue(my_timer));
  hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
  CUDA_SAFE_CALL( hipDeviceSynchronize() );
  printf("execution time = %f\n", cutGetTimerValue(my_timer));
  getLastCudaError("kernel launch failure");
  CUDA_SAFE_CALL( hipDeviceSynchronize() );
  CUT_SAFE_CALL(cutStopTimer(my_timer));
  TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
  printf("execution time = %f\n", cutGetTimerValue(my_timer));
  CUT_SAFE_CALL(cutDeleteTimer(my_timer));

#ifdef _DEBUG
  checkCudaErrors( hipDeviceSynchronize() );
#endif

  // Copy result from device memory to host memory
  // h_C contains the result in host memory
  checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
  CleanupResources();
  return 0;
}

void CleanupResources(void)
{
  // Free device memory
  if (d_A) hipFree(d_A);
  if (d_B) hipFree(d_B);
  if (d_C) hipFree(d_C);
  // Free host memory
  if (h_A) free(h_A);
  if (h_B) free(h_B);
  if (h_C) free(h_C);
}

// Allocates an array with random float entries in [0, 1].
// FIX: `rand() / RAND_MAX` was an INTEGER division that yielded 0 for every
// draw except the (unlikely) exact RAND_MAX; divide in float instead.
void RandomInit(float* data, int n)
{
  for (int i = 0; i < n; ++i){
    data[i] = rand() / (float)RAND_MAX;
  }
}
476035e5fb118e0e7897a0d1a50b3a4c9288b251.cu
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h"  // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>

#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
#include "../include/ContAcq-IntClk.h"

// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;

// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);

////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions

// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
  if(cudaSuccess != err){
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
    exit(-1);
  }
}

// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
  cudaError_t err = cudaGetLastError();
  if (cudaSuccess != err){
    fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
    exit(-1);
  }
}
// end of CUDA Helper Functions

// Power microbenchmark kernel: lanes 0..23 of every warp spin on a chain of
// dependent float additions for ITERATIONS rounds; the result is written to
// C so the compiler cannot eliminate the loop.
// FIX: Value1..Value3 were read before ever being written (undefined
// behaviour); they now start at zero, preserving the instruction mix.
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  //Do Some Computation
  float Value1 = 0.0f;
  float Value2 = 0.0f;
  float Value3 = 0.0f;
  float Value;
  float I1=A[i];
  float I2=B[i];
  // Excessive Addition access
  if(((i%32)<=23)) {
    for(unsigned k=0; k<ITERATIONS;k++) {
      Value1=I1+I2;
      Value3=I1-I2;
      Value1+=Value2;
      Value1+=Value2;
      Value2=Value3-Value1;
      Value1=Value2+Value3;
    }
  }
  __syncthreads();
  Value=Value1;
  C[i]=Value+Value2;
}

int main()
{
  printf("Power Microbenchmarks\n");
  int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;  // one element per launched thread
  size_t size = N * sizeof(float);

  // Allocate input vectors h_A and h_B in host memory
  h_A = (float*)malloc(size);
  if (h_A == 0) CleanupResources();
  h_B = (float*)malloc(size);
  if (h_B == 0) CleanupResources();
  h_C = (float*)malloc(size);
  if (h_C == 0) CleanupResources();

  // Initialize input vectors
  RandomInit(h_A, N);
  RandomInit(h_B, N);

  // Allocate vectors in device memory
  printf("before\n");
  checkCudaErrors( cudaMalloc((void**)&d_A, size) );
  checkCudaErrors( cudaMalloc((void**)&d_B, size) );
  checkCudaErrors( cudaMalloc((void**)&d_C, size) );
  printf("after\n");

  // Copy vectors from host memory to device memory
  checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
  checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );

  //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
  dim3 dimGrid(NUM_OF_BLOCKS,1);
  dim3 dimBlock(THREADS_PER_BLOCK,1);
  dim3 dimGrid2(1,1);
  dim3 dimBlock2(1,1);

  // Time the kernel while the DAQ samples power.
  CUT_SAFE_CALL(cutCreateTimer(&my_timer));
  TaskHandle taskhandle = LaunchDAQ();
  CUT_SAFE_CALL(cutStartTimer(my_timer));
  printf("execution time = %f\n", cutGetTimerValue(my_timer));
  PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
  CUDA_SAFE_CALL( cudaThreadSynchronize() );
  printf("execution time = %f\n", cutGetTimerValue(my_timer));
  getLastCudaError("kernel launch failure");
  CUDA_SAFE_CALL( cudaThreadSynchronize() );
  CUT_SAFE_CALL(cutStopTimer(my_timer));
  TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
  printf("execution time = %f\n", cutGetTimerValue(my_timer));
  CUT_SAFE_CALL(cutDeleteTimer(my_timer));

#ifdef _DEBUG
  checkCudaErrors( cudaDeviceSynchronize() );
#endif

  // Copy result from device memory to host memory
  // h_C contains the result in host memory
  checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
  CleanupResources();
  return 0;
}

void CleanupResources(void)
{
  // Free device memory
  if (d_A) cudaFree(d_A);
  if (d_B) cudaFree(d_B);
  if (d_C) cudaFree(d_C);
  // Free host memory
  if (h_A) free(h_A);
  if (h_B) free(h_B);
  if (h_C) free(h_C);
}

// Allocates an array with random float entries in [0, 1].
// FIX: `rand() / RAND_MAX` was an INTEGER division that yielded 0 for every
// draw except the (unlikely) exact RAND_MAX; divide in float instead.
void RandomInit(float* data, int n)
{
  for (int i = 0; i < n; ++i){
    data[i] = rand() / (float)RAND_MAX;
  }
}
07e4e927a69f775294ce2551784969364ad0faf5.hip
// !!! This is a file automatically generated by hipify!!!
#include <vector>

#include "caffe/filler.hpp"
#include "caffe/layers/inner_product_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

// Forward pass of the fully-connected layer: top = bottom * W(^T) + bias.
// Dispatches to the CUDA/ROCm BLAS helpers on the CUDA backend and to
// GreenTea (OpenCL) helpers otherwise. M_ == 1 (a single row) uses a GEMV.
template<typename Dtype>
void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                           const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const Dtype* weight = this->blobs_[0]->gpu_data();
  if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
    if (M_ == 1) {
      // Single sample: y = W * x, then optionally y += bias.
      caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype) 1., weight,
                            bottom_data, (Dtype) 0., top_data);
      if (bias_term_)
        caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
                              this->blobs_[1]->gpu_data(), top_data);
    } else {
      // Batched: top = bottom * W (W transposed unless transpose_ is set).
      caffe_gpu_gemm<Dtype>(CblasNoTrans,
                            transpose_ ? CblasNoTrans : CblasTrans,
                            M_, N_, K_, (Dtype) 1.,
                            bottom_data, weight, (Dtype) 0., top_data);
      if (bias_term_)
        // Rank-1 update adds the bias row to every sample.
        caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1,
                              (Dtype) 1.,
                              bias_multiplier_.gpu_data(),
                              this->blobs_[1]->gpu_data(), (Dtype) 1.,
                              top_data);
    }
#endif  // USE CUDA
  } else {
#ifdef USE_GREENTEA
    if (M_ == 1) {
      // Single sample, OpenCL path.
      greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, N_, K_,
                               (Dtype) 1., (cl_mem) weight, 0,
                               (cl_mem) bottom_data, 0, (Dtype) 0.,
                               (cl_mem) top_data, 0);
      if (bias_term_)
        greentea_gpu_axpy<Dtype>(this->device_->id(), N_,
                                 bias_multiplier_.cpu_data()[0],
                                 (cl_mem) (this->blobs_[1]->gpu_data()), 0,
                                 (cl_mem) top_data, 0);
    } else {
      // Batched, OpenCL path.
      greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
                               transpose_ ? CblasNoTrans : CblasTrans,
                               M_, N_, K_, (Dtype) 1.,
                               (cl_mem) bottom_data, 0, (cl_mem) weight, 0,
                               (Dtype) 0., (cl_mem) top_data, 0);
      if (bias_term_)
        greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
                                 CblasNoTrans, M_, N_, 1, (Dtype) 1.,
                                 (cl_mem) (bias_multiplier_.gpu_data()), 0,
                                 (cl_mem) (this->blobs_[1]->gpu_data()), 0,
                                 (Dtype) 1., (cl_mem) top_data, 0);
    }
#endif  // USE_GREENTEA
  }
}

// Backward pass: accumulates (beta = 1) the weight and bias gradients, and
// computes (beta = 0) the bottom-data gradient when requested.
template<typename Dtype>
void InnerProductLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
    if (this->param_propagate_down_[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      const Dtype* bottom_data = bottom[0]->gpu_data();
      // Gradient with respect to weight
      if (transpose_) {
        caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_,
                              (Dtype) 1., bottom_data, top_diff, (Dtype) 1.,
                              this->blobs_[0]->mutable_gpu_diff());
      } else {
        caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_,
                              (Dtype) 1., top_diff, bottom_data, (Dtype) 1.,
                              this->blobs_[0]->mutable_gpu_diff());
      }
    }
    if (bias_term_ && this->param_propagate_down_[1]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bias: sum top_diff over the batch.
      caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype) 1., top_diff,
                            bias_multiplier_.gpu_data(), (Dtype) 1.,
                            this->blobs_[1]->mutable_gpu_diff());
    }
    if (propagate_down[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bottom data
      if (transpose_) {
        caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_,
                              (Dtype) 1., top_diff,
                              this->blobs_[0]->gpu_data(), (Dtype) 0.,
                              bottom[0]->mutable_gpu_diff());
      } else {
        caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_,
                              (Dtype) 1., top_diff,
                              this->blobs_[0]->gpu_data(), (Dtype) 0.,
                              bottom[0]->mutable_gpu_diff());
      }
    }
#endif  // USE_ROCM
  } else {
#ifdef USE_GREENTEA
    if (this->param_propagate_down_[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      const Dtype* bottom_data = bottom[0]->gpu_data();
      // Gradient with respect to weight
      if (transpose_) {
        greentea_gpu_gemm<Dtype>(this->device_->id(), CblasTrans,
                                 CblasNoTrans, K_, N_, M_, (Dtype) 1.,
                                 (cl_mem) bottom_data, 0,
                                 (cl_mem) top_diff, 0, (Dtype) 1.,
                                 (cl_mem) (this->blobs_[0]->mutable_gpu_diff()),
                                 0);
      } else {
        greentea_gpu_gemm<Dtype>(this->device_->id(), CblasTrans,
                                 CblasNoTrans, N_, K_, M_, (Dtype) 1.,
                                 (cl_mem) top_diff, 0,
                                 (cl_mem) bottom_data, 0, (Dtype) 1.,
                                 (cl_mem) (this->blobs_[0]->mutable_gpu_diff()),
                                 0);
      }
    }
    if (bias_term_ && this->param_propagate_down_[1]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bias: sum top_diff over the batch.
      greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, M_, N_,
                               (Dtype) 1., (cl_mem) top_diff, 0,
                               (cl_mem) (bias_multiplier_.gpu_data()), 0,
                               (Dtype) 1.,
                               (cl_mem) (this->blobs_[1]->mutable_gpu_diff()),
                               0);
    }
    if (propagate_down[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bottom data
      if (transpose_) {
        greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
                                 CblasTrans, M_, K_, N_, (Dtype) 1.,
                                 (cl_mem) top_diff, 0,
                                 (cl_mem) (this->blobs_[0]->gpu_data()), 0,
                                 (Dtype) 0.,
                                 (cl_mem) (bottom[0]->mutable_gpu_diff()), 0);
      } else {
        greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
                                 CblasNoTrans, M_, K_, N_, (Dtype) 1.,
                                 (cl_mem) top_diff, 0,
                                 (cl_mem) (this->blobs_[0]->gpu_data()), 0,
                                 (Dtype) 0.,
                                 (cl_mem) (bottom[0]->mutable_gpu_diff()), 0);
      }
    }
#endif  // USE_GREENTEA
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer);

}  // namespace caffe
07e4e927a69f775294ce2551784969364ad0faf5.cu
#include <vector>

#include "caffe/filler.hpp"
#include "caffe/layers/inner_product_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

// Forward pass of the fully-connected layer: top = bottom * W(^T) + bias.
// Dispatches to the CUDA BLAS helpers on the CUDA backend and to GreenTea
// (OpenCL) helpers otherwise. M_ == 1 (a single row) uses a GEMV.
template<typename Dtype>
void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                           const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const Dtype* weight = this->blobs_[0]->gpu_data();
  if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    if (M_ == 1) {
      // Single sample: y = W * x, then optionally y += bias.
      caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype) 1., weight,
                            bottom_data, (Dtype) 0., top_data);
      if (bias_term_)
        caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
                              this->blobs_[1]->gpu_data(), top_data);
    } else {
      // Batched: top = bottom * W (W transposed unless transpose_ is set).
      caffe_gpu_gemm<Dtype>(CblasNoTrans,
                            transpose_ ? CblasNoTrans : CblasTrans,
                            M_, N_, K_, (Dtype) 1.,
                            bottom_data, weight, (Dtype) 0., top_data);
      if (bias_term_)
        // Rank-1 update adds the bias row to every sample.
        caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1,
                              (Dtype) 1.,
                              bias_multiplier_.gpu_data(),
                              this->blobs_[1]->gpu_data(), (Dtype) 1.,
                              top_data);
    }
#endif  // USE CUDA
  } else {
#ifdef USE_GREENTEA
    if (M_ == 1) {
      // Single sample, OpenCL path.
      greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, N_, K_,
                               (Dtype) 1., (cl_mem) weight, 0,
                               (cl_mem) bottom_data, 0, (Dtype) 0.,
                               (cl_mem) top_data, 0);
      if (bias_term_)
        greentea_gpu_axpy<Dtype>(this->device_->id(), N_,
                                 bias_multiplier_.cpu_data()[0],
                                 (cl_mem) (this->blobs_[1]->gpu_data()), 0,
                                 (cl_mem) top_data, 0);
    } else {
      // Batched, OpenCL path.
      greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
                               transpose_ ? CblasNoTrans : CblasTrans,
                               M_, N_, K_, (Dtype) 1.,
                               (cl_mem) bottom_data, 0, (cl_mem) weight, 0,
                               (Dtype) 0., (cl_mem) top_data, 0);
      if (bias_term_)
        greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
                                 CblasNoTrans, M_, N_, 1, (Dtype) 1.,
                                 (cl_mem) (bias_multiplier_.gpu_data()), 0,
                                 (cl_mem) (this->blobs_[1]->gpu_data()), 0,
                                 (Dtype) 1., (cl_mem) top_data, 0);
    }
#endif  // USE_GREENTEA
  }
}

// Backward pass: accumulates (beta = 1) the weight and bias gradients, and
// computes (beta = 0) the bottom-data gradient when requested.
template<typename Dtype>
void InnerProductLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    if (this->param_propagate_down_[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      const Dtype* bottom_data = bottom[0]->gpu_data();
      // Gradient with respect to weight
      if (transpose_) {
        caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_,
                              (Dtype) 1., bottom_data, top_diff, (Dtype) 1.,
                              this->blobs_[0]->mutable_gpu_diff());
      } else {
        caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_,
                              (Dtype) 1., top_diff, bottom_data, (Dtype) 1.,
                              this->blobs_[0]->mutable_gpu_diff());
      }
    }
    if (bias_term_ && this->param_propagate_down_[1]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bias: sum top_diff over the batch.
      caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype) 1., top_diff,
                            bias_multiplier_.gpu_data(), (Dtype) 1.,
                            this->blobs_[1]->mutable_gpu_diff());
    }
    if (propagate_down[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bottom data
      if (transpose_) {
        caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_,
                              (Dtype) 1., top_diff,
                              this->blobs_[0]->gpu_data(), (Dtype) 0.,
                              bottom[0]->mutable_gpu_diff());
      } else {
        caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_,
                              (Dtype) 1., top_diff,
                              this->blobs_[0]->gpu_data(), (Dtype) 0.,
                              bottom[0]->mutable_gpu_diff());
      }
    }
#endif  // USE_CUDA
  } else {
#ifdef USE_GREENTEA
    if (this->param_propagate_down_[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      const Dtype* bottom_data = bottom[0]->gpu_data();
      // Gradient with respect to weight
      if (transpose_) {
        greentea_gpu_gemm<Dtype>(this->device_->id(), CblasTrans,
                                 CblasNoTrans, K_, N_, M_, (Dtype) 1.,
                                 (cl_mem) bottom_data, 0,
                                 (cl_mem) top_diff, 0, (Dtype) 1.,
                                 (cl_mem) (this->blobs_[0]->mutable_gpu_diff()),
                                 0);
      } else {
        greentea_gpu_gemm<Dtype>(this->device_->id(), CblasTrans,
                                 CblasNoTrans, N_, K_, M_, (Dtype) 1.,
                                 (cl_mem) top_diff, 0,
                                 (cl_mem) bottom_data, 0, (Dtype) 1.,
                                 (cl_mem) (this->blobs_[0]->mutable_gpu_diff()),
                                 0);
      }
    }
    if (bias_term_ && this->param_propagate_down_[1]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bias: sum top_diff over the batch.
      greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, M_, N_,
                               (Dtype) 1., (cl_mem) top_diff, 0,
                               (cl_mem) (bias_multiplier_.gpu_data()), 0,
                               (Dtype) 1.,
                               (cl_mem) (this->blobs_[1]->mutable_gpu_diff()),
                               0);
    }
    if (propagate_down[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bottom data
      if (transpose_) {
        greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
                                 CblasTrans, M_, K_, N_, (Dtype) 1.,
                                 (cl_mem) top_diff, 0,
                                 (cl_mem) (this->blobs_[0]->gpu_data()), 0,
                                 (Dtype) 0.,
                                 (cl_mem) (bottom[0]->mutable_gpu_diff()), 0);
      } else {
        greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
                                 CblasNoTrans, M_, K_, N_, (Dtype) 1.,
                                 (cl_mem) top_diff, 0,
                                 (cl_mem) (this->blobs_[0]->gpu_data()), 0,
                                 (Dtype) 0.,
                                 (cl_mem) (bottom[0]->mutable_gpu_diff()), 0);
      }
    }
#endif  // USE_GREENTEA
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer);

}  // namespace caffe
41ee97322750916183171563c8a808dffd0f0388.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void getMaxPorb(const int size, const float* class_prob, const int class_num, float* max_prob, int* idx, int *class_idx, const int conf_thresh) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < size) { // printf("run here %d!\n", index); float temp_max_prob = 0.0f; const float *start = class_prob + index * class_num; int class_index = -1; for(int i = 0; i < class_num; i++) { float curr_prob = start[i]; if(temp_max_prob <= curr_prob) { class_index = i; temp_max_prob = curr_prob; } } max_prob[index] = 0.0f; if(temp_max_prob >= conf_thresh) { // atomicAdd(detecNum, 1); max_prob[index] = temp_max_prob; // printf("run here %d!\n", index); } idx[index] = index; class_idx[index] = class_index; } }
41ee97322750916183171563c8a808dffd0f0388.cu
#include "includes.h" __global__ void getMaxPorb(const int size, const float* class_prob, const int class_num, float* max_prob, int* idx, int *class_idx, const int conf_thresh) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < size) { // printf("run here %d!\n", index); float temp_max_prob = 0.0f; const float *start = class_prob + index * class_num; int class_index = -1; for(int i = 0; i < class_num; i++) { float curr_prob = start[i]; if(temp_max_prob <= curr_prob) { class_index = i; temp_max_prob = curr_prob; } } max_prob[index] = 0.0f; if(temp_max_prob >= conf_thresh) { // atomicAdd(detecNum, 1); max_prob[index] = temp_max_prob; // printf("run here %d!\n", index); } idx[index] = index; class_idx[index] = class_index; } }
d121bd4a21f8623e0ab75dc05bc3dd96b49bbd5d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2022, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************<GINKGO LICENSE>*******************************/ #include "core/solver/idr_kernels.hpp" #include <ctime> #include <random> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include "core/components/fill_array_kernels.hpp" #include "cuda/base/config.hpp" #include "cuda/base/cublas_bindings.hpp" #include "cuda/base/curand_bindings.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/atomic.cuh" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/reduction.cuh" #include "cuda/components/thread_ids.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The IDR solver namespace. * * @ingroup idr */ namespace idr { constexpr int default_block_size = 512; constexpr int default_dot_dim = 32; constexpr int default_dot_size = default_dot_dim * default_dot_dim; #include "common/cuda_hip/solver/idr_kernels.hpp.inc" namespace { template <typename ValueType> void initialize_m(const size_type nrhs, matrix::Dense<ValueType>* m, array<stopping_status>* stop_status) { const auto subspace_dim = m->get_size()[0]; const auto m_stride = m->get_stride(); const auto grid_dim = ceildiv(m_stride * subspace_dim, default_block_size); hipLaunchKernelGGL(( initialize_m_kernel), dim3(grid_dim), dim3(default_block_size), 0, 0, subspace_dim, nrhs, as_cuda_type(m->get_values()), m_stride, as_cuda_type(stop_status->get_data())); } template <typename ValueType> void initialize_subspace_vectors(matrix::Dense<ValueType>* subspace_vectors, bool deterministic) { if (deterministic) { auto subspace_vectors_data = matrix_data<ValueType>( subspace_vectors->get_size(), std::normal_distribution<>(0.0, 1.0), std::default_random_engine(15)); subspace_vectors->read(subspace_vectors_data); } else { auto gen = hiprand::rand_generator(time(NULL), HIPRAND_RNG_PSEUDO_DEFAULT); hiprand::rand_vector( gen, subspace_vectors->get_size()[0] * subspace_vectors->get_stride(), 0.0, 
1.0, subspace_vectors->get_values()); } } template <typename ValueType> void orthonormalize_subspace_vectors(matrix::Dense<ValueType>* subspace_vectors) { hipLaunchKernelGGL(( orthonormalize_subspace_vectors_kernel<default_block_size>) , dim3(1), dim3(default_block_size), 0, 0, subspace_vectors->get_size()[0], subspace_vectors->get_size()[1], as_cuda_type(subspace_vectors->get_values()), subspace_vectors->get_stride()); } template <typename ValueType> void solve_lower_triangular(const size_type nrhs, const matrix::Dense<ValueType>* m, const matrix::Dense<ValueType>* f, matrix::Dense<ValueType>* c, const array<stopping_status>* stop_status) { const auto subspace_dim = m->get_size()[0]; const auto grid_dim = ceildiv(nrhs, default_block_size); hipLaunchKernelGGL(( solve_lower_triangular_kernel), dim3(grid_dim), dim3(default_block_size), 0, 0, subspace_dim, nrhs, as_cuda_type(m->get_const_values()), m->get_stride(), as_cuda_type(f->get_const_values()), f->get_stride(), as_cuda_type(c->get_values()), c->get_stride(), as_cuda_type(stop_status->get_const_data())); } template <typename ValueType> void update_g_and_u(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* p, const matrix::Dense<ValueType>* m, matrix::Dense<ValueType>* alpha, matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* g_k, matrix::Dense<ValueType>* u, const array<stopping_status>* stop_status) { if (nrhs == 0) { return; } const auto size = g->get_size()[0]; const auto p_stride = p->get_stride(); const dim3 grid_dim(ceildiv(nrhs, default_dot_dim), exec->get_num_multiprocessor() * 2); const dim3 block_dim(default_dot_dim, default_dot_dim); for (size_type i = 0; i < k; i++) { const auto p_i = p->get_const_values() + i * p_stride; if (nrhs > 1 || is_complex<ValueType>()) { components::fill_array(exec, alpha->get_values(), nrhs, zero<ValueType>()); hipLaunchKernelGGL(( multidot_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, size, nrhs, 
as_cuda_type(p_i), as_cuda_type(g_k->get_values()), g_k->get_stride(), as_cuda_type(alpha->get_values()), as_cuda_type(stop_status->get_const_data())); } else { cublas::dot(exec->get_cublas_handle(), size, p_i, 1, g_k->get_values(), g_k->get_stride(), alpha->get_values()); } hipLaunchKernelGGL(( update_g_k_and_u_kernel<default_block_size>) , dim3(ceildiv(size * g_k->get_stride(), default_block_size)), dim3(default_block_size), 0, 0, k, i, size, nrhs, as_cuda_type(alpha->get_const_values()), as_cuda_type(m->get_const_values()), m->get_stride(), as_cuda_type(g->get_const_values()), g->get_stride(), as_cuda_type(g_k->get_values()), g_k->get_stride(), as_cuda_type(u->get_values()), u->get_stride(), as_cuda_type(stop_status->get_const_data())); } hipLaunchKernelGGL(( update_g_kernel<default_block_size>) , dim3(ceildiv(size * g_k->get_stride(), default_block_size)), dim3(default_block_size), 0, 0, k, size, nrhs, as_cuda_type(g_k->get_const_values()), g_k->get_stride(), as_cuda_type(g->get_values()), g->get_stride(), as_cuda_type(stop_status->get_const_data())); } template <typename ValueType> void update_m(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* p, const matrix::Dense<ValueType>* g_k, matrix::Dense<ValueType>* m, const array<stopping_status>* stop_status) { if (nrhs == 0) { return; } const auto size = g_k->get_size()[0]; const auto subspace_dim = m->get_size()[0]; const auto p_stride = p->get_stride(); const auto m_stride = m->get_stride(); const dim3 grid_dim(ceildiv(nrhs, default_dot_dim), exec->get_num_multiprocessor() * 2); const dim3 block_dim(default_dot_dim, default_dot_dim); for (size_type i = k; i < subspace_dim; i++) { const auto p_i = p->get_const_values() + i * p_stride; auto m_i = m->get_values() + i * m_stride + k * nrhs; if (nrhs > 1 || is_complex<ValueType>()) { components::fill_array(exec, m_i, nrhs, zero<ValueType>()); hipLaunchKernelGGL(( multidot_kernel), dim3(grid_dim), 
dim3(block_dim), 0, 0, size, nrhs, as_cuda_type(p_i), as_cuda_type(g_k->get_const_values()), g_k->get_stride(), as_cuda_type(m_i), as_cuda_type(stop_status->get_const_data())); } else { cublas::dot(exec->get_cublas_handle(), size, p_i, 1, g_k->get_const_values(), g_k->get_stride(), m_i); } } } template <typename ValueType> void update_x_r_and_f(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* m, const matrix::Dense<ValueType>* g, const matrix::Dense<ValueType>* u, matrix::Dense<ValueType>* f, matrix::Dense<ValueType>* r, matrix::Dense<ValueType>* x, const array<stopping_status>* stop_status) { const auto size = x->get_size()[0]; const auto subspace_dim = m->get_size()[0]; const auto grid_dim = ceildiv(size * x->get_stride(), default_block_size); hipLaunchKernelGGL(( update_x_r_and_f_kernel), dim3(grid_dim), dim3(default_block_size), 0, 0, k, size, subspace_dim, nrhs, as_cuda_type(m->get_const_values()), m->get_stride(), as_cuda_type(g->get_const_values()), g->get_stride(), as_cuda_type(u->get_const_values()), u->get_stride(), as_cuda_type(f->get_values()), f->get_stride(), as_cuda_type(r->get_values()), r->get_stride(), as_cuda_type(x->get_values()), x->get_stride(), as_cuda_type(stop_status->get_const_data())); components::fill_array(exec, f->get_values() + k * f->get_stride(), nrhs, zero<ValueType>()); } } // namespace template <typename ValueType> void initialize(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, matrix::Dense<ValueType>* m, matrix::Dense<ValueType>* subspace_vectors, bool deterministic, array<stopping_status>* stop_status) { initialize_m(nrhs, m, stop_status); initialize_subspace_vectors(subspace_vectors, deterministic); orthonormalize_subspace_vectors(subspace_vectors); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_INITIALIZE_KERNEL); template <typename ValueType> void step_1(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, const size_type k, 
const matrix::Dense<ValueType>* m, const matrix::Dense<ValueType>* f, const matrix::Dense<ValueType>* residual, const matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* c, matrix::Dense<ValueType>* v, const array<stopping_status>* stop_status) { solve_lower_triangular(nrhs, m, f, c, stop_status); const auto num_rows = v->get_size()[0]; const auto subspace_dim = m->get_size()[0]; const auto grid_dim = ceildiv(nrhs * num_rows, default_block_size); hipLaunchKernelGGL(( step_1_kernel), dim3(grid_dim), dim3(default_block_size), 0, 0, k, num_rows, subspace_dim, nrhs, as_cuda_type(residual->get_const_values()), residual->get_stride(), as_cuda_type(c->get_const_values()), c->get_stride(), as_cuda_type(g->get_const_values()), g->get_stride(), as_cuda_type(v->get_values()), v->get_stride(), as_cuda_type(stop_status->get_const_data())); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_1_KERNEL); template <typename ValueType> void step_2(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* omega, const matrix::Dense<ValueType>* preconditioned_vector, const matrix::Dense<ValueType>* c, matrix::Dense<ValueType>* u, const array<stopping_status>* stop_status) { if (nrhs == 0) { return; } const auto num_rows = preconditioned_vector->get_size()[0]; const auto subspace_dim = u->get_size()[1] / nrhs; const auto grid_dim = ceildiv(nrhs * num_rows, default_block_size); hipLaunchKernelGGL(( step_2_kernel), dim3(grid_dim), dim3(default_block_size), 0, 0, k, num_rows, subspace_dim, nrhs, as_cuda_type(omega->get_const_values()), as_cuda_type(preconditioned_vector->get_const_values()), preconditioned_vector->get_stride(), as_cuda_type(c->get_const_values()), c->get_stride(), as_cuda_type(u->get_values()), u->get_stride(), as_cuda_type(stop_status->get_const_data())); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_2_KERNEL); template <typename ValueType> void step_3(std::shared_ptr<const CudaExecutor> 
exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* p, matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* g_k, matrix::Dense<ValueType>* u, matrix::Dense<ValueType>* m, matrix::Dense<ValueType>* f, matrix::Dense<ValueType>* alpha, matrix::Dense<ValueType>* residual, matrix::Dense<ValueType>* x, const array<stopping_status>* stop_status) { update_g_and_u(exec, nrhs, k, p, m, alpha, g, g_k, u, stop_status); update_m(exec, nrhs, k, p, g_k, m, stop_status); update_x_r_and_f(exec, nrhs, k, m, g, u, f, residual, x, stop_status); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_3_KERNEL); template <typename ValueType> void compute_omega( std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, const remove_complex<ValueType> kappa, const matrix::Dense<ValueType>* tht, const matrix::Dense<remove_complex<ValueType>>* residual_norm, matrix::Dense<ValueType>* omega, const array<stopping_status>* stop_status) { const auto grid_dim = ceildiv(nrhs, config::warp_size); hipLaunchKernelGGL(( compute_omega_kernel), dim3(grid_dim), dim3(config::warp_size), 0, 0, nrhs, kappa, as_cuda_type(tht->get_const_values()), as_cuda_type(residual_norm->get_const_values()), as_cuda_type(omega->get_values()), as_cuda_type(stop_status->get_const_data())); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_COMPUTE_OMEGA_KERNEL); } // namespace idr } // namespace cuda } // namespace kernels } // namespace gko
d121bd4a21f8623e0ab75dc05bc3dd96b49bbd5d.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2022, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************<GINKGO LICENSE>*******************************/ #include "core/solver/idr_kernels.hpp" #include <ctime> #include <random> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include "core/components/fill_array_kernels.hpp" #include "cuda/base/config.hpp" #include "cuda/base/cublas_bindings.hpp" #include "cuda/base/curand_bindings.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/atomic.cuh" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/reduction.cuh" #include "cuda/components/thread_ids.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The IDR solver namespace. * * @ingroup idr */ namespace idr { constexpr int default_block_size = 512; constexpr int default_dot_dim = 32; constexpr int default_dot_size = default_dot_dim * default_dot_dim; #include "common/cuda_hip/solver/idr_kernels.hpp.inc" namespace { template <typename ValueType> void initialize_m(const size_type nrhs, matrix::Dense<ValueType>* m, array<stopping_status>* stop_status) { const auto subspace_dim = m->get_size()[0]; const auto m_stride = m->get_stride(); const auto grid_dim = ceildiv(m_stride * subspace_dim, default_block_size); initialize_m_kernel<<<grid_dim, default_block_size>>>( subspace_dim, nrhs, as_cuda_type(m->get_values()), m_stride, as_cuda_type(stop_status->get_data())); } template <typename ValueType> void initialize_subspace_vectors(matrix::Dense<ValueType>* subspace_vectors, bool deterministic) { if (deterministic) { auto subspace_vectors_data = matrix_data<ValueType>( subspace_vectors->get_size(), std::normal_distribution<>(0.0, 1.0), std::default_random_engine(15)); subspace_vectors->read(subspace_vectors_data); } else { auto gen = curand::rand_generator(time(NULL), CURAND_RNG_PSEUDO_DEFAULT); curand::rand_vector( gen, subspace_vectors->get_size()[0] * subspace_vectors->get_stride(), 0.0, 1.0, subspace_vectors->get_values()); } 
} template <typename ValueType> void orthonormalize_subspace_vectors(matrix::Dense<ValueType>* subspace_vectors) { orthonormalize_subspace_vectors_kernel<default_block_size> <<<1, default_block_size>>>( subspace_vectors->get_size()[0], subspace_vectors->get_size()[1], as_cuda_type(subspace_vectors->get_values()), subspace_vectors->get_stride()); } template <typename ValueType> void solve_lower_triangular(const size_type nrhs, const matrix::Dense<ValueType>* m, const matrix::Dense<ValueType>* f, matrix::Dense<ValueType>* c, const array<stopping_status>* stop_status) { const auto subspace_dim = m->get_size()[0]; const auto grid_dim = ceildiv(nrhs, default_block_size); solve_lower_triangular_kernel<<<grid_dim, default_block_size>>>( subspace_dim, nrhs, as_cuda_type(m->get_const_values()), m->get_stride(), as_cuda_type(f->get_const_values()), f->get_stride(), as_cuda_type(c->get_values()), c->get_stride(), as_cuda_type(stop_status->get_const_data())); } template <typename ValueType> void update_g_and_u(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* p, const matrix::Dense<ValueType>* m, matrix::Dense<ValueType>* alpha, matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* g_k, matrix::Dense<ValueType>* u, const array<stopping_status>* stop_status) { if (nrhs == 0) { return; } const auto size = g->get_size()[0]; const auto p_stride = p->get_stride(); const dim3 grid_dim(ceildiv(nrhs, default_dot_dim), exec->get_num_multiprocessor() * 2); const dim3 block_dim(default_dot_dim, default_dot_dim); for (size_type i = 0; i < k; i++) { const auto p_i = p->get_const_values() + i * p_stride; if (nrhs > 1 || is_complex<ValueType>()) { components::fill_array(exec, alpha->get_values(), nrhs, zero<ValueType>()); multidot_kernel<<<grid_dim, block_dim>>>( size, nrhs, as_cuda_type(p_i), as_cuda_type(g_k->get_values()), g_k->get_stride(), as_cuda_type(alpha->get_values()), as_cuda_type(stop_status->get_const_data())); } 
else { cublas::dot(exec->get_cublas_handle(), size, p_i, 1, g_k->get_values(), g_k->get_stride(), alpha->get_values()); } update_g_k_and_u_kernel<default_block_size> <<<ceildiv(size * g_k->get_stride(), default_block_size), default_block_size>>>( k, i, size, nrhs, as_cuda_type(alpha->get_const_values()), as_cuda_type(m->get_const_values()), m->get_stride(), as_cuda_type(g->get_const_values()), g->get_stride(), as_cuda_type(g_k->get_values()), g_k->get_stride(), as_cuda_type(u->get_values()), u->get_stride(), as_cuda_type(stop_status->get_const_data())); } update_g_kernel<default_block_size> <<<ceildiv(size * g_k->get_stride(), default_block_size), default_block_size>>>( k, size, nrhs, as_cuda_type(g_k->get_const_values()), g_k->get_stride(), as_cuda_type(g->get_values()), g->get_stride(), as_cuda_type(stop_status->get_const_data())); } template <typename ValueType> void update_m(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* p, const matrix::Dense<ValueType>* g_k, matrix::Dense<ValueType>* m, const array<stopping_status>* stop_status) { if (nrhs == 0) { return; } const auto size = g_k->get_size()[0]; const auto subspace_dim = m->get_size()[0]; const auto p_stride = p->get_stride(); const auto m_stride = m->get_stride(); const dim3 grid_dim(ceildiv(nrhs, default_dot_dim), exec->get_num_multiprocessor() * 2); const dim3 block_dim(default_dot_dim, default_dot_dim); for (size_type i = k; i < subspace_dim; i++) { const auto p_i = p->get_const_values() + i * p_stride; auto m_i = m->get_values() + i * m_stride + k * nrhs; if (nrhs > 1 || is_complex<ValueType>()) { components::fill_array(exec, m_i, nrhs, zero<ValueType>()); multidot_kernel<<<grid_dim, block_dim>>>( size, nrhs, as_cuda_type(p_i), as_cuda_type(g_k->get_const_values()), g_k->get_stride(), as_cuda_type(m_i), as_cuda_type(stop_status->get_const_data())); } else { cublas::dot(exec->get_cublas_handle(), size, p_i, 1, g_k->get_const_values(), 
g_k->get_stride(), m_i); } } } template <typename ValueType> void update_x_r_and_f(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* m, const matrix::Dense<ValueType>* g, const matrix::Dense<ValueType>* u, matrix::Dense<ValueType>* f, matrix::Dense<ValueType>* r, matrix::Dense<ValueType>* x, const array<stopping_status>* stop_status) { const auto size = x->get_size()[0]; const auto subspace_dim = m->get_size()[0]; const auto grid_dim = ceildiv(size * x->get_stride(), default_block_size); update_x_r_and_f_kernel<<<grid_dim, default_block_size>>>( k, size, subspace_dim, nrhs, as_cuda_type(m->get_const_values()), m->get_stride(), as_cuda_type(g->get_const_values()), g->get_stride(), as_cuda_type(u->get_const_values()), u->get_stride(), as_cuda_type(f->get_values()), f->get_stride(), as_cuda_type(r->get_values()), r->get_stride(), as_cuda_type(x->get_values()), x->get_stride(), as_cuda_type(stop_status->get_const_data())); components::fill_array(exec, f->get_values() + k * f->get_stride(), nrhs, zero<ValueType>()); } } // namespace template <typename ValueType> void initialize(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, matrix::Dense<ValueType>* m, matrix::Dense<ValueType>* subspace_vectors, bool deterministic, array<stopping_status>* stop_status) { initialize_m(nrhs, m, stop_status); initialize_subspace_vectors(subspace_vectors, deterministic); orthonormalize_subspace_vectors(subspace_vectors); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_INITIALIZE_KERNEL); template <typename ValueType> void step_1(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* m, const matrix::Dense<ValueType>* f, const matrix::Dense<ValueType>* residual, const matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* c, matrix::Dense<ValueType>* v, const array<stopping_status>* stop_status) { solve_lower_triangular(nrhs, m, f, c, 
stop_status); const auto num_rows = v->get_size()[0]; const auto subspace_dim = m->get_size()[0]; const auto grid_dim = ceildiv(nrhs * num_rows, default_block_size); step_1_kernel<<<grid_dim, default_block_size>>>( k, num_rows, subspace_dim, nrhs, as_cuda_type(residual->get_const_values()), residual->get_stride(), as_cuda_type(c->get_const_values()), c->get_stride(), as_cuda_type(g->get_const_values()), g->get_stride(), as_cuda_type(v->get_values()), v->get_stride(), as_cuda_type(stop_status->get_const_data())); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_1_KERNEL); template <typename ValueType> void step_2(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* omega, const matrix::Dense<ValueType>* preconditioned_vector, const matrix::Dense<ValueType>* c, matrix::Dense<ValueType>* u, const array<stopping_status>* stop_status) { if (nrhs == 0) { return; } const auto num_rows = preconditioned_vector->get_size()[0]; const auto subspace_dim = u->get_size()[1] / nrhs; const auto grid_dim = ceildiv(nrhs * num_rows, default_block_size); step_2_kernel<<<grid_dim, default_block_size>>>( k, num_rows, subspace_dim, nrhs, as_cuda_type(omega->get_const_values()), as_cuda_type(preconditioned_vector->get_const_values()), preconditioned_vector->get_stride(), as_cuda_type(c->get_const_values()), c->get_stride(), as_cuda_type(u->get_values()), u->get_stride(), as_cuda_type(stop_status->get_const_data())); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_2_KERNEL); template <typename ValueType> void step_3(std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* p, matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* g_k, matrix::Dense<ValueType>* u, matrix::Dense<ValueType>* m, matrix::Dense<ValueType>* f, matrix::Dense<ValueType>* alpha, matrix::Dense<ValueType>* residual, matrix::Dense<ValueType>* x, const array<stopping_status>* 
stop_status) { update_g_and_u(exec, nrhs, k, p, m, alpha, g, g_k, u, stop_status); update_m(exec, nrhs, k, p, g_k, m, stop_status); update_x_r_and_f(exec, nrhs, k, m, g, u, f, residual, x, stop_status); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_3_KERNEL); template <typename ValueType> void compute_omega( std::shared_ptr<const CudaExecutor> exec, const size_type nrhs, const remove_complex<ValueType> kappa, const matrix::Dense<ValueType>* tht, const matrix::Dense<remove_complex<ValueType>>* residual_norm, matrix::Dense<ValueType>* omega, const array<stopping_status>* stop_status) { const auto grid_dim = ceildiv(nrhs, config::warp_size); compute_omega_kernel<<<grid_dim, config::warp_size>>>( nrhs, kappa, as_cuda_type(tht->get_const_values()), as_cuda_type(residual_norm->get_const_values()), as_cuda_type(omega->get_values()), as_cuda_type(stop_status->get_const_data())); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_COMPUTE_OMEGA_KERNEL); } // namespace idr } // namespace cuda } // namespace kernels } // namespace gko
30f95129a6071a85e09218ed59b3b7da41645109.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2021 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for * license information. */ #ifdef USE_CUDA_EXP #include "cuda_data_partition.hpp" #include <LightGBM/cuda/cuda_algorithms.hpp> #include <LightGBM/tree.h> #include <algorithm> #include <vector> namespace LightGBM { __global__ void FillDataIndicesBeforeTrainKernel(const data_size_t num_data, data_size_t* data_indices, int* cuda_data_index_to_leaf_index) { const unsigned int data_index = threadIdx.x + blockIdx.x * blockDim.x; if (data_index < num_data) { data_indices[data_index] = data_index; cuda_data_index_to_leaf_index[data_index] = 0; } } __global__ void FillDataIndexToLeafIndexKernel( const data_size_t num_data, const data_size_t* data_indices, int* data_index_to_leaf_index) { const data_size_t data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x); if (data_index < num_data) { data_index_to_leaf_index[data_indices[data_index]] = 0; } } void CUDADataPartition::LaunchFillDataIndicesBeforeTrain() { const data_size_t num_data_in_root = root_num_data(); const int num_blocks = (num_data_in_root + FILL_INDICES_BLOCK_SIZE_DATA_PARTITION - 1) / FILL_INDICES_BLOCK_SIZE_DATA_PARTITION; hipLaunchKernelGGL(( FillDataIndicesBeforeTrainKernel), dim3(num_blocks), dim3(FILL_INDICES_BLOCK_SIZE_DATA_PARTITION), 0, 0, num_data_in_root, cuda_data_indices_, cuda_data_index_to_leaf_index_); } void CUDADataPartition::LaunchFillDataIndexToLeafIndex() { const data_size_t num_data_in_root = root_num_data(); const int num_blocks = (num_data_in_root + FILL_INDICES_BLOCK_SIZE_DATA_PARTITION - 1) / FILL_INDICES_BLOCK_SIZE_DATA_PARTITION; hipLaunchKernelGGL(( FillDataIndexToLeafIndexKernel), dim3(num_blocks), dim3(FILL_INDICES_BLOCK_SIZE_DATA_PARTITION), 0, 0, num_data_in_root, cuda_data_indices_, cuda_data_index_to_leaf_index_); } __device__ 
__forceinline__ void PrepareOffset(const data_size_t num_data_in_leaf, uint16_t* block_to_left_offset, data_size_t* block_to_left_offset_buffer, data_size_t* block_to_right_offset_buffer, const uint16_t thread_to_left_offset_cnt, uint16_t* shared_mem_buffer) { const unsigned int threadIdx_x = threadIdx.x; const unsigned int blockDim_x = blockDim.x; const uint16_t thread_to_left_offset = ShufflePrefixSum<uint16_t>(thread_to_left_offset_cnt, shared_mem_buffer); const data_size_t num_data_in_block = (blockIdx.x + 1) * blockDim_x <= num_data_in_leaf ? static_cast<data_size_t>(blockDim_x) : num_data_in_leaf - static_cast<data_size_t>(blockIdx.x * blockDim_x); if (static_cast<data_size_t>(threadIdx_x) < num_data_in_block) { block_to_left_offset[threadIdx_x] = thread_to_left_offset; } if (threadIdx_x == blockDim_x - 1) { if (num_data_in_block > 0) { const data_size_t data_to_left = static_cast<data_size_t>(thread_to_left_offset); block_to_left_offset_buffer[blockIdx.x + 1] = data_to_left; block_to_right_offset_buffer[blockIdx.x + 1] = num_data_in_block - data_to_left; } else { block_to_left_offset_buffer[blockIdx.x + 1] = 0; block_to_right_offset_buffer[blockIdx.x + 1] = 0; } } } template <typename T> __device__ bool CUDAFindInBitset(const uint32_t* bits, int n, T pos) { int i1 = pos / 32; if (i1 >= n) { return false; } int i2 = pos % 32; return (bits[i1] >> i2) & 1; } #define UpdateDataIndexToLeafIndexKernel_PARAMS \ const BIN_TYPE* column_data, \ const data_size_t num_data_in_leaf, \ const data_size_t* data_indices_in_leaf, \ const uint32_t th, \ const uint32_t t_zero_bin, \ const uint32_t max_bin, \ const uint32_t min_bin, \ const int left_leaf_index, \ const int right_leaf_index, \ const int default_leaf_index, \ const int missing_default_leaf_index #define UpdateDataIndexToLeafIndex_ARGS \ column_data, \ num_data_in_leaf, \ data_indices_in_leaf, th, \ t_zero_bin, \ max_bin, \ min_bin, \ left_leaf_index, \ right_leaf_index, \ default_leaf_index, \ 
missing_default_leaf_index template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, bool USE_MIN_BIN, typename BIN_TYPE> __global__ void UpdateDataIndexToLeafIndexKernel( UpdateDataIndexToLeafIndexKernel_PARAMS, int* cuda_data_index_to_leaf_index) { const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x; if (local_data_index < num_data_in_leaf) { const unsigned int global_data_index = data_indices_in_leaf[local_data_index]; const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]); if (!MIN_IS_MAX) { if ((MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) || (MISSING_IS_NA && !MFB_IS_NA && bin == max_bin)) { cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index; } else if ((USE_MIN_BIN && (bin < min_bin || bin > max_bin)) || (!USE_MIN_BIN && bin == 0)) { if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) { cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index; } else { cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index; } } else if (bin > th) { cuda_data_index_to_leaf_index[global_data_index] = right_leaf_index; } else { cuda_data_index_to_leaf_index[global_data_index] = left_leaf_index; } } else { if (MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) { cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index; } else if (bin != max_bin) { if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) { cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index; } else { cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index; } } else { if (MISSING_IS_NA && !MFB_IS_NA) { cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index; } else { if (!MAX_TO_LEFT) { cuda_data_index_to_leaf_index[global_data_index] = right_leaf_index; } else { cuda_data_index_to_leaf_index[global_data_index] = 
left_leaf_index; } } } } } } template <typename BIN_TYPE> void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel( UpdateDataIndexToLeafIndexKernel_PARAMS, const bool missing_is_zero, const bool missing_is_na, const bool mfb_is_zero, const bool mfb_is_na, const bool max_to_left, const bool is_single_feature_in_column) { if (min_bin < max_bin) { if (!missing_is_zero) { LaunchUpdateDataIndexToLeafIndexKernel_Inner0<false, false, BIN_TYPE> (UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column); } else { LaunchUpdateDataIndexToLeafIndexKernel_Inner0<false, true, BIN_TYPE> (UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column); } } else { if (!missing_is_zero) { LaunchUpdateDataIndexToLeafIndexKernel_Inner0<true, false, BIN_TYPE> (UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column); } else { LaunchUpdateDataIndexToLeafIndexKernel_Inner0<true, true, BIN_TYPE> (UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column); } } } template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, typename BIN_TYPE> void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner0( UpdateDataIndexToLeafIndexKernel_PARAMS, const bool missing_is_na, const bool mfb_is_zero, const bool mfb_is_na, const bool max_to_left, const bool is_single_feature_in_column) { if (!missing_is_na) { LaunchUpdateDataIndexToLeafIndexKernel_Inner1<MIN_IS_MAX, MISSING_IS_ZERO, false, BIN_TYPE> (UpdateDataIndexToLeafIndex_ARGS, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column); } else { LaunchUpdateDataIndexToLeafIndexKernel_Inner1<MIN_IS_MAX, MISSING_IS_ZERO, true, BIN_TYPE> (UpdateDataIndexToLeafIndex_ARGS, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column); } } template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, typename 
BIN_TYPE> void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner1( UpdateDataIndexToLeafIndexKernel_PARAMS, const bool mfb_is_zero, const bool mfb_is_na, const bool max_to_left, const bool is_single_feature_in_column) { if (!mfb_is_zero) { LaunchUpdateDataIndexToLeafIndexKernel_Inner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, false, BIN_TYPE> (UpdateDataIndexToLeafIndex_ARGS, mfb_is_na, max_to_left, is_single_feature_in_column); } else { LaunchUpdateDataIndexToLeafIndexKernel_Inner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, true, BIN_TYPE> (UpdateDataIndexToLeafIndex_ARGS, mfb_is_na, max_to_left, is_single_feature_in_column); } } template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, typename BIN_TYPE> void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner2( UpdateDataIndexToLeafIndexKernel_PARAMS, const bool mfb_is_na, const bool max_to_left, const bool is_single_feature_in_column) { if (!mfb_is_na) { LaunchUpdateDataIndexToLeafIndexKernel_Inner3<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, false, BIN_TYPE> (UpdateDataIndexToLeafIndex_ARGS, max_to_left, is_single_feature_in_column); } else { LaunchUpdateDataIndexToLeafIndexKernel_Inner3<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, true, BIN_TYPE> (UpdateDataIndexToLeafIndex_ARGS, max_to_left, is_single_feature_in_column); } } template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, typename BIN_TYPE> void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner3( UpdateDataIndexToLeafIndexKernel_PARAMS, const bool max_to_left, const bool is_single_feature_in_column) { if (!max_to_left) { LaunchUpdateDataIndexToLeafIndexKernel_Inner4<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, false, BIN_TYPE> (UpdateDataIndexToLeafIndex_ARGS, is_single_feature_in_column); } else { LaunchUpdateDataIndexToLeafIndexKernel_Inner4<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, 
MFB_IS_ZERO, MFB_IS_NA, true, BIN_TYPE> (UpdateDataIndexToLeafIndex_ARGS, is_single_feature_in_column); } } template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, typename BIN_TYPE> void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner4( UpdateDataIndexToLeafIndexKernel_PARAMS, const bool is_single_feature_in_column) { if (!is_single_feature_in_column) { hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, true, BIN_TYPE>) , dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3], UpdateDataIndexToLeafIndex_ARGS, cuda_data_index_to_leaf_index_); } else { hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, false, BIN_TYPE>) , dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3], UpdateDataIndexToLeafIndex_ARGS, cuda_data_index_to_leaf_index_); } } #define GenDataToLeftBitVectorKernel_PARMS \ const BIN_TYPE* column_data, \ const data_size_t num_data_in_leaf, \ const data_size_t* data_indices_in_leaf, \ const uint32_t th, \ const uint32_t t_zero_bin, \ const uint32_t max_bin, \ const uint32_t min_bin, \ const uint8_t split_default_to_left, \ const uint8_t split_missing_default_to_left #define GenBitVector_ARGS \ column_data, \ num_data_in_leaf, \ data_indices_in_leaf, \ th, \ t_zero_bin, \ max_bin, \ min_bin, \ split_default_to_left, \ split_missing_default_to_left template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, bool USE_MIN_BIN, typename BIN_TYPE> __global__ void GenDataToLeftBitVectorKernel( GenDataToLeftBitVectorKernel_PARMS, uint16_t* block_to_left_offset, data_size_t* block_to_left_offset_buffer, data_size_t* block_to_right_offset_buffer) { __shared__ uint16_t shared_mem_buffer[32]; uint16_t thread_to_left_offset_cnt = 0; const unsigned int 
local_data_index = blockIdx.x * blockDim.x + threadIdx.x; if (local_data_index < num_data_in_leaf) { const unsigned int global_data_index = data_indices_in_leaf[local_data_index]; const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]); if (!MIN_IS_MAX) { if ((MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) || (MISSING_IS_NA && !MFB_IS_NA && bin == max_bin)) { thread_to_left_offset_cnt = split_missing_default_to_left; } else if ((USE_MIN_BIN && (bin < min_bin || bin > max_bin)) || (!USE_MIN_BIN && bin == 0)) { if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO || MFB_IS_ZERO)) { thread_to_left_offset_cnt = split_missing_default_to_left; } else { thread_to_left_offset_cnt = split_default_to_left; } } else if (bin <= th) { thread_to_left_offset_cnt = 1; } } else { if (MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) { thread_to_left_offset_cnt = split_missing_default_to_left; } else if (bin != max_bin) { if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) { thread_to_left_offset_cnt = split_missing_default_to_left; } else { thread_to_left_offset_cnt = split_default_to_left; } } else { if (MISSING_IS_NA && !MFB_IS_NA) { thread_to_left_offset_cnt = split_missing_default_to_left; } else if (MAX_TO_LEFT) { thread_to_left_offset_cnt = 1; } } } } __syncthreads(); PrepareOffset(num_data_in_leaf, block_to_left_offset + blockIdx.x * blockDim.x, block_to_left_offset_buffer, block_to_right_offset_buffer, thread_to_left_offset_cnt, shared_mem_buffer); } template <typename BIN_TYPE> void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner( GenDataToLeftBitVectorKernel_PARMS, const bool missing_is_zero, const bool missing_is_na, const bool mfb_is_zero, const bool mfb_is_na, const bool max_bin_to_left, const bool is_single_feature_in_column) { if (min_bin < max_bin) { if (!missing_is_zero) { LaunchGenDataToLeftBitVectorKernelInner0<false, false, BIN_TYPE> (GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, 
max_bin_to_left, is_single_feature_in_column); } else { LaunchGenDataToLeftBitVectorKernelInner0<false, true, BIN_TYPE> (GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column); } } else { if (!missing_is_zero) { LaunchGenDataToLeftBitVectorKernelInner0<true, false, BIN_TYPE> (GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column); } else { LaunchGenDataToLeftBitVectorKernelInner0<true, true, BIN_TYPE> (GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column); } } } template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, typename BIN_TYPE> void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner0( GenDataToLeftBitVectorKernel_PARMS, const bool missing_is_na, const bool mfb_is_zero, const bool mfb_is_na, const bool max_bin_to_left, const bool is_single_feature_in_column) { if (!missing_is_na) { LaunchGenDataToLeftBitVectorKernelInner1<MIN_IS_MAX, MISSING_IS_ZERO, false, BIN_TYPE> (GenBitVector_ARGS, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column); } else { LaunchGenDataToLeftBitVectorKernelInner1<MIN_IS_MAX, MISSING_IS_ZERO, true, BIN_TYPE> (GenBitVector_ARGS, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column); } } template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, typename BIN_TYPE> void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner1( GenDataToLeftBitVectorKernel_PARMS, const bool mfb_is_zero, const bool mfb_is_na, const bool max_bin_to_left, const bool is_single_feature_in_column) { if (!mfb_is_zero) { LaunchGenDataToLeftBitVectorKernelInner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, false, BIN_TYPE> (GenBitVector_ARGS, mfb_is_na, max_bin_to_left, is_single_feature_in_column); } else { LaunchGenDataToLeftBitVectorKernelInner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, true, BIN_TYPE> (GenBitVector_ARGS, mfb_is_na, max_bin_to_left, 
is_single_feature_in_column); } } template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, typename BIN_TYPE> void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner2( GenDataToLeftBitVectorKernel_PARMS, const bool mfb_is_na, const bool max_bin_to_left, const bool is_single_feature_in_column) { if (!mfb_is_na) { LaunchGenDataToLeftBitVectorKernelInner3 <MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, false, BIN_TYPE> (GenBitVector_ARGS, max_bin_to_left, is_single_feature_in_column); } else { LaunchGenDataToLeftBitVectorKernelInner3 <MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, true, BIN_TYPE> (GenBitVector_ARGS, max_bin_to_left, is_single_feature_in_column); } } template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, typename BIN_TYPE> void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner3( GenDataToLeftBitVectorKernel_PARMS, const bool max_bin_to_left, const bool is_single_feature_in_column) { if (!max_bin_to_left) { LaunchGenDataToLeftBitVectorKernelInner4 <MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, false, BIN_TYPE> (GenBitVector_ARGS, is_single_feature_in_column); } else { LaunchGenDataToLeftBitVectorKernelInner4 <MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, true, BIN_TYPE> (GenBitVector_ARGS, is_single_feature_in_column); } } template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, typename BIN_TYPE> void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner4( GenDataToLeftBitVectorKernel_PARMS, const bool is_single_feature_in_column) { if (!is_single_feature_in_column) { hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel <MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, true, BIN_TYPE>) , dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_ARGS, cuda_block_to_left_offset_, 
cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_); } else { hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel <MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, false, BIN_TYPE>) , dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_ARGS, cuda_block_to_left_offset_, cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_); } } void CUDADataPartition::LaunchGenDataToLeftBitVectorKernel( const data_size_t num_data_in_leaf, const int split_feature_index, const uint32_t split_threshold, const uint8_t split_default_left, const data_size_t leaf_data_start, const int left_leaf_index, const int right_leaf_index) { const bool missing_is_zero = static_cast<bool>(cuda_column_data_->feature_missing_is_zero(split_feature_index)); const bool missing_is_na = static_cast<bool>(cuda_column_data_->feature_missing_is_na(split_feature_index)); const bool mfb_is_zero = static_cast<bool>(cuda_column_data_->feature_mfb_is_zero(split_feature_index)); const bool mfb_is_na = static_cast<bool>(cuda_column_data_->feature_mfb_is_na(split_feature_index)); const bool is_single_feature_in_column = is_single_feature_in_column_[split_feature_index]; const uint32_t default_bin = cuda_column_data_->feature_default_bin(split_feature_index); const uint32_t most_freq_bin = cuda_column_data_->feature_most_freq_bin(split_feature_index); const uint32_t min_bin = is_single_feature_in_column ? 
1 : cuda_column_data_->feature_min_bin(split_feature_index); const uint32_t max_bin = cuda_column_data_->feature_max_bin(split_feature_index); uint32_t th = split_threshold + min_bin; uint32_t t_zero_bin = min_bin + default_bin; if (most_freq_bin == 0) { --th; --t_zero_bin; } uint8_t split_default_to_left = 0; uint8_t split_missing_default_to_left = 0; int default_leaf_index = right_leaf_index; int missing_default_leaf_index = right_leaf_index; if (most_freq_bin <= split_threshold) { split_default_to_left = 1; default_leaf_index = left_leaf_index; } if (missing_is_zero || missing_is_na) { if (split_default_left) { split_missing_default_to_left = 1; missing_default_leaf_index = left_leaf_index; } } const int column_index = cuda_column_data_->feature_to_column(split_feature_index); const uint8_t bit_type = cuda_column_data_->column_bit_type(column_index); const bool max_bin_to_left = (max_bin <= th); const data_size_t* data_indices_in_leaf = cuda_data_indices_ + leaf_data_start; const void* column_data_pointer = cuda_column_data_->GetColumnData(column_index); if (bit_type == 8) { const uint8_t* column_data = reinterpret_cast<const uint8_t*>(column_data_pointer); LaunchGenDataToLeftBitVectorKernelInner<uint8_t>( GenBitVector_ARGS, missing_is_zero, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column); LaunchUpdateDataIndexToLeafIndexKernel<uint8_t>( UpdateDataIndexToLeafIndex_ARGS, missing_is_zero, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column); } else if (bit_type == 16) { const uint16_t* column_data = reinterpret_cast<const uint16_t*>(column_data_pointer); LaunchGenDataToLeftBitVectorKernelInner<uint16_t>( GenBitVector_ARGS, missing_is_zero, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column); LaunchUpdateDataIndexToLeafIndexKernel<uint16_t>( UpdateDataIndexToLeafIndex_ARGS, missing_is_zero, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, 
is_single_feature_in_column); } else if (bit_type == 32) { const uint32_t* column_data = reinterpret_cast<const uint32_t*>(column_data_pointer); LaunchGenDataToLeftBitVectorKernelInner<uint32_t>( GenBitVector_ARGS, missing_is_zero, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column); LaunchUpdateDataIndexToLeafIndexKernel<uint32_t>( UpdateDataIndexToLeafIndex_ARGS, missing_is_zero, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column); } } #undef UpdateDataIndexToLeafIndexKernel_PARAMS #undef UpdateDataIndexToLeafIndex_ARGS #undef GenDataToLeftBitVectorKernel_PARMS #undef GenBitVector_ARGS template <typename BIN_TYPE, bool USE_MIN_BIN> __global__ void UpdateDataIndexToLeafIndexKernel_Categorical( const data_size_t num_data_in_leaf, const data_size_t* data_indices_in_leaf, const uint32_t* bitset, const int bitset_len, const BIN_TYPE* column_data, // values from feature const uint32_t max_bin, const uint32_t min_bin, const int8_t mfb_offset, int* cuda_data_index_to_leaf_index, const int left_leaf_index, const int right_leaf_index, const int default_leaf_index) { const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x; if (local_data_index < num_data_in_leaf) { const unsigned int global_data_index = data_indices_in_leaf[local_data_index]; const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]); if (USE_MIN_BIN && (bin < min_bin || bin > max_bin)) { cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index; } else if (!USE_MIN_BIN && bin == 0) { cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index; } else if (CUDAFindInBitset(bitset, bitset_len, bin - min_bin + mfb_offset)) { cuda_data_index_to_leaf_index[global_data_index] = left_leaf_index; } else { cuda_data_index_to_leaf_index[global_data_index] = right_leaf_index; } } } // for categorical features template <typename BIN_TYPE, bool USE_MIN_BIN> __global__ void 
GenDataToLeftBitVectorKernel_Categorical( const data_size_t num_data_in_leaf, const data_size_t* data_indices_in_leaf, const uint32_t* bitset, int bitset_len, const BIN_TYPE* column_data, // values from feature const uint32_t max_bin, const uint32_t min_bin, const int8_t mfb_offset, const uint8_t split_default_to_left, uint16_t* block_to_left_offset, data_size_t* block_to_left_offset_buffer, data_size_t* block_to_right_offset_buffer) { __shared__ uint16_t shared_mem_buffer[32]; uint16_t thread_to_left_offset_cnt = 0; const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x; if (local_data_index < num_data_in_leaf) { const unsigned int global_data_index = data_indices_in_leaf[local_data_index]; const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]); if (USE_MIN_BIN && (bin < min_bin || bin > max_bin)) { thread_to_left_offset_cnt = split_default_to_left; } else if (!USE_MIN_BIN && bin == 0) { thread_to_left_offset_cnt = split_default_to_left; } else if (CUDAFindInBitset(bitset, bitset_len, bin - min_bin + mfb_offset)) { thread_to_left_offset_cnt = 1; } } __syncthreads(); PrepareOffset(num_data_in_leaf, block_to_left_offset + blockIdx.x * blockDim.x, block_to_left_offset_buffer, block_to_right_offset_buffer, thread_to_left_offset_cnt, shared_mem_buffer); } #define GenBitVector_Categorical_ARGS \ num_data_in_leaf, data_indices_in_leaf, \ bitset, bitset_len, \ column_data, max_bin, min_bin, mfb_offset, split_default_to_left, \ cuda_block_to_left_offset_, cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_ #define UpdateDataIndexToLeafIndex_Categorical_ARGS \ num_data_in_leaf, data_indices_in_leaf, \ bitset, bitset_len, \ column_data, max_bin, min_bin, mfb_offset, \ cuda_data_index_to_leaf_index_, left_leaf_index, right_leaf_index, default_leaf_index void CUDADataPartition::LaunchGenDataToLeftBitVectorCategoricalKernel( const data_size_t num_data_in_leaf, const int split_feature_index, const uint32_t* bitset, const int 
bitset_len, const uint8_t split_default_left, const data_size_t leaf_data_start, const int left_leaf_index, const int right_leaf_index) { const data_size_t* data_indices_in_leaf = cuda_data_indices_ + leaf_data_start; const int column_index = cuda_column_data_->feature_to_column(split_feature_index); const uint8_t bit_type = cuda_column_data_->column_bit_type(column_index); const bool is_single_feature_in_column = is_single_feature_in_column_[split_feature_index]; const uint32_t min_bin = is_single_feature_in_column ? 1 : cuda_column_data_->feature_min_bin(split_feature_index); const uint32_t max_bin = cuda_column_data_->feature_max_bin(split_feature_index); const uint32_t most_freq_bin = cuda_column_data_->feature_most_freq_bin(split_feature_index); const uint32_t default_bin = cuda_column_data_->feature_default_bin(split_feature_index); const void* column_data_pointer = cuda_column_data_->GetColumnData(column_index); const int8_t mfb_offset = static_cast<int8_t>(most_freq_bin == 0); std::vector<uint32_t> host_bitset(bitset_len, 0); CopyFromCUDADeviceToHost<uint32_t>(host_bitset.data(), bitset, bitset_len, __FILE__, __LINE__); uint8_t split_default_to_left = 0; int default_leaf_index = right_leaf_index; if (most_freq_bin > 0 && Common::FindInBitset(host_bitset.data(), bitset_len, most_freq_bin)) { split_default_to_left = 1; default_leaf_index = left_leaf_index; } if (bit_type == 8) { const uint8_t* column_data = reinterpret_cast<const uint8_t*>(column_data_pointer); if (is_single_feature_in_column) { hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel_Categorical<uint8_t, false>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_Categorical_ARGS); hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel_Categorical<uint8_t, false>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3], UpdateDataIndexToLeafIndex_Categorical_ARGS); } else { hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel_Categorical<uint8_t, true>), dim3(grid_dim_), 
dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_Categorical_ARGS); hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel_Categorical<uint8_t, true>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3], UpdateDataIndexToLeafIndex_Categorical_ARGS); } } else if (bit_type == 16) { const uint16_t* column_data = reinterpret_cast<const uint16_t*>(column_data_pointer); if (is_single_feature_in_column) { hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel_Categorical<uint16_t, false>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_Categorical_ARGS); hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel_Categorical<uint16_t, false>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3], UpdateDataIndexToLeafIndex_Categorical_ARGS); } else { hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel_Categorical<uint16_t, true>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_Categorical_ARGS); hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel_Categorical<uint16_t, true>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3], UpdateDataIndexToLeafIndex_Categorical_ARGS); } } else if (bit_type == 32) { const uint32_t* column_data = reinterpret_cast<const uint32_t*>(column_data_pointer); if (is_single_feature_in_column) { hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel_Categorical<uint32_t, false>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_Categorical_ARGS); hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel_Categorical<uint32_t, false>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3], UpdateDataIndexToLeafIndex_Categorical_ARGS); } else { hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel_Categorical<uint32_t, true>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_Categorical_ARGS); hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel_Categorical<uint32_t, true>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3], UpdateDataIndexToLeafIndex_Categorical_ARGS); } 
} } #undef GenBitVector_Categorical_ARGS #undef UpdateDataIndexToLeafIndex_Categorical_ARGS __global__ void AggregateBlockOffsetKernel0( const int left_leaf_index, const int right_leaf_index, data_size_t* block_to_left_offset_buffer, data_size_t* block_to_right_offset_buffer, data_size_t* cuda_leaf_data_start, data_size_t* cuda_leaf_data_end, data_size_t* cuda_leaf_num_data, const data_size_t* cuda_data_indices, const data_size_t num_blocks) { __shared__ uint32_t shared_mem_buffer[32]; __shared__ uint32_t to_left_total_count; const data_size_t num_data_in_leaf = cuda_leaf_num_data[left_leaf_index]; const unsigned int blockDim_x = blockDim.x; const unsigned int threadIdx_x = threadIdx.x; const data_size_t num_blocks_plus_1 = num_blocks + 1; const uint32_t num_blocks_per_thread = (num_blocks_plus_1 + blockDim_x - 1) / blockDim_x; const uint32_t remain = num_blocks_plus_1 - ((num_blocks_per_thread - 1) * blockDim_x); const uint32_t remain_offset = remain * num_blocks_per_thread; uint32_t thread_start_block_index = 0; uint32_t thread_end_block_index = 0; if (threadIdx_x < remain) { thread_start_block_index = threadIdx_x * num_blocks_per_thread; thread_end_block_index = min(thread_start_block_index + num_blocks_per_thread, num_blocks_plus_1); } else { thread_start_block_index = remain_offset + (num_blocks_per_thread - 1) * (threadIdx_x - remain); thread_end_block_index = min(thread_start_block_index + num_blocks_per_thread - 1, num_blocks_plus_1); } if (threadIdx.x == 0) { block_to_right_offset_buffer[0] = 0; } __syncthreads(); for (uint32_t block_index = thread_start_block_index + 1; block_index < thread_end_block_index; ++block_index) { block_to_left_offset_buffer[block_index] += block_to_left_offset_buffer[block_index - 1]; block_to_right_offset_buffer[block_index] += block_to_right_offset_buffer[block_index - 1]; } __syncthreads(); uint32_t block_to_left_offset = 0; uint32_t block_to_right_offset = 0; if (thread_start_block_index < thread_end_block_index && 
thread_start_block_index > 1) { block_to_left_offset = block_to_left_offset_buffer[thread_start_block_index - 1]; block_to_right_offset = block_to_right_offset_buffer[thread_start_block_index - 1]; } block_to_left_offset = ShufflePrefixSum<uint32_t>(block_to_left_offset, shared_mem_buffer); __syncthreads(); block_to_right_offset = ShufflePrefixSum<uint32_t>(block_to_right_offset, shared_mem_buffer); if (threadIdx_x == blockDim_x - 1) { to_left_total_count = block_to_left_offset + block_to_left_offset_buffer[num_blocks]; } __syncthreads(); const uint32_t to_left_thread_block_offset = block_to_left_offset; const uint32_t to_right_thread_block_offset = block_to_right_offset + to_left_total_count; for (uint32_t block_index = thread_start_block_index; block_index < thread_end_block_index; ++block_index) { block_to_left_offset_buffer[block_index] += to_left_thread_block_offset; block_to_right_offset_buffer[block_index] += to_right_thread_block_offset; } __syncthreads(); if (blockIdx.x == 0 && threadIdx.x == 0) { const data_size_t old_leaf_data_end = cuda_leaf_data_end[left_leaf_index]; cuda_leaf_data_end[left_leaf_index] = cuda_leaf_data_start[left_leaf_index] + static_cast<data_size_t>(to_left_total_count); cuda_leaf_num_data[left_leaf_index] = static_cast<data_size_t>(to_left_total_count); cuda_leaf_data_start[right_leaf_index] = cuda_leaf_data_end[left_leaf_index]; cuda_leaf_data_end[right_leaf_index] = old_leaf_data_end; cuda_leaf_num_data[right_leaf_index] = num_data_in_leaf - static_cast<data_size_t>(to_left_total_count); } } __global__ void AggregateBlockOffsetKernel1( const int left_leaf_index, const int right_leaf_index, data_size_t* block_to_left_offset_buffer, data_size_t* block_to_right_offset_buffer, data_size_t* cuda_leaf_data_start, data_size_t* cuda_leaf_data_end, data_size_t* cuda_leaf_num_data, const data_size_t* cuda_data_indices, const data_size_t num_blocks) { __shared__ uint32_t shared_mem_buffer[32]; __shared__ uint32_t to_left_total_count; const 
data_size_t num_data_in_leaf = cuda_leaf_num_data[left_leaf_index]; const unsigned int threadIdx_x = threadIdx.x; uint32_t block_to_left_offset = 0; uint32_t block_to_right_offset = 0; if (threadIdx_x < static_cast<unsigned int>(num_blocks)) { block_to_left_offset = block_to_left_offset_buffer[threadIdx_x + 1]; block_to_right_offset = block_to_right_offset_buffer[threadIdx_x + 1]; } block_to_left_offset = ShufflePrefixSum<uint32_t>(block_to_left_offset, shared_mem_buffer); __syncthreads(); block_to_right_offset = ShufflePrefixSum<uint32_t>(block_to_right_offset, shared_mem_buffer); if (threadIdx.x == blockDim.x - 1) { to_left_total_count = block_to_left_offset; } __syncthreads(); if (threadIdx_x < static_cast<unsigned int>(num_blocks)) { block_to_left_offset_buffer[threadIdx_x + 1] = block_to_left_offset; block_to_right_offset_buffer[threadIdx_x + 1] = block_to_right_offset + to_left_total_count; } if (threadIdx_x == 0) { block_to_right_offset_buffer[0] = to_left_total_count; } __syncthreads(); if (blockIdx.x == 0 && threadIdx.x == 0) { const data_size_t old_leaf_data_end = cuda_leaf_data_end[left_leaf_index]; cuda_leaf_data_end[left_leaf_index] = cuda_leaf_data_start[left_leaf_index] + static_cast<data_size_t>(to_left_total_count); cuda_leaf_num_data[left_leaf_index] = static_cast<data_size_t>(to_left_total_count); cuda_leaf_data_start[right_leaf_index] = cuda_leaf_data_end[left_leaf_index]; cuda_leaf_data_end[right_leaf_index] = old_leaf_data_end; cuda_leaf_num_data[right_leaf_index] = num_data_in_leaf - static_cast<data_size_t>(to_left_total_count); } } __global__ void SplitTreeStructureKernel(const int left_leaf_index, const int right_leaf_index, data_size_t* block_to_left_offset_buffer, data_size_t* block_to_right_offset_buffer, data_size_t* cuda_leaf_data_start, data_size_t* cuda_leaf_data_end, data_size_t* cuda_leaf_num_data, const data_size_t* cuda_data_indices, const CUDASplitInfo* best_split_info, // for leaf splits information update 
CUDALeafSplitsStruct* smaller_leaf_splits, CUDALeafSplitsStruct* larger_leaf_splits,
  const int num_total_bin, hist_t* cuda_hist, hist_t** cuda_hist_pool,
  double* cuda_leaf_output, int* cuda_split_info_buffer) {
  const unsigned int to_left_total_cnt = cuda_leaf_num_data[left_leaf_index];
  // Slots [8..] of the int buffer are reinterpreted as doubles to carry the
  // children's gradient/hessian sums back to the host in a single copy.
  double* cuda_split_info_buffer_for_hessians = reinterpret_cast<double*>(cuda_split_info_buffer + 8);
  const unsigned int global_thread_index = blockIdx.x * blockDim.x + threadIdx.x;
  // One scalar per thread index: leaf outputs plus the readback buffer layout
  // [0]=left leaf, [1]=left count, [2]=left start, [3]=right leaf,
  // [4]=right count, [5]=right start, doubles: left/right hess then grad sums.
  if (global_thread_index == 0) {
    cuda_leaf_output[left_leaf_index] = best_split_info->left_value;
  } else if (global_thread_index == 1) {
    cuda_leaf_output[right_leaf_index] = best_split_info->right_value;
  } else if (global_thread_index == 2) {
    cuda_split_info_buffer[0] = left_leaf_index;
  } else if (global_thread_index == 3) {
    cuda_split_info_buffer[1] = cuda_leaf_num_data[left_leaf_index];
  } else if (global_thread_index == 4) {
    cuda_split_info_buffer[2] = cuda_leaf_data_start[left_leaf_index];
  } else if (global_thread_index == 5) {
    cuda_split_info_buffer[3] = right_leaf_index;
  } else if (global_thread_index == 6) {
    cuda_split_info_buffer[4] = cuda_leaf_num_data[right_leaf_index];
  } else if (global_thread_index == 7) {
    cuda_split_info_buffer[5] = cuda_leaf_data_start[right_leaf_index];
  } else if (global_thread_index == 8) {
    cuda_split_info_buffer_for_hessians[0] = best_split_info->left_sum_hessians;
    cuda_split_info_buffer_for_hessians[2] = best_split_info->left_sum_gradients;
  } else if (global_thread_index == 9) {
    cuda_split_info_buffer_for_hessians[1] = best_split_info->right_sum_hessians;
    cuda_split_info_buffer_for_hessians[3] = best_split_info->right_sum_gradients;
  }
  // Assign the child with fewer rows to the "smaller" leaf-splits struct;
  // histogram pointers are juggled so the smaller leaf gets a fresh histogram
  // while the larger one reuses/keeps the parent's.
  if (cuda_leaf_num_data[left_leaf_index] < cuda_leaf_num_data[right_leaf_index]) {
    // Left child is smaller: right child inherits the parent histogram.
    if (global_thread_index == 0) {
      hist_t* parent_hist_ptr = cuda_hist_pool[left_leaf_index];
      cuda_hist_pool[right_leaf_index] = parent_hist_ptr;
      cuda_hist_pool[left_leaf_index] = cuda_hist + 2 * right_leaf_index * num_total_bin;
      smaller_leaf_splits->hist_in_leaf =
    cuda_hist_pool[left_leaf_index];
      larger_leaf_splits->hist_in_leaf = cuda_hist_pool[right_leaf_index];
    } else if (global_thread_index == 1) {
      smaller_leaf_splits->sum_of_gradients = best_split_info->left_sum_gradients;
    } else if (global_thread_index == 2) {
      smaller_leaf_splits->sum_of_hessians = best_split_info->left_sum_hessians;
    } else if (global_thread_index == 3) {
      smaller_leaf_splits->num_data_in_leaf = to_left_total_cnt;
    } else if (global_thread_index == 4) {
      smaller_leaf_splits->gain = best_split_info->left_gain;
    } else if (global_thread_index == 5) {
      smaller_leaf_splits->leaf_value = best_split_info->left_value;
    } else if (global_thread_index == 6) {
      smaller_leaf_splits->data_indices_in_leaf = cuda_data_indices;
    } else if (global_thread_index == 7) {
      larger_leaf_splits->leaf_index = right_leaf_index;
    } else if (global_thread_index == 8) {
      larger_leaf_splits->sum_of_gradients = best_split_info->right_sum_gradients;
    } else if (global_thread_index == 9) {
      larger_leaf_splits->sum_of_hessians = best_split_info->right_sum_hessians;
    } else if (global_thread_index == 10) {
      larger_leaf_splits->num_data_in_leaf = cuda_leaf_num_data[right_leaf_index];
    } else if (global_thread_index == 11) {
      larger_leaf_splits->gain = best_split_info->right_gain;
    } else if (global_thread_index == 12) {
      larger_leaf_splits->leaf_value = best_split_info->right_value;
    } else if (global_thread_index == 13) {
      larger_leaf_splits->data_indices_in_leaf = cuda_data_indices + cuda_leaf_num_data[left_leaf_index];
    } else if (global_thread_index == 14) {
      // Buffer slots [6]/[7] report which leaf is the smaller/larger one.
      cuda_split_info_buffer[6] = left_leaf_index;
    } else if (global_thread_index == 15) {
      cuda_split_info_buffer[7] = right_leaf_index;
    } else if (global_thread_index == 16) {
      smaller_leaf_splits->leaf_index = left_leaf_index;
    }
  } else {
    // Right child is smaller (or equal): mirror of the branch above.
    if (global_thread_index == 0) {
      larger_leaf_splits->leaf_index = left_leaf_index;
    } else if (global_thread_index == 1) {
      larger_leaf_splits->sum_of_gradients = best_split_info->left_sum_gradients;
    } else if
  (global_thread_index == 2) {
      larger_leaf_splits->sum_of_hessians = best_split_info->left_sum_hessians;
    } else if (global_thread_index == 3) {
      larger_leaf_splits->num_data_in_leaf = to_left_total_cnt;
    } else if (global_thread_index == 4) {
      larger_leaf_splits->gain = best_split_info->left_gain;
    } else if (global_thread_index == 5) {
      larger_leaf_splits->leaf_value = best_split_info->left_value;
    } else if (global_thread_index == 6) {
      larger_leaf_splits->data_indices_in_leaf = cuda_data_indices;
    } else if (global_thread_index == 7) {
      smaller_leaf_splits->leaf_index = right_leaf_index;
    } else if (global_thread_index == 8) {
      smaller_leaf_splits->sum_of_gradients = best_split_info->right_sum_gradients;
    } else if (global_thread_index == 9) {
      smaller_leaf_splits->sum_of_hessians = best_split_info->right_sum_hessians;
    } else if (global_thread_index == 10) {
      smaller_leaf_splits->num_data_in_leaf = cuda_leaf_num_data[right_leaf_index];
    } else if (global_thread_index == 11) {
      smaller_leaf_splits->gain = best_split_info->right_gain;
    } else if (global_thread_index == 12) {
      smaller_leaf_splits->leaf_value = best_split_info->right_value;
    } else if (global_thread_index == 13) {
      smaller_leaf_splits->data_indices_in_leaf = cuda_data_indices + cuda_leaf_num_data[left_leaf_index];
    } else if (global_thread_index == 14) {
      // Right (smaller) child gets a fresh histogram slot; left keeps the parent's.
      cuda_hist_pool[right_leaf_index] = cuda_hist + 2 * right_leaf_index * num_total_bin;
      smaller_leaf_splits->hist_in_leaf = cuda_hist_pool[right_leaf_index];
    } else if (global_thread_index == 15) {
      larger_leaf_splits->hist_in_leaf = cuda_hist_pool[left_leaf_index];
    } else if (global_thread_index == 16) {
      cuda_split_info_buffer[6] = right_leaf_index;
    } else if (global_thread_index == 17) {
      cuda_split_info_buffer[7] = left_leaf_index;
    }
  }
}

// Scatters the parent leaf's data indices into the left/right child layout of
// out_data_indices_in_leaf, using the per-thread inclusive offsets in
// block_to_left_offset and the per-block base offsets computed by the
// aggregation kernels.
__global__ void SplitInnerKernel(const int left_leaf_index, const int right_leaf_index,
  const data_size_t* cuda_leaf_data_start, const data_size_t* cuda_leaf_num_data,
  const data_size_t* cuda_data_indices, const data_size_t*
block_to_left_offset_buffer, const data_size_t* block_to_right_offset_buffer,
  const uint16_t* block_to_left_offset, data_size_t* out_data_indices_in_leaf) {
  const data_size_t leaf_num_data_offset = cuda_leaf_data_start[left_leaf_index];
  // cuda_leaf_num_data was already updated for the split, so the parent's row
  // count is the sum of both children.
  const data_size_t num_data_in_leaf = cuda_leaf_num_data[left_leaf_index] + cuda_leaf_num_data[right_leaf_index];
  const unsigned int threadIdx_x = threadIdx.x;
  const unsigned int blockDim_x = blockDim.x;
  const unsigned int global_thread_index = blockIdx.x * blockDim_x + threadIdx_x;
  const data_size_t* cuda_data_indices_in_leaf = cuda_data_indices + leaf_num_data_offset;
  // Per-thread inclusive "went left" prefix counts for this block.
  const uint16_t* block_to_left_offset_ptr = block_to_left_offset + blockIdx.x * blockDim_x;
  const uint32_t to_right_block_offset = block_to_right_offset_buffer[blockIdx.x];
  const uint32_t to_left_block_offset = block_to_left_offset_buffer[blockIdx.x];
  data_size_t* left_out_data_indices_in_leaf = out_data_indices_in_leaf + to_left_block_offset;
  data_size_t* right_out_data_indices_in_leaf = out_data_indices_in_leaf + to_right_block_offset;
  if (static_cast<data_size_t>(global_thread_index) < num_data_in_leaf) {
    const uint32_t thread_to_left_offset = (threadIdx_x == 0 ?
  0 : block_to_left_offset_ptr[threadIdx_x - 1]);
    // The inclusive count increased at this thread iff this row goes left.
    const bool to_left = block_to_left_offset_ptr[threadIdx_x] > thread_to_left_offset;
    if (to_left) {
      left_out_data_indices_in_leaf[thread_to_left_offset] = cuda_data_indices_in_leaf[global_thread_index];
    } else {
      const uint32_t thread_to_right_offset = threadIdx.x - thread_to_left_offset;
      right_out_data_indices_in_leaf[thread_to_right_offset] = cuda_data_indices_in_leaf[global_thread_index];
    }
  }
}

// Copies the partitioned indices back into the main cuda_data_indices array.
__global__ void CopyDataIndicesKernel(
  const data_size_t num_data_in_leaf,
  const data_size_t* out_data_indices_in_leaf,
  data_size_t* cuda_data_indices) {
  const unsigned int threadIdx_x = threadIdx.x;
  const unsigned int global_thread_index = blockIdx.x * blockDim.x + threadIdx_x;
  if (global_thread_index < num_data_in_leaf) {
    cuda_data_indices[global_thread_index] = out_data_indices_in_leaf[global_thread_index];
  }
}

// Host-side driver for a leaf split: aggregates block offsets, scatters the
// data indices, updates the tree-structure metadata on device, then reads the
// split summary back to the host output parameters.
void CUDADataPartition::LaunchSplitInnerKernel(
  const data_size_t num_data_in_leaf, const CUDASplitInfo* best_split_info,
  const int left_leaf_index, const int right_leaf_index,
  // for leaf splits information update
  CUDALeafSplitsStruct* smaller_leaf_splits, CUDALeafSplitsStruct* larger_leaf_splits,
  data_size_t* left_leaf_num_data_ref, data_size_t* right_leaf_num_data_ref,
  data_size_t* left_leaf_start_ref, data_size_t* right_leaf_start_ref,
  double* left_leaf_sum_of_hessians_ref, double* right_leaf_sum_of_hessians_ref,
  double* left_leaf_sum_of_gradients_ref, double* right_leaf_sum_of_gradients_ref) {
  // Round grid_dim_ - 1 up to the next power of two for the shuffle-based
  // aggregation kernel's block size.
  int num_blocks_final_ref = grid_dim_ - 1;
  int num_blocks_final_aligned = 1;
  while (num_blocks_final_ref > 0) {
    num_blocks_final_aligned <<= 1;
    num_blocks_final_ref >>= 1;
  }
  global_timer.Start("CUDADataPartition::AggregateBlockOffsetKernel");
  // Variant 0 when there are more blocks than one aggregation thread block can
  // cover one-to-one; variant 1 otherwise.
  if (grid_dim_ > AGGREGATE_BLOCK_SIZE_DATA_PARTITION) {
    hipLaunchKernelGGL(( AggregateBlockOffsetKernel0), dim3(1), dim3(AGGREGATE_BLOCK_SIZE_DATA_PARTITION), 0, cuda_streams_[0],  left_leaf_index, right_leaf_index, cuda_block_data_to_left_offset_,
      cuda_block_data_to_right_offset_,
cuda_leaf_data_start_, cuda_leaf_data_end_, cuda_leaf_num_data_, cuda_data_indices_,
      grid_dim_);
  } else {
    hipLaunchKernelGGL(( AggregateBlockOffsetKernel1), dim3(1), dim3(num_blocks_final_aligned), 0, cuda_streams_[0],  left_leaf_index, right_leaf_index, cuda_block_data_to_left_offset_,
      cuda_block_data_to_right_offset_, cuda_leaf_data_start_, cuda_leaf_data_end_,
      cuda_leaf_num_data_, cuda_data_indices_, grid_dim_);
  }
  SynchronizeCUDADevice(__FILE__, __LINE__);
  global_timer.Stop("CUDADataPartition::AggregateBlockOffsetKernel");
  global_timer.Start("CUDADataPartition::SplitInnerKernel");
  hipLaunchKernelGGL(( SplitInnerKernel), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[1], 
    left_leaf_index, right_leaf_index, cuda_leaf_data_start_, cuda_leaf_num_data_,
    cuda_data_indices_, cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_,
    cuda_block_to_left_offset_, cuda_out_data_indices_in_leaf_);
  global_timer.Stop("CUDADataPartition::SplitInnerKernel");
  SynchronizeCUDADevice(__FILE__, __LINE__);
  global_timer.Start("CUDADataPartition::SplitTreeStructureKernel");
  // 4 x 5 = 20 threads, enough for the per-scalar thread indices 0..17 used
  // inside SplitTreeStructureKernel.
  hipLaunchKernelGGL(( SplitTreeStructureKernel), dim3(4), dim3(5), 0, cuda_streams_[0],  left_leaf_index, right_leaf_index,
    cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_, cuda_leaf_data_start_, cuda_leaf_data_end_,
    cuda_leaf_num_data_, cuda_out_data_indices_in_leaf_,
    best_split_info,
    smaller_leaf_splits,
    larger_leaf_splits,
    num_total_bin_,
    cuda_hist_,
    cuda_hist_pool_,
    cuda_leaf_output_, cuda_split_info_buffer_);
  global_timer.Stop("CUDADataPartition::SplitTreeStructureKernel");
  // 16 ints: slots [0..7] are split metadata, [8..15] alias 4 doubles holding
  // the children's hessian/gradient sums (see SplitTreeStructureKernel).
  std::vector<int> cpu_split_info_buffer(16);
  const double* cpu_sum_hessians_info = reinterpret_cast<const double*>(cpu_split_info_buffer.data() + 8);
  global_timer.Start("CUDADataPartition::CopyFromCUDADeviceToHostAsync");
  CopyFromCUDADeviceToHostAsync<int>(cpu_split_info_buffer.data(), cuda_split_info_buffer_, 16, cuda_streams_[0], __FILE__, __LINE__);
  SynchronizeCUDADevice(__FILE__, __LINE__);
 global_timer.Stop("CUDADataPartition::CopyFromCUDADeviceToHostAsync");
  const data_size_t left_leaf_num_data = cpu_split_info_buffer[1];
  const data_size_t left_leaf_data_start = cpu_split_info_buffer[2];
  const data_size_t right_leaf_num_data = cpu_split_info_buffer[4];
  global_timer.Start("CUDADataPartition::CopyDataIndicesKernel");
  // Write the partitioned indices back over the parent's index range.
  hipLaunchKernelGGL(( CopyDataIndicesKernel), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[2], 
    left_leaf_num_data + right_leaf_num_data, cuda_out_data_indices_in_leaf_, cuda_data_indices_ + left_leaf_data_start);
  global_timer.Stop("CUDADataPartition::CopyDataIndicesKernel");
  const data_size_t right_leaf_data_start = cpu_split_info_buffer[5];
  *left_leaf_num_data_ref = left_leaf_num_data;
  *left_leaf_start_ref = left_leaf_data_start;
  *right_leaf_num_data_ref = right_leaf_num_data;
  *right_leaf_start_ref = right_leaf_data_start;
  *left_leaf_sum_of_hessians_ref = cpu_sum_hessians_info[0];
  *right_leaf_sum_of_hessians_ref = cpu_sum_hessians_info[1];
  *left_leaf_sum_of_gradients_ref = cpu_sum_hessians_info[2];
  *right_leaf_sum_of_gradients_ref = cpu_sum_hessians_info[3];
}

// Writes each row's leaf prediction value into cuda_scores. With bagging, rows
// are addressed indirectly through data_indices_in_leaf; without bagging the
// local index is the global row index.
template <bool USE_BAGGING>
__global__ void AddPredictionToScoreKernel(
  const data_size_t* data_indices_in_leaf, const double* leaf_value, double* cuda_scores,
  const int* cuda_data_index_to_leaf_index, const data_size_t num_data) {
  const unsigned int threadIdx_x = threadIdx.x;
  const unsigned int blockIdx_x = blockIdx.x;
  const unsigned int blockDim_x = blockDim.x;
  const data_size_t local_data_index = static_cast<data_size_t>(blockIdx_x * blockDim_x + threadIdx_x);
  if (local_data_index < num_data) {
    if (USE_BAGGING) {
      const data_size_t global_data_index = data_indices_in_leaf[local_data_index];
      const int leaf_index = cuda_data_index_to_leaf_index[global_data_index];
      const double leaf_prediction_value = leaf_value[leaf_index];
      cuda_scores[local_data_index] = leaf_prediction_value;
    } else {
      const int leaf_index = cuda_data_index_to_leaf_index[local_data_index];
      const double
leaf_prediction_value = leaf_value[leaf_index]; cuda_scores[local_data_index] = leaf_prediction_value; } } } void CUDADataPartition::LaunchAddPredictionToScoreKernel(const double* leaf_value, double* cuda_scores) { global_timer.Start("CUDADataPartition::AddPredictionToScoreKernel"); const data_size_t num_data_in_root = root_num_data(); const int num_blocks = (num_data_in_root + FILL_INDICES_BLOCK_SIZE_DATA_PARTITION - 1) / FILL_INDICES_BLOCK_SIZE_DATA_PARTITION; if (use_bagging_) { hipLaunchKernelGGL(( AddPredictionToScoreKernel<true>), dim3(num_blocks), dim3(FILL_INDICES_BLOCK_SIZE_DATA_PARTITION), 0, 0, cuda_data_indices_, leaf_value, cuda_scores, cuda_data_index_to_leaf_index_, num_data_in_root); } else { hipLaunchKernelGGL(( AddPredictionToScoreKernel<false>), dim3(num_blocks), dim3(FILL_INDICES_BLOCK_SIZE_DATA_PARTITION), 0, 0, cuda_data_indices_, leaf_value, cuda_scores, cuda_data_index_to_leaf_index_, num_data_in_root); } SynchronizeCUDADevice(__FILE__, __LINE__); global_timer.Stop("CUDADataPartition::AddPredictionToScoreKernel"); } } // namespace LightGBM #endif // USE_CUDA_EXP
30f95129a6071a85e09218ed59b3b7da41645109.cu
/*!
 * Copyright (c) 2021 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for
 * license information.
 */

#ifdef USE_CUDA_EXP

#include "cuda_data_partition.hpp"

#include <LightGBM/cuda/cuda_algorithms.hpp>
#include <LightGBM/tree.h>

#include <algorithm>
#include <vector>

namespace LightGBM {

// Initializes the identity index permutation and maps every row to leaf 0 (root).
__global__ void FillDataIndicesBeforeTrainKernel(const data_size_t num_data,
  data_size_t* data_indices, int* cuda_data_index_to_leaf_index) {
  const unsigned int data_index = threadIdx.x + blockIdx.x * blockDim.x;
  if (data_index < num_data) {
    data_indices[data_index] = data_index;
    cuda_data_index_to_leaf_index[data_index] = 0;
  }
}

// Resets the leaf index of each row listed in data_indices to 0 (root).
__global__ void FillDataIndexToLeafIndexKernel(
  const data_size_t num_data,
  const data_size_t* data_indices,
  int* data_index_to_leaf_index) {
  const data_size_t data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
  if (data_index < num_data) {
    data_index_to_leaf_index[data_indices[data_index]] = 0;
  }
}

// Host wrapper for FillDataIndicesBeforeTrainKernel over the root's data.
void CUDADataPartition::LaunchFillDataIndicesBeforeTrain() {
  const data_size_t num_data_in_root = root_num_data();
  // Ceil-divide so the last partial block is covered.
  const int num_blocks = (num_data_in_root + FILL_INDICES_BLOCK_SIZE_DATA_PARTITION - 1) / FILL_INDICES_BLOCK_SIZE_DATA_PARTITION;
  FillDataIndicesBeforeTrainKernel<<<num_blocks, FILL_INDICES_BLOCK_SIZE_DATA_PARTITION>>>(num_data_in_root, cuda_data_indices_, cuda_data_index_to_leaf_index_);
}

// Host wrapper for FillDataIndexToLeafIndexKernel over the root's data.
void CUDADataPartition::LaunchFillDataIndexToLeafIndex() {
  const data_size_t num_data_in_root = root_num_data();
  const int num_blocks = (num_data_in_root + FILL_INDICES_BLOCK_SIZE_DATA_PARTITION - 1) / FILL_INDICES_BLOCK_SIZE_DATA_PARTITION;
  FillDataIndexToLeafIndexKernel<<<num_blocks, FILL_INDICES_BLOCK_SIZE_DATA_PARTITION>>>(num_data_in_root, cuda_data_indices_, cuda_data_index_to_leaf_index_);
}

// Block-level helper: prefix-sums the per-thread "goes left" flags, records the
// inclusive offsets in block_to_left_offset, and stores this block's left/right
// totals into slots [blockIdx.x + 1] of the aggregation buffers.
__device__ __forceinline__ void PrepareOffset(const data_size_t num_data_in_leaf,
  uint16_t* block_to_left_offset, data_size_t* block_to_left_offset_buffer, data_size_t*
  block_to_right_offset_buffer, const uint16_t thread_to_left_offset_cnt,
  uint16_t* shared_mem_buffer) {
  const unsigned int threadIdx_x = threadIdx.x;
  const unsigned int blockDim_x = blockDim.x;
  const uint16_t thread_to_left_offset = ShufflePrefixSum<uint16_t>(thread_to_left_offset_cnt, shared_mem_buffer);
  // Number of valid rows handled by this block (last block may be partial).
  const data_size_t num_data_in_block = (blockIdx.x + 1) * blockDim_x <= num_data_in_leaf ?
    static_cast<data_size_t>(blockDim_x) :
    num_data_in_leaf - static_cast<data_size_t>(blockIdx.x * blockDim_x);
  if (static_cast<data_size_t>(threadIdx_x) < num_data_in_block) {
    block_to_left_offset[threadIdx_x] = thread_to_left_offset;
  }
  if (threadIdx_x == blockDim_x - 1) {
    if (num_data_in_block > 0) {
      const data_size_t data_to_left = static_cast<data_size_t>(thread_to_left_offset);
      block_to_left_offset_buffer[blockIdx.x + 1] = data_to_left;
      block_to_right_offset_buffer[blockIdx.x + 1] = num_data_in_block - data_to_left;
    } else {
      block_to_left_offset_buffer[blockIdx.x + 1] = 0;
      block_to_right_offset_buffer[blockIdx.x + 1] = 0;
    }
  }
}

// Tests bit `pos` in a packed 32-bit bitset of n words; false if out of range.
template <typename T>
__device__ bool CUDAFindInBitset(const uint32_t* bits, int n, T pos) {
  int i1 = pos / 32;
  if (i1 >= n) {
    return false;
  }
  int i2 = pos % 32;
  return (bits[i1] >> i2) & 1;
}

#define UpdateDataIndexToLeafIndexKernel_PARAMS \
  const BIN_TYPE* column_data, \
  const data_size_t num_data_in_leaf, \
  const data_size_t* data_indices_in_leaf, \
  const uint32_t th, \
  const uint32_t t_zero_bin, \
  const uint32_t max_bin, \
  const uint32_t min_bin, \
  const int left_leaf_index, \
  const int right_leaf_index, \
  const int default_leaf_index, \
  const int missing_default_leaf_index

#define UpdateDataIndexToLeafIndex_ARGS \
  column_data, \
  num_data_in_leaf, \
  data_indices_in_leaf, th, \
  t_zero_bin, \
  max_bin, \
  min_bin, \
  left_leaf_index, \
  right_leaf_index, \
  default_leaf_index, \
  missing_default_leaf_index

template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, bool USE_MIN_BIN, typename
BIN_TYPE> __global__ void UpdateDataIndexToLeafIndexKernel( UpdateDataIndexToLeafIndexKernel_PARAMS, int* cuda_data_index_to_leaf_index) { const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x; if (local_data_index < num_data_in_leaf) { const unsigned int global_data_index = data_indices_in_leaf[local_data_index]; const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]); if (!MIN_IS_MAX) { if ((MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) || (MISSING_IS_NA && !MFB_IS_NA && bin == max_bin)) { cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index; } else if ((USE_MIN_BIN && (bin < min_bin || bin > max_bin)) || (!USE_MIN_BIN && bin == 0)) { if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) { cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index; } else { cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index; } } else if (bin > th) { cuda_data_index_to_leaf_index[global_data_index] = right_leaf_index; } else { cuda_data_index_to_leaf_index[global_data_index] = left_leaf_index; } } else { if (MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) { cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index; } else if (bin != max_bin) { if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) { cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index; } else { cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index; } } else { if (MISSING_IS_NA && !MFB_IS_NA) { cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index; } else { if (!MAX_TO_LEFT) { cuda_data_index_to_leaf_index[global_data_index] = right_leaf_index; } else { cuda_data_index_to_leaf_index[global_data_index] = left_leaf_index; } } } } } } template <typename BIN_TYPE> void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel( UpdateDataIndexToLeafIndexKernel_PARAMS, const bool 
missing_is_zero, const bool missing_is_na, const bool mfb_is_zero, const bool mfb_is_na,
  const bool max_to_left, const bool is_single_feature_in_column) {
  // MIN_IS_MAX is decided here; each Inner level binds one more flag.
  if (min_bin < max_bin) {
    if (!missing_is_zero) {
      LaunchUpdateDataIndexToLeafIndexKernel_Inner0<false, false, BIN_TYPE>
        (UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
    } else {
      LaunchUpdateDataIndexToLeafIndexKernel_Inner0<false, true, BIN_TYPE>
        (UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
    }
  } else {
    if (!missing_is_zero) {
      LaunchUpdateDataIndexToLeafIndexKernel_Inner0<true, false, BIN_TYPE>
        (UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
    } else {
      LaunchUpdateDataIndexToLeafIndexKernel_Inner0<true, true, BIN_TYPE>
        (UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
    }
  }
}

// Binds MISSING_IS_NA.
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner0(
  UpdateDataIndexToLeafIndexKernel_PARAMS,
  const bool missing_is_na, const bool mfb_is_zero, const bool mfb_is_na,
  const bool max_to_left, const bool is_single_feature_in_column) {
  if (!missing_is_na) {
    LaunchUpdateDataIndexToLeafIndexKernel_Inner1<MIN_IS_MAX, MISSING_IS_ZERO, false, BIN_TYPE>
      (UpdateDataIndexToLeafIndex_ARGS, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
  } else {
    LaunchUpdateDataIndexToLeafIndexKernel_Inner1<MIN_IS_MAX, MISSING_IS_ZERO, true, BIN_TYPE>
      (UpdateDataIndexToLeafIndex_ARGS, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
  }
}

// Binds MFB_IS_ZERO.
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner1(
  UpdateDataIndexToLeafIndexKernel_PARAMS,
  const bool mfb_is_zero, const bool mfb_is_na, const bool
 max_to_left, const bool is_single_feature_in_column) {
  if (!mfb_is_zero) {
    LaunchUpdateDataIndexToLeafIndexKernel_Inner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, false, BIN_TYPE>
      (UpdateDataIndexToLeafIndex_ARGS, mfb_is_na, max_to_left, is_single_feature_in_column);
  } else {
    LaunchUpdateDataIndexToLeafIndexKernel_Inner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, true, BIN_TYPE>
      (UpdateDataIndexToLeafIndex_ARGS, mfb_is_na, max_to_left, is_single_feature_in_column);
  }
}

// Binds MFB_IS_NA.
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner2(
  UpdateDataIndexToLeafIndexKernel_PARAMS,
  const bool mfb_is_na, const bool max_to_left, const bool is_single_feature_in_column) {
  if (!mfb_is_na) {
    LaunchUpdateDataIndexToLeafIndexKernel_Inner3<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, false, BIN_TYPE>
      (UpdateDataIndexToLeafIndex_ARGS, max_to_left, is_single_feature_in_column);
  } else {
    LaunchUpdateDataIndexToLeafIndexKernel_Inner3<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, true, BIN_TYPE>
      (UpdateDataIndexToLeafIndex_ARGS, max_to_left, is_single_feature_in_column);
  }
}

// Binds MAX_TO_LEFT.
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner3(
  UpdateDataIndexToLeafIndexKernel_PARAMS,
  const bool max_to_left, const bool is_single_feature_in_column) {
  if (!max_to_left) {
    LaunchUpdateDataIndexToLeafIndexKernel_Inner4<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, false, BIN_TYPE>
      (UpdateDataIndexToLeafIndex_ARGS, is_single_feature_in_column);
  } else {
    LaunchUpdateDataIndexToLeafIndexKernel_Inner4<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, true, BIN_TYPE>
      (UpdateDataIndexToLeafIndex_ARGS, is_single_feature_in_column);
  }
}

template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA,
bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, typename BIN_TYPE> void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner4( UpdateDataIndexToLeafIndexKernel_PARAMS, const bool is_single_feature_in_column) { if (!is_single_feature_in_column) { UpdateDataIndexToLeafIndexKernel<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, true, BIN_TYPE> <<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>( UpdateDataIndexToLeafIndex_ARGS, cuda_data_index_to_leaf_index_); } else { UpdateDataIndexToLeafIndexKernel<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, false, BIN_TYPE> <<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>( UpdateDataIndexToLeafIndex_ARGS, cuda_data_index_to_leaf_index_); } } #define GenDataToLeftBitVectorKernel_PARMS \ const BIN_TYPE* column_data, \ const data_size_t num_data_in_leaf, \ const data_size_t* data_indices_in_leaf, \ const uint32_t th, \ const uint32_t t_zero_bin, \ const uint32_t max_bin, \ const uint32_t min_bin, \ const uint8_t split_default_to_left, \ const uint8_t split_missing_default_to_left #define GenBitVector_ARGS \ column_data, \ num_data_in_leaf, \ data_indices_in_leaf, \ th, \ t_zero_bin, \ max_bin, \ min_bin, \ split_default_to_left, \ split_missing_default_to_left template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, bool USE_MIN_BIN, typename BIN_TYPE> __global__ void GenDataToLeftBitVectorKernel( GenDataToLeftBitVectorKernel_PARMS, uint16_t* block_to_left_offset, data_size_t* block_to_left_offset_buffer, data_size_t* block_to_right_offset_buffer) { __shared__ uint16_t shared_mem_buffer[32]; uint16_t thread_to_left_offset_cnt = 0; const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x; if (local_data_index < num_data_in_leaf) { const unsigned int global_data_index = data_indices_in_leaf[local_data_index]; const uint32_t bin = 
static_cast<uint32_t>(column_data[global_data_index]);
    if (!MIN_IS_MAX) {
      // Feature column has more than one bin.
      if ((MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) ||
          (MISSING_IS_NA && !MFB_IS_NA && bin == max_bin)) {
        // Bin explicitly encodes a missing value -> follow the missing-default direction.
        thread_to_left_offset_cnt = split_missing_default_to_left;
      } else if ((USE_MIN_BIN && (bin < min_bin || bin > max_bin)) ||
                 (!USE_MIN_BIN && bin == 0)) {
        // Bin is out of this feature's range -> the row holds the most-frequent bin (mfb).
        // FIX(review): was `(MISSING_IS_ZERO || MFB_IS_ZERO)`; that routed mfb rows by the
        // *missing* default whenever only one of the two flags held.  The missing default
        // applies only when missing actually maps onto the mfb, i.e. both flags together —
        // matching the MIN_IS_MAX branch below, which already uses `&&`.
        if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) {
          thread_to_left_offset_cnt = split_missing_default_to_left;
        } else {
          thread_to_left_offset_cnt = split_default_to_left;
        }
      } else if (bin <= th) {
        // At or below the threshold bin -> left child.
        thread_to_left_offset_cnt = 1;
      }
    } else {
      // Single-bin feature column (min bin == max bin).
      if (MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) {
        thread_to_left_offset_cnt = split_missing_default_to_left;
      } else if (bin != max_bin) {
        if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) {
          thread_to_left_offset_cnt = split_missing_default_to_left;
        } else {
          thread_to_left_offset_cnt = split_default_to_left;
        }
      } else {
        if (MISSING_IS_NA && !MFB_IS_NA) {
          thread_to_left_offset_cnt = split_missing_default_to_left;
        } else if (MAX_TO_LEFT) {
          thread_to_left_offset_cnt = 1;
        }
      }
    }
  }
  // All threads (including out-of-range ones) must reach the barrier before PrepareOffset
  // aggregates the per-thread flags through shared memory.
  __syncthreads();
  PrepareOffset(num_data_in_leaf, block_to_left_offset + blockIdx.x * blockDim.x,
                block_to_left_offset_buffer, block_to_right_offset_buffer,
                thread_to_left_offset_cnt, shared_mem_buffer);
}

// Host-side dispatcher for GenDataToLeftBitVectorKernel: first hop of the chain turning
// runtime bools into template flags (min_bin >= max_bin -> MIN_IS_MAX,
// missing_is_zero -> MISSING_IS_ZERO).  Continues on the next original line.
template <typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner(
    GenDataToLeftBitVectorKernel_PARMS,
    const bool missing_is_zero, const bool missing_is_na, const bool mfb_is_zero, const bool mfb_is_na,
    const bool max_bin_to_left, const bool is_single_feature_in_column) {
  if (min_bin < max_bin) {
    if (!missing_is_zero) {
      LaunchGenDataToLeftBitVectorKernelInner0<false, false, BIN_TYPE>
        (GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
    } else {
      LaunchGenDataToLeftBitVectorKernelInner0<false, true, BIN_TYPE>
        (GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left,
is_single_feature_in_column);
    }
  } else {
    // min_bin >= max_bin: single-bin feature column -> MIN_IS_MAX = true.
    if (!missing_is_zero) {
      LaunchGenDataToLeftBitVectorKernelInner0<true, false, BIN_TYPE>
        (GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
    } else {
      LaunchGenDataToLeftBitVectorKernelInner0<true, true, BIN_TYPE>
        (GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
    }
  }
}

// Dispatch hop: missing_is_na -> MISSING_IS_NA template flag.
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner0(
    GenDataToLeftBitVectorKernel_PARMS,
    const bool missing_is_na, const bool mfb_is_zero, const bool mfb_is_na, const bool max_bin_to_left, const bool is_single_feature_in_column) {
  if (!missing_is_na) {
    LaunchGenDataToLeftBitVectorKernelInner1<MIN_IS_MAX, MISSING_IS_ZERO, false, BIN_TYPE>
      (GenBitVector_ARGS, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
  } else {
    LaunchGenDataToLeftBitVectorKernelInner1<MIN_IS_MAX, MISSING_IS_ZERO, true, BIN_TYPE>
      (GenBitVector_ARGS, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
  }
}

// Dispatch hop: mfb_is_zero -> MFB_IS_ZERO template flag.
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner1(
    GenDataToLeftBitVectorKernel_PARMS,
    const bool mfb_is_zero, const bool mfb_is_na, const bool max_bin_to_left, const bool is_single_feature_in_column) {
  if (!mfb_is_zero) {
    LaunchGenDataToLeftBitVectorKernelInner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, false, BIN_TYPE>
      (GenBitVector_ARGS, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
  } else {
    LaunchGenDataToLeftBitVectorKernelInner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, true, BIN_TYPE>
      (GenBitVector_ARGS, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
  }
}

// Dispatch hop: mfb_is_na -> MFB_IS_NA template flag.
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner2(
GenDataToLeftBitVectorKernel_PARMS,
    const bool mfb_is_na, const bool max_bin_to_left, const bool is_single_feature_in_column) {
  if (!mfb_is_na) {
    LaunchGenDataToLeftBitVectorKernelInner3
      <MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, false, BIN_TYPE>
      (GenBitVector_ARGS, max_bin_to_left, is_single_feature_in_column);
  } else {
    LaunchGenDataToLeftBitVectorKernelInner3
      <MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, true, BIN_TYPE>
      (GenBitVector_ARGS, max_bin_to_left, is_single_feature_in_column);
  }
}

// Dispatch hop: max_bin_to_left -> MAX_TO_LEFT template flag.
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner3(
    GenDataToLeftBitVectorKernel_PARMS,
    const bool max_bin_to_left, const bool is_single_feature_in_column) {
  if (!max_bin_to_left) {
    LaunchGenDataToLeftBitVectorKernelInner4
      <MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, false, BIN_TYPE>
      (GenBitVector_ARGS, is_single_feature_in_column);
  } else {
    LaunchGenDataToLeftBitVectorKernelInner4
      <MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, true, BIN_TYPE>
      (GenBitVector_ARGS, is_single_feature_in_column);
  }
}

// Final hop: !is_single_feature_in_column -> USE_MIN_BIN = true; launches the bit-vector
// kernel on cuda_streams_[0] with the per-block offset output buffers.
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner4(
    GenDataToLeftBitVectorKernel_PARMS,
    const bool is_single_feature_in_column) {
  if (!is_single_feature_in_column) {
    GenDataToLeftBitVectorKernel
      <MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, true, BIN_TYPE>
      <<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_ARGS, cuda_block_to_left_offset_,
        cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_);
  } else {
    GenDataToLeftBitVectorKernel
      <MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, false, BIN_TYPE>
      <<<grid_dim_, block_dim_, 0,
cuda_streams_[0]>>>(GenBitVector_ARGS, cuda_block_to_left_offset_,
        cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_);
  }
}

// Entry point: gathers per-feature metadata from cuda_column_data_, derives the split
// default directions, then dispatches the bit-vector and leaf-index kernels on the
// column's storage width (8/16/32 bit).  Body continues on the next original lines.
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernel(
    const data_size_t num_data_in_leaf, const int split_feature_index, const uint32_t split_threshold,
    const uint8_t split_default_left, const data_size_t leaf_data_start,
    const int left_leaf_index, const int right_leaf_index) {
  const bool missing_is_zero = static_cast<bool>(cuda_column_data_->feature_missing_is_zero(split_feature_index));
  const bool missing_is_na = static_cast<bool>(cuda_column_data_->feature_missing_is_na(split_feature_index));
  const bool mfb_is_zero = static_cast<bool>(cuda_column_data_->feature_mfb_is_zero(split_feature_index));
  const bool mfb_is_na = static_cast<bool>(cuda_column_data_->feature_mfb_is_na(split_feature_index));
  const bool is_single_feature_in_column = is_single_feature_in_column_[split_feature_index];
  const uint32_t default_bin = cuda_column_data_->feature_default_bin(split_feature_index);
  const uint32_t most_freq_bin = cuda_column_data_->feature_most_freq_bin(split_feature_index);
  // Single feature in its column -> bins start at 1 with no per-feature min-bin offset.
  const uint32_t min_bin = is_single_feature_in_column ?
1 : cuda_column_data_->feature_min_bin(split_feature_index);
  const uint32_t max_bin = cuda_column_data_->feature_max_bin(split_feature_index);
  // Thresholds in column-local bin space.
  uint32_t th = split_threshold + min_bin;
  uint32_t t_zero_bin = min_bin + default_bin;
  if (most_freq_bin == 0) {
    // Bin 0 is folded into the most-frequent bin; shift thresholds down to compensate.
    --th;
    --t_zero_bin;
  }
  uint8_t split_default_to_left = 0;
  uint8_t split_missing_default_to_left = 0;
  int default_leaf_index = right_leaf_index;
  int missing_default_leaf_index = right_leaf_index;
  if (most_freq_bin <= split_threshold) {
    split_default_to_left = 1;
    default_leaf_index = left_leaf_index;
  }
  if (missing_is_zero || missing_is_na) {
    if (split_default_left) {
      split_missing_default_to_left = 1;
      missing_default_leaf_index = left_leaf_index;
    }
  }
  // NOTE(review): default_leaf_index / missing_default_leaf_index are presumably consumed
  // via the UpdateDataIndexToLeafIndex_ARGS macro defined above this chunk — confirm.
  const int column_index = cuda_column_data_->feature_to_column(split_feature_index);
  const uint8_t bit_type = cuda_column_data_->column_bit_type(column_index);
  const bool max_bin_to_left = (max_bin <= th);
  const data_size_t* data_indices_in_leaf = cuda_data_indices_ + leaf_data_start;
  const void* column_data_pointer = cuda_column_data_->GetColumnData(column_index);
  // Dispatch on the column's storage width; both kernels get identical arguments.
  if (bit_type == 8) {
    const uint8_t* column_data = reinterpret_cast<const uint8_t*>(column_data_pointer);
    LaunchGenDataToLeftBitVectorKernelInner<uint8_t>(
      GenBitVector_ARGS, missing_is_zero, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
    LaunchUpdateDataIndexToLeafIndexKernel<uint8_t>(
      UpdateDataIndexToLeafIndex_ARGS, missing_is_zero, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
  } else if (bit_type == 16) {
    const uint16_t* column_data = reinterpret_cast<const uint16_t*>(column_data_pointer);
    LaunchGenDataToLeftBitVectorKernelInner<uint16_t>(
      GenBitVector_ARGS, missing_is_zero, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
    LaunchUpdateDataIndexToLeafIndexKernel<uint16_t>(
      UpdateDataIndexToLeafIndex_ARGS, missing_is_zero, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left,
is_single_feature_in_column);
  } else if (bit_type == 32) {
    const uint32_t* column_data = reinterpret_cast<const uint32_t*>(column_data_pointer);
    LaunchGenDataToLeftBitVectorKernelInner<uint32_t>(
      GenBitVector_ARGS, missing_is_zero, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
    LaunchUpdateDataIndexToLeafIndexKernel<uint32_t>(
      UpdateDataIndexToLeafIndex_ARGS, missing_is_zero, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
  }
}

#undef UpdateDataIndexToLeafIndexKernel_PARAMS
#undef UpdateDataIndexToLeafIndex_ARGS
#undef GenDataToLeftBitVectorKernel_PARMS
#undef GenBitVector_ARGS

// Categorical split: writes each row's destination leaf index.  Membership of
// (bin - min_bin + mfb_offset) in the split bitset sends the row left; bins outside the
// feature's range fall back to default_leaf_index.  One thread per data index.
template <typename BIN_TYPE, bool USE_MIN_BIN>
__global__ void UpdateDataIndexToLeafIndexKernel_Categorical(
    const data_size_t num_data_in_leaf,
    const data_size_t* data_indices_in_leaf,
    const uint32_t* bitset,
    const int bitset_len,
    const BIN_TYPE* column_data,  // values from feature
    const uint32_t max_bin,
    const uint32_t min_bin,
    const int8_t mfb_offset,
    int* cuda_data_index_to_leaf_index,
    const int left_leaf_index,
    const int right_leaf_index,
    const int default_leaf_index) {
  const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (local_data_index < num_data_in_leaf) {
    const unsigned int global_data_index = data_indices_in_leaf[local_data_index];
    const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]);
    if (USE_MIN_BIN && (bin < min_bin || bin > max_bin)) {
      cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index;
    } else if (!USE_MIN_BIN && bin == 0) {
      cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index;
    } else if (CUDAFindInBitset(bitset, bitset_len, bin - min_bin + mfb_offset)) {
      cuda_data_index_to_leaf_index[global_data_index] = left_leaf_index;
    } else {
      cuda_data_index_to_leaf_index[global_data_index] = right_leaf_index;
    }
  }
}

// for categorical features
template <typename BIN_TYPE, bool USE_MIN_BIN>
__global__ void
GenDataToLeftBitVectorKernel_Categorical(
    const data_size_t num_data_in_leaf,
    const data_size_t* data_indices_in_leaf,
    const uint32_t* bitset,
    int bitset_len,
    const BIN_TYPE* column_data,  // values from feature
    const uint32_t max_bin,
    const uint32_t min_bin,
    const int8_t mfb_offset,
    const uint8_t split_default_to_left,
    uint16_t* block_to_left_offset,
    data_size_t* block_to_left_offset_buffer,
    data_size_t* block_to_right_offset_buffer) {
  // Categorical analogue of GenDataToLeftBitVectorKernel: left flag comes from bitset
  // membership instead of a threshold comparison.
  __shared__ uint16_t shared_mem_buffer[32];
  uint16_t thread_to_left_offset_cnt = 0;
  const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (local_data_index < num_data_in_leaf) {
    const unsigned int global_data_index = data_indices_in_leaf[local_data_index];
    const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]);
    if (USE_MIN_BIN && (bin < min_bin || bin > max_bin)) {
      // Out of this feature's bin range -> the row holds the default -> default direction.
      thread_to_left_offset_cnt = split_default_to_left;
    } else if (!USE_MIN_BIN && bin == 0) {
      thread_to_left_offset_cnt = split_default_to_left;
    } else if (CUDAFindInBitset(bitset, bitset_len, bin - min_bin + mfb_offset)) {
      thread_to_left_offset_cnt = 1;
    }
  }
  // Barrier before the shared-memory aggregation in PrepareOffset.
  __syncthreads();
  PrepareOffset(num_data_in_leaf, block_to_left_offset + blockIdx.x * blockDim.x,
                block_to_left_offset_buffer, block_to_right_offset_buffer,
                thread_to_left_offset_cnt, shared_mem_buffer);
}

#define GenBitVector_Categorical_ARGS \
  num_data_in_leaf, data_indices_in_leaf, \
  bitset, bitset_len, \
  column_data, max_bin, min_bin, mfb_offset, split_default_to_left, \
  cuda_block_to_left_offset_, cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_

#define UpdateDataIndexToLeafIndex_Categorical_ARGS \
  num_data_in_leaf, data_indices_in_leaf, \
  bitset, bitset_len, \
  column_data, max_bin, min_bin, mfb_offset, \
  cuda_data_index_to_leaf_index_, left_leaf_index, right_leaf_index, default_leaf_index

// Host entry for categorical splits: derives the default direction by testing the
// most-frequent bin against a host copy of the split bitset, then launches both
// categorical kernels per storage width (USE_MIN_BIN = !is_single_feature_in_column).
void CUDADataPartition::LaunchGenDataToLeftBitVectorCategoricalKernel(
    const data_size_t num_data_in_leaf, const int split_feature_index, const uint32_t* bitset, const int
bitset_len, const uint8_t split_default_left, const data_size_t leaf_data_start,
    const int left_leaf_index, const int right_leaf_index) {
  const data_size_t* data_indices_in_leaf = cuda_data_indices_ + leaf_data_start;
  const int column_index = cuda_column_data_->feature_to_column(split_feature_index);
  const uint8_t bit_type = cuda_column_data_->column_bit_type(column_index);
  const bool is_single_feature_in_column = is_single_feature_in_column_[split_feature_index];
  const uint32_t min_bin = is_single_feature_in_column ? 1 : cuda_column_data_->feature_min_bin(split_feature_index);
  const uint32_t max_bin = cuda_column_data_->feature_max_bin(split_feature_index);
  const uint32_t most_freq_bin = cuda_column_data_->feature_most_freq_bin(split_feature_index);
  const uint32_t default_bin = cuda_column_data_->feature_default_bin(split_feature_index);
  const void* column_data_pointer = cuda_column_data_->GetColumnData(column_index);
  const int8_t mfb_offset = static_cast<int8_t>(most_freq_bin == 0);
  // Copy the bitset to the host to decide the default direction for the most-frequent bin.
  std::vector<uint32_t> host_bitset(bitset_len, 0);
  CopyFromCUDADeviceToHost<uint32_t>(host_bitset.data(), bitset, bitset_len, __FILE__, __LINE__);
  uint8_t split_default_to_left = 0;
  int default_leaf_index = right_leaf_index;
  if (most_freq_bin > 0 && Common::FindInBitset(host_bitset.data(), bitset_len, most_freq_bin)) {
    split_default_to_left = 1;
    default_leaf_index = left_leaf_index;
  }
  if (bit_type == 8) {
    const uint8_t* column_data = reinterpret_cast<const uint8_t*>(column_data_pointer);
    if (is_single_feature_in_column) {
      GenDataToLeftBitVectorKernel_Categorical<uint8_t, false><<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_Categorical_ARGS);
      UpdateDataIndexToLeafIndexKernel_Categorical<uint8_t, false><<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>(UpdateDataIndexToLeafIndex_Categorical_ARGS);
    } else {
      GenDataToLeftBitVectorKernel_Categorical<uint8_t, true><<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_Categorical_ARGS);
UpdateDataIndexToLeafIndexKernel_Categorical<uint8_t, true><<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>(UpdateDataIndexToLeafIndex_Categorical_ARGS);
    }
  } else if (bit_type == 16) {
    const uint16_t* column_data = reinterpret_cast<const uint16_t*>(column_data_pointer);
    if (is_single_feature_in_column) {
      GenDataToLeftBitVectorKernel_Categorical<uint16_t, false><<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_Categorical_ARGS);
      UpdateDataIndexToLeafIndexKernel_Categorical<uint16_t, false><<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>(UpdateDataIndexToLeafIndex_Categorical_ARGS);
    } else {
      GenDataToLeftBitVectorKernel_Categorical<uint16_t, true><<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_Categorical_ARGS);
      UpdateDataIndexToLeafIndexKernel_Categorical<uint16_t, true><<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>(UpdateDataIndexToLeafIndex_Categorical_ARGS);
    }
  } else if (bit_type == 32) {
    const uint32_t* column_data = reinterpret_cast<const uint32_t*>(column_data_pointer);
    if (is_single_feature_in_column) {
      GenDataToLeftBitVectorKernel_Categorical<uint32_t, false><<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_Categorical_ARGS);
      UpdateDataIndexToLeafIndexKernel_Categorical<uint32_t, false><<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>(UpdateDataIndexToLeafIndex_Categorical_ARGS);
    } else {
      GenDataToLeftBitVectorKernel_Categorical<uint32_t, true><<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_Categorical_ARGS);
      UpdateDataIndexToLeafIndexKernel_Categorical<uint32_t, true><<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>(UpdateDataIndexToLeafIndex_Categorical_ARGS);
    }
  }
}

#undef GenBitVector_Categorical_ARGS
#undef UpdateDataIndexToLeafIndex_Categorical_ARGS

// Single-block kernel: turns per-block left/right counts into exclusive prefix offsets and
// updates the leaf start/end/count bookkeeping.  Signature continues on the next line.
__global__ void AggregateBlockOffsetKernel0(
    const int left_leaf_index,
    const int right_leaf_index,
    data_size_t* block_to_left_offset_buffer,
    data_size_t* block_to_right_offset_buffer,
    data_size_t* cuda_leaf_data_start,
    data_size_t* cuda_leaf_data_end,
data_size_t* cuda_leaf_num_data,
    const data_size_t* cuda_data_indices,
    const data_size_t num_blocks) {
  __shared__ uint32_t shared_mem_buffer[32];
  __shared__ uint32_t to_left_total_count;
  const data_size_t num_data_in_leaf = cuda_leaf_num_data[left_leaf_index];
  const unsigned int blockDim_x = blockDim.x;
  const unsigned int threadIdx_x = threadIdx.x;
  // Partition the num_blocks + 1 buffer entries across threads; the first `remain`
  // threads take num_blocks_per_thread entries, the rest one fewer.
  const data_size_t num_blocks_plus_1 = num_blocks + 1;
  const uint32_t num_blocks_per_thread = (num_blocks_plus_1 + blockDim_x - 1) / blockDim_x;
  const uint32_t remain = num_blocks_plus_1 - ((num_blocks_per_thread - 1) * blockDim_x);
  const uint32_t remain_offset = remain * num_blocks_per_thread;
  uint32_t thread_start_block_index = 0;
  uint32_t thread_end_block_index = 0;
  if (threadIdx_x < remain) {
    thread_start_block_index = threadIdx_x * num_blocks_per_thread;
    thread_end_block_index = min(thread_start_block_index + num_blocks_per_thread, num_blocks_plus_1);
  } else {
    thread_start_block_index = remain_offset + (num_blocks_per_thread - 1) * (threadIdx_x - remain);
    thread_end_block_index = min(thread_start_block_index + num_blocks_per_thread - 1, num_blocks_plus_1);
  }
  if (threadIdx.x == 0) {
    block_to_right_offset_buffer[0] = 0;
  }
  __syncthreads();
  // Local (per-thread-segment) inclusive prefix sums over the block counts.
  for (uint32_t block_index = thread_start_block_index + 1; block_index < thread_end_block_index; ++block_index) {
    block_to_left_offset_buffer[block_index] += block_to_left_offset_buffer[block_index - 1];
    block_to_right_offset_buffer[block_index] += block_to_right_offset_buffer[block_index - 1];
  }
  __syncthreads();
  // Each thread contributes its segment's total; ShufflePrefixSum turns these into
  // per-segment base offsets across the block.
  uint32_t block_to_left_offset = 0;
  uint32_t block_to_right_offset = 0;
  if (thread_start_block_index < thread_end_block_index && thread_start_block_index > 1) {
    block_to_left_offset = block_to_left_offset_buffer[thread_start_block_index - 1];
    block_to_right_offset = block_to_right_offset_buffer[thread_start_block_index - 1];
  }
  block_to_left_offset = ShufflePrefixSum<uint32_t>(block_to_left_offset, shared_mem_buffer);
  __syncthreads();
  block_to_right_offset =
ShufflePrefixSum<uint32_t>(block_to_right_offset, shared_mem_buffer);
  if (threadIdx_x == blockDim_x - 1) {
    // Last thread sees the full left total: its base plus its own segment's tail entry.
    to_left_total_count = block_to_left_offset + block_to_left_offset_buffer[num_blocks];
  }
  __syncthreads();
  const uint32_t to_left_thread_block_offset = block_to_left_offset;
  // Right-side offsets start after all left-going rows.
  const uint32_t to_right_thread_block_offset = block_to_right_offset + to_left_total_count;
  for (uint32_t block_index = thread_start_block_index; block_index < thread_end_block_index; ++block_index) {
    block_to_left_offset_buffer[block_index] += to_left_thread_block_offset;
    block_to_right_offset_buffer[block_index] += to_right_thread_block_offset;
  }
  __syncthreads();
  if (blockIdx.x == 0 && threadIdx.x == 0) {
    // Split the leaf's [start, end) range between left and right children.
    const data_size_t old_leaf_data_end = cuda_leaf_data_end[left_leaf_index];
    cuda_leaf_data_end[left_leaf_index] = cuda_leaf_data_start[left_leaf_index] + static_cast<data_size_t>(to_left_total_count);
    cuda_leaf_num_data[left_leaf_index] = static_cast<data_size_t>(to_left_total_count);
    cuda_leaf_data_start[right_leaf_index] = cuda_leaf_data_end[left_leaf_index];
    cuda_leaf_data_end[right_leaf_index] = old_leaf_data_end;
    cuda_leaf_num_data[right_leaf_index] = num_data_in_leaf - static_cast<data_size_t>(to_left_total_count);
  }
}

// Variant used when num_blocks fits in a single block's thread count: one buffer entry
// per thread, prefix-summed directly via ShufflePrefixSum.
__global__ void AggregateBlockOffsetKernel1(
    const int left_leaf_index,
    const int right_leaf_index,
    data_size_t* block_to_left_offset_buffer,
    data_size_t* block_to_right_offset_buffer,
    data_size_t* cuda_leaf_data_start,
    data_size_t* cuda_leaf_data_end,
    data_size_t* cuda_leaf_num_data,
    const data_size_t* cuda_data_indices,
    const data_size_t num_blocks) {
  __shared__ uint32_t shared_mem_buffer[32];
  __shared__ uint32_t to_left_total_count;
  const data_size_t num_data_in_leaf = cuda_leaf_num_data[left_leaf_index];
  const unsigned int threadIdx_x = threadIdx.x;
  uint32_t block_to_left_offset = 0;
  uint32_t block_to_right_offset = 0;
  if (threadIdx_x < static_cast<unsigned int>(num_blocks)) {
    block_to_left_offset = block_to_left_offset_buffer[threadIdx_x + 1];
    block_to_right_offset
= block_to_right_offset_buffer[threadIdx_x + 1];
  }
  block_to_left_offset = ShufflePrefixSum<uint32_t>(block_to_left_offset, shared_mem_buffer);
  __syncthreads();
  block_to_right_offset = ShufflePrefixSum<uint32_t>(block_to_right_offset, shared_mem_buffer);
  if (threadIdx.x == blockDim.x - 1) {
    to_left_total_count = block_to_left_offset;
  }
  __syncthreads();
  if (threadIdx_x < static_cast<unsigned int>(num_blocks)) {
    block_to_left_offset_buffer[threadIdx_x + 1] = block_to_left_offset;
    // Right offsets are shifted past all left-going rows.
    block_to_right_offset_buffer[threadIdx_x + 1] = block_to_right_offset + to_left_total_count;
  }
  if (threadIdx_x == 0) {
    block_to_right_offset_buffer[0] = to_left_total_count;
  }
  __syncthreads();
  if (blockIdx.x == 0 && threadIdx.x == 0) {
    // Same leaf bookkeeping as AggregateBlockOffsetKernel0.
    const data_size_t old_leaf_data_end = cuda_leaf_data_end[left_leaf_index];
    cuda_leaf_data_end[left_leaf_index] = cuda_leaf_data_start[left_leaf_index] + static_cast<data_size_t>(to_left_total_count);
    cuda_leaf_num_data[left_leaf_index] = static_cast<data_size_t>(to_left_total_count);
    cuda_leaf_data_start[right_leaf_index] = cuda_leaf_data_end[left_leaf_index];
    cuda_leaf_data_end[right_leaf_index] = old_leaf_data_end;
    cuda_leaf_num_data[right_leaf_index] = num_data_in_leaf - static_cast<data_size_t>(to_left_total_count);
  }
}

// Updates leaf-split structs, the histogram pool, and the host-visible split-info buffer
// after a split; each distinct global thread index performs one independent assignment
// (launched with a small fixed grid).  Body continues on the next original lines.
__global__ void SplitTreeStructureKernel(const int left_leaf_index,
    const int right_leaf_index,
    data_size_t* block_to_left_offset_buffer,
    data_size_t* block_to_right_offset_buffer,
    data_size_t* cuda_leaf_data_start,
    data_size_t* cuda_leaf_data_end,
    data_size_t* cuda_leaf_num_data,
    const data_size_t* cuda_data_indices,
    const CUDASplitInfo* best_split_info,
    // for leaf splits information update
    CUDALeafSplitsStruct* smaller_leaf_splits,
    CUDALeafSplitsStruct* larger_leaf_splits,
    const int num_total_bin,
    hist_t* cuda_hist,
    hist_t** cuda_hist_pool,
    double* cuda_leaf_output,
    int* cuda_split_info_buffer) {
  const unsigned int to_left_total_cnt = cuda_leaf_num_data[left_leaf_index];
  // Doubles live after the first 8 ints of cuda_split_info_buffer.
  double* cuda_split_info_buffer_for_hessians =
reinterpret_cast<double*>(cuda_split_info_buffer + 8);
  const unsigned int global_thread_index = blockIdx.x * blockDim.x + threadIdx.x;
  // Threads 0..9: leaf outputs plus the host-visible split-info buffer
  // (ints 0..5 = left index/count/start, right index/count/start;
  //  doubles 0..3 = left/right sum_hessians then left/right sum_gradients).
  if (global_thread_index == 0) {
    cuda_leaf_output[left_leaf_index] = best_split_info->left_value;
  } else if (global_thread_index == 1) {
    cuda_leaf_output[right_leaf_index] = best_split_info->right_value;
  } else if (global_thread_index == 2) {
    cuda_split_info_buffer[0] = left_leaf_index;
  } else if (global_thread_index == 3) {
    cuda_split_info_buffer[1] = cuda_leaf_num_data[left_leaf_index];
  } else if (global_thread_index == 4) {
    cuda_split_info_buffer[2] = cuda_leaf_data_start[left_leaf_index];
  } else if (global_thread_index == 5) {
    cuda_split_info_buffer[3] = right_leaf_index;
  } else if (global_thread_index == 6) {
    cuda_split_info_buffer[4] = cuda_leaf_num_data[right_leaf_index];
  } else if (global_thread_index == 7) {
    cuda_split_info_buffer[5] = cuda_leaf_data_start[right_leaf_index];
  } else if (global_thread_index == 8) {
    cuda_split_info_buffer_for_hessians[0] = best_split_info->left_sum_hessians;
    cuda_split_info_buffer_for_hessians[2] = best_split_info->left_sum_gradients;
  } else if (global_thread_index == 9) {
    cuda_split_info_buffer_for_hessians[1] = best_split_info->right_sum_hessians;
    cuda_split_info_buffer_for_hessians[3] = best_split_info->right_sum_gradients;
  }
  // The child with fewer rows becomes the "smaller" leaf; it gets a fresh histogram slot
  // while the larger child keeps (or inherits) the parent's histogram.
  if (cuda_leaf_num_data[left_leaf_index] < cuda_leaf_num_data[right_leaf_index]) {
    if (global_thread_index == 0) {
      hist_t* parent_hist_ptr = cuda_hist_pool[left_leaf_index];
      cuda_hist_pool[right_leaf_index] = parent_hist_ptr;
      cuda_hist_pool[left_leaf_index] = cuda_hist + 2 * right_leaf_index * num_total_bin;
      smaller_leaf_splits->hist_in_leaf = cuda_hist_pool[left_leaf_index];
      larger_leaf_splits->hist_in_leaf = cuda_hist_pool[right_leaf_index];
    } else if (global_thread_index == 1) {
      smaller_leaf_splits->sum_of_gradients = best_split_info->left_sum_gradients;
    } else if (global_thread_index == 2) {
      smaller_leaf_splits->sum_of_hessians = best_split_info->left_sum_hessians;
    }
else if (global_thread_index == 3) {
      smaller_leaf_splits->num_data_in_leaf = to_left_total_cnt;
    } else if (global_thread_index == 4) {
      smaller_leaf_splits->gain = best_split_info->left_gain;
    } else if (global_thread_index == 5) {
      smaller_leaf_splits->leaf_value = best_split_info->left_value;
    } else if (global_thread_index == 6) {
      smaller_leaf_splits->data_indices_in_leaf = cuda_data_indices;
    } else if (global_thread_index == 7) {
      larger_leaf_splits->leaf_index = right_leaf_index;
    } else if (global_thread_index == 8) {
      larger_leaf_splits->sum_of_gradients = best_split_info->right_sum_gradients;
    } else if (global_thread_index == 9) {
      larger_leaf_splits->sum_of_hessians = best_split_info->right_sum_hessians;
    } else if (global_thread_index == 10) {
      larger_leaf_splits->num_data_in_leaf = cuda_leaf_num_data[right_leaf_index];
    } else if (global_thread_index == 11) {
      larger_leaf_splits->gain = best_split_info->right_gain;
    } else if (global_thread_index == 12) {
      larger_leaf_splits->leaf_value = best_split_info->right_value;
    } else if (global_thread_index == 13) {
      larger_leaf_splits->data_indices_in_leaf = cuda_data_indices + cuda_leaf_num_data[left_leaf_index];
    } else if (global_thread_index == 14) {
      // ints 6/7 report which child is smaller/larger to the host.
      cuda_split_info_buffer[6] = left_leaf_index;
    } else if (global_thread_index == 15) {
      cuda_split_info_buffer[7] = right_leaf_index;
    } else if (global_thread_index == 16) {
      smaller_leaf_splits->leaf_index = left_leaf_index;
    }
  } else {
    // Mirror case: right child is the smaller leaf.
    if (global_thread_index == 0) {
      larger_leaf_splits->leaf_index = left_leaf_index;
    } else if (global_thread_index == 1) {
      larger_leaf_splits->sum_of_gradients = best_split_info->left_sum_gradients;
    } else if (global_thread_index == 2) {
      larger_leaf_splits->sum_of_hessians = best_split_info->left_sum_hessians;
    } else if (global_thread_index == 3) {
      larger_leaf_splits->num_data_in_leaf = to_left_total_cnt;
    } else if (global_thread_index == 4) {
      larger_leaf_splits->gain = best_split_info->left_gain;
    } else if (global_thread_index == 5) {
larger_leaf_splits->leaf_value = best_split_info->left_value;
    } else if (global_thread_index == 6) {
      larger_leaf_splits->data_indices_in_leaf = cuda_data_indices;
    } else if (global_thread_index == 7) {
      smaller_leaf_splits->leaf_index = right_leaf_index;
    } else if (global_thread_index == 8) {
      smaller_leaf_splits->sum_of_gradients = best_split_info->right_sum_gradients;
    } else if (global_thread_index == 9) {
      smaller_leaf_splits->sum_of_hessians = best_split_info->right_sum_hessians;
    } else if (global_thread_index == 10) {
      smaller_leaf_splits->num_data_in_leaf = cuda_leaf_num_data[right_leaf_index];
    } else if (global_thread_index == 11) {
      smaller_leaf_splits->gain = best_split_info->right_gain;
    } else if (global_thread_index == 12) {
      smaller_leaf_splits->leaf_value = best_split_info->right_value;
    } else if (global_thread_index == 13) {
      smaller_leaf_splits->data_indices_in_leaf = cuda_data_indices + cuda_leaf_num_data[left_leaf_index];
    } else if (global_thread_index == 14) {
      cuda_hist_pool[right_leaf_index] = cuda_hist + 2 * right_leaf_index * num_total_bin;
      smaller_leaf_splits->hist_in_leaf = cuda_hist_pool[right_leaf_index];
    } else if (global_thread_index == 15) {
      larger_leaf_splits->hist_in_leaf = cuda_hist_pool[left_leaf_index];
    } else if (global_thread_index == 16) {
      cuda_split_info_buffer[6] = right_leaf_index;
    } else if (global_thread_index == 17) {
      cuda_split_info_buffer[7] = left_leaf_index;
    }
  }
}

// Scatters the leaf's data indices into out_data_indices_in_leaf, left-going rows first,
// using the per-block offsets computed by the aggregate kernels and the per-thread
// inclusive counts in block_to_left_offset.  Body continues on the next original line.
__global__ void SplitInnerKernel(const int left_leaf_index, const int right_leaf_index,
    const data_size_t* cuda_leaf_data_start, const data_size_t* cuda_leaf_num_data,
    const data_size_t* cuda_data_indices,
    const data_size_t* block_to_left_offset_buffer, const data_size_t* block_to_right_offset_buffer,
    const uint16_t* block_to_left_offset, data_size_t* out_data_indices_in_leaf) {
  const data_size_t leaf_num_data_offset = cuda_leaf_data_start[left_leaf_index];
  // left + right counts together give the original (pre-split) leaf size.
  const data_size_t num_data_in_leaf = cuda_leaf_num_data[left_leaf_index] +
cuda_leaf_num_data[right_leaf_index];
  const unsigned int threadIdx_x = threadIdx.x;
  const unsigned int blockDim_x = blockDim.x;
  const unsigned int global_thread_index = blockIdx.x * blockDim_x + threadIdx_x;
  const data_size_t* cuda_data_indices_in_leaf = cuda_data_indices + leaf_num_data_offset;
  const uint16_t* block_to_left_offset_ptr = block_to_left_offset + blockIdx.x * blockDim_x;
  const uint32_t to_right_block_offset = block_to_right_offset_buffer[blockIdx.x];
  const uint32_t to_left_block_offset = block_to_left_offset_buffer[blockIdx.x];
  data_size_t* left_out_data_indices_in_leaf = out_data_indices_in_leaf + to_left_block_offset;
  data_size_t* right_out_data_indices_in_leaf = out_data_indices_in_leaf + to_right_block_offset;
  if (static_cast<data_size_t>(global_thread_index) < num_data_in_leaf) {
    // block_to_left_offset holds per-thread inclusive left-counts within the block, so the
    // difference with the previous entry tells whether this thread's row goes left.
    const uint32_t thread_to_left_offset = (threadIdx_x == 0 ? 0 : block_to_left_offset_ptr[threadIdx_x - 1]);
    const bool to_left = block_to_left_offset_ptr[threadIdx_x] > thread_to_left_offset;
    if (to_left) {
      left_out_data_indices_in_leaf[thread_to_left_offset] = cuda_data_indices_in_leaf[global_thread_index];
    } else {
      const uint32_t thread_to_right_offset = threadIdx.x - thread_to_left_offset;
      right_out_data_indices_in_leaf[thread_to_right_offset] = cuda_data_indices_in_leaf[global_thread_index];
    }
  }
}

// Copies the partitioned index buffer back into cuda_data_indices; one element per thread.
__global__ void CopyDataIndicesKernel(
    const data_size_t num_data_in_leaf,
    const data_size_t* out_data_indices_in_leaf,
    data_size_t* cuda_data_indices) {
  const unsigned int threadIdx_x = threadIdx.x;
  const unsigned int global_thread_index = blockIdx.x * blockDim.x + threadIdx_x;
  if (global_thread_index < num_data_in_leaf) {
    cuda_data_indices[global_thread_index] = out_data_indices_in_leaf[global_thread_index];
  }
}

// Orchestrates the split: aggregates block offsets, scatters indices, updates the tree
// structure on device, then copies the split-info buffer to the host and reports leaf
// counts/starts and gradient/hessian sums through the *_ref out-parameters.
void CUDADataPartition::LaunchSplitInnerKernel(
    const data_size_t num_data_in_leaf, const CUDASplitInfo* best_split_info,
    const int left_leaf_index, const int right_leaf_index,
    // for leaf splits information update
    CUDALeafSplitsStruct* smaller_leaf_splits,
CUDALeafSplitsStruct* larger_leaf_splits,
    data_size_t* left_leaf_num_data_ref, data_size_t* right_leaf_num_data_ref,
    data_size_t* left_leaf_start_ref, data_size_t* right_leaf_start_ref,
    double* left_leaf_sum_of_hessians_ref, double* right_leaf_sum_of_hessians_ref,
    double* left_leaf_sum_of_gradients_ref, double* right_leaf_sum_of_gradients_ref) {
  // Round grid_dim_ - 1 up to the next power of two for the single-block prefix-sum kernel.
  int num_blocks_final_ref = grid_dim_ - 1;
  int num_blocks_final_aligned = 1;
  while (num_blocks_final_ref > 0) {
    num_blocks_final_aligned <<= 1;
    num_blocks_final_ref >>= 1;
  }
  global_timer.Start("CUDADataPartition::AggregateBlockOffsetKernel");
  // Kernel0 handles block counts beyond one thread-block's width; Kernel1 is the
  // one-entry-per-thread fast path.
  if (grid_dim_ > AGGREGATE_BLOCK_SIZE_DATA_PARTITION) {
    AggregateBlockOffsetKernel0<<<1, AGGREGATE_BLOCK_SIZE_DATA_PARTITION, 0, cuda_streams_[0]>>>(
      left_leaf_index, right_leaf_index, cuda_block_data_to_left_offset_,
      cuda_block_data_to_right_offset_, cuda_leaf_data_start_, cuda_leaf_data_end_,
      cuda_leaf_num_data_, cuda_data_indices_,
      grid_dim_);
  } else {
    AggregateBlockOffsetKernel1<<<1, num_blocks_final_aligned, 0, cuda_streams_[0]>>>(
      left_leaf_index, right_leaf_index, cuda_block_data_to_left_offset_,
      cuda_block_data_to_right_offset_, cuda_leaf_data_start_, cuda_leaf_data_end_,
      cuda_leaf_num_data_, cuda_data_indices_,
      grid_dim_);
  }
  SynchronizeCUDADevice(__FILE__, __LINE__);
  global_timer.Stop("CUDADataPartition::AggregateBlockOffsetKernel");
  global_timer.Start("CUDADataPartition::SplitInnerKernel");
  SplitInnerKernel<<<grid_dim_, block_dim_, 0, cuda_streams_[1]>>>(
    left_leaf_index, right_leaf_index, cuda_leaf_data_start_, cuda_leaf_num_data_,
    cuda_data_indices_, cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_,
    cuda_block_to_left_offset_, cuda_out_data_indices_in_leaf_);
  global_timer.Stop("CUDADataPartition::SplitInnerKernel");
  SynchronizeCUDADevice(__FILE__, __LINE__);
  global_timer.Start("CUDADataPartition::SplitTreeStructureKernel");
  // 4x5 = 20 threads cover the (at most 18) distinct global_thread_index cases.
  SplitTreeStructureKernel<<<4, 5, 0, cuda_streams_[0]>>>(left_leaf_index, right_leaf_index,
    cuda_block_data_to_left_offset_,
cuda_block_data_to_right_offset_, cuda_leaf_data_start_, cuda_leaf_data_end_,
    cuda_leaf_num_data_, cuda_out_data_indices_in_leaf_,
    best_split_info,
    smaller_leaf_splits,
    larger_leaf_splits,
    num_total_bin_,
    cuda_hist_,
    cuda_hist_pool_,
    cuda_leaf_output_, cuda_split_info_buffer_);
  global_timer.Stop("CUDADataPartition::SplitTreeStructureKernel");
  // Host mirror of cuda_split_info_buffer_: 8 ints then 4 doubles (16 ints total).
  std::vector<int> cpu_split_info_buffer(16);
  const double* cpu_sum_hessians_info = reinterpret_cast<const double*>(cpu_split_info_buffer.data() + 8);
  global_timer.Start("CUDADataPartition::CopyFromCUDADeviceToHostAsync");
  CopyFromCUDADeviceToHostAsync<int>(cpu_split_info_buffer.data(), cuda_split_info_buffer_, 16, cuda_streams_[0], __FILE__, __LINE__);
  SynchronizeCUDADevice(__FILE__, __LINE__);
  global_timer.Stop("CUDADataPartition::CopyFromCUDADeviceToHostAsync");
  const data_size_t left_leaf_num_data = cpu_split_info_buffer[1];
  const data_size_t left_leaf_data_start = cpu_split_info_buffer[2];
  const data_size_t right_leaf_num_data = cpu_split_info_buffer[4];
  global_timer.Start("CUDADataPartition::CopyDataIndicesKernel");
  CopyDataIndicesKernel<<<grid_dim_, block_dim_, 0, cuda_streams_[2]>>>(
    left_leaf_num_data + right_leaf_num_data, cuda_out_data_indices_in_leaf_, cuda_data_indices_ + left_leaf_data_start);
  global_timer.Stop("CUDADataPartition::CopyDataIndicesKernel");
  const data_size_t right_leaf_data_start = cpu_split_info_buffer[5];
  *left_leaf_num_data_ref = left_leaf_num_data;
  *left_leaf_start_ref = left_leaf_data_start;
  *right_leaf_num_data_ref = right_leaf_num_data;
  *right_leaf_start_ref = right_leaf_data_start;
  *left_leaf_sum_of_hessians_ref = cpu_sum_hessians_info[0];
  *right_leaf_sum_of_hessians_ref = cpu_sum_hessians_info[1];
  *left_leaf_sum_of_gradients_ref = cpu_sum_hessians_info[2];
  *right_leaf_sum_of_gradients_ref = cpu_sum_hessians_info[3];
}

// Kernel continues past this chunk (definition incomplete in this view).
template <bool USE_BAGGING>
__global__ void AddPredictionToScoreKernel(
    const data_size_t* data_indices_in_leaf, const double* leaf_value, double* cuda_scores, const int*
cuda_data_index_to_leaf_index, const data_size_t num_data) { const unsigned int threadIdx_x = threadIdx.x; const unsigned int blockIdx_x = blockIdx.x; const unsigned int blockDim_x = blockDim.x; const data_size_t local_data_index = static_cast<data_size_t>(blockIdx_x * blockDim_x + threadIdx_x); if (local_data_index < num_data) { if (USE_BAGGING) { const data_size_t global_data_index = data_indices_in_leaf[local_data_index]; const int leaf_index = cuda_data_index_to_leaf_index[global_data_index]; const double leaf_prediction_value = leaf_value[leaf_index]; cuda_scores[local_data_index] = leaf_prediction_value; } else { const int leaf_index = cuda_data_index_to_leaf_index[local_data_index]; const double leaf_prediction_value = leaf_value[leaf_index]; cuda_scores[local_data_index] = leaf_prediction_value; } } } void CUDADataPartition::LaunchAddPredictionToScoreKernel(const double* leaf_value, double* cuda_scores) { global_timer.Start("CUDADataPartition::AddPredictionToScoreKernel"); const data_size_t num_data_in_root = root_num_data(); const int num_blocks = (num_data_in_root + FILL_INDICES_BLOCK_SIZE_DATA_PARTITION - 1) / FILL_INDICES_BLOCK_SIZE_DATA_PARTITION; if (use_bagging_) { AddPredictionToScoreKernel<true><<<num_blocks, FILL_INDICES_BLOCK_SIZE_DATA_PARTITION>>>( cuda_data_indices_, leaf_value, cuda_scores, cuda_data_index_to_leaf_index_, num_data_in_root); } else { AddPredictionToScoreKernel<false><<<num_blocks, FILL_INDICES_BLOCK_SIZE_DATA_PARTITION>>>( cuda_data_indices_, leaf_value, cuda_scores, cuda_data_index_to_leaf_index_, num_data_in_root); } SynchronizeCUDADevice(__FILE__, __LINE__); global_timer.Stop("CUDADataPartition::AddPredictionToScoreKernel"); } } // namespace LightGBM #endif // USE_CUDA_EXP
145fc81d1c86a676d70995685014682405747b2b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_calc_dt_kernel_print; int xdim0_calc_dt_kernel_print_h = -1; __constant__ int ydim0_calc_dt_kernel_print; int ydim0_calc_dt_kernel_print_h = -1; __constant__ int xdim1_calc_dt_kernel_print; int xdim1_calc_dt_kernel_print_h = -1; __constant__ int ydim1_calc_dt_kernel_print; int ydim1_calc_dt_kernel_print_h = -1; __constant__ int xdim2_calc_dt_kernel_print; int xdim2_calc_dt_kernel_print_h = -1; __constant__ int ydim2_calc_dt_kernel_print; int ydim2_calc_dt_kernel_print_h = -1; __constant__ int xdim3_calc_dt_kernel_print; int xdim3_calc_dt_kernel_print_h = -1; __constant__ int ydim3_calc_dt_kernel_print; int ydim3_calc_dt_kernel_print_h = -1; __constant__ int xdim4_calc_dt_kernel_print; int xdim4_calc_dt_kernel_print_h = -1; __constant__ int ydim4_calc_dt_kernel_print; int ydim4_calc_dt_kernel_print_h = -1; __constant__ int xdim5_calc_dt_kernel_print; int xdim5_calc_dt_kernel_print_h = -1; __constant__ int ydim5_calc_dt_kernel_print; int ydim5_calc_dt_kernel_print_h = -1; __constant__ int xdim6_calc_dt_kernel_print; int xdim6_calc_dt_kernel_print_h = -1; __constant__ int ydim6_calc_dt_kernel_print; int ydim6_calc_dt_kernel_print_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #define OPS_ACC0(x, y, z) \ (x + xdim0_calc_dt_kernel_print * (y) + \ xdim0_calc_dt_kernel_print * ydim0_calc_dt_kernel_print * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_calc_dt_kernel_print * (y) + \ xdim1_calc_dt_kernel_print * ydim1_calc_dt_kernel_print * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_calc_dt_kernel_print * (y) + \ xdim2_calc_dt_kernel_print * ydim2_calc_dt_kernel_print * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_calc_dt_kernel_print * (y) + \ xdim3_calc_dt_kernel_print * ydim3_calc_dt_kernel_print * (z)) #define OPS_ACC4(x, y, z) \ (x + 
xdim4_calc_dt_kernel_print * (y) + \ xdim4_calc_dt_kernel_print * ydim4_calc_dt_kernel_print * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_calc_dt_kernel_print * (y) + \ xdim5_calc_dt_kernel_print * ydim5_calc_dt_kernel_print * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_calc_dt_kernel_print * (y) + \ xdim6_calc_dt_kernel_print * ydim6_calc_dt_kernel_print * (z)) // user function __device__ void calc_dt_kernel_print_gpu(const double *xvel0, const double *yvel0, const double *zvel0, const double *density0, const double *energy0, const double *pressure, const double *soundspeed, double *output) { output[0] = xvel0[OPS_ACC0(0, 0, 0)]; output[1] = yvel0[OPS_ACC1(0, 0, 0)]; output[2] = zvel0[OPS_ACC2(0, 0, 0)]; output[3] = xvel0[OPS_ACC0(1, 0, 0)]; output[4] = yvel0[OPS_ACC1(1, 0, 0)]; output[5] = zvel0[OPS_ACC2(0, 0, 0)]; output[6] = xvel0[OPS_ACC0(1, 1, 0)]; output[7] = yvel0[OPS_ACC1(1, 1, 0)]; output[8] = zvel0[OPS_ACC2(0, 0, 0)]; output[9] = xvel0[OPS_ACC0(0, 1, 0)]; output[10] = yvel0[OPS_ACC1(0, 1, 0)]; output[11] = zvel0[OPS_ACC2(0, 0, 0)]; output[12] = xvel0[OPS_ACC0(0, 0, 1)]; output[13] = yvel0[OPS_ACC1(0, 0, 1)]; output[14] = zvel0[OPS_ACC2(0, 0, 1)]; output[15] = xvel0[OPS_ACC0(1, 0, 1)]; output[16] = yvel0[OPS_ACC1(1, 0, 1)]; output[17] = zvel0[OPS_ACC2(0, 0, 1)]; output[18] = xvel0[OPS_ACC0(1, 1, 1)]; output[19] = yvel0[OPS_ACC1(1, 1, 1)]; output[20] = zvel0[OPS_ACC2(0, 0, 1)]; output[21] = xvel0[OPS_ACC0(0, 1, 1)]; output[22] = yvel0[OPS_ACC1(0, 1, 1)]; output[23] = zvel0[OPS_ACC2(0, 0, 1)]; output[24] = density0[OPS_ACC3(0, 0, 0)]; output[25] = energy0[OPS_ACC4(0, 0, 0)]; output[26] = pressure[OPS_ACC5(0, 0, 0)]; output[27] = soundspeed[OPS_ACC6(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 __global__ void ops_calc_dt_kernel_print( const double *__restrict arg0, const double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, const double 
*__restrict arg4, const double *__restrict arg5, const double *__restrict arg6, double *__restrict arg7, int size0, int size1, int size2) { double arg7_l[28]; for (int d = 0; d < 28; d++) arg7_l[d] = ZERO_double; int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_calc_dt_kernel_print + idx_z * 1 * 1 * xdim0_calc_dt_kernel_print * ydim0_calc_dt_kernel_print; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_calc_dt_kernel_print + idx_z * 1 * 1 * xdim1_calc_dt_kernel_print * ydim1_calc_dt_kernel_print; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_calc_dt_kernel_print + idx_z * 1 * 1 * xdim2_calc_dt_kernel_print * ydim2_calc_dt_kernel_print; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_calc_dt_kernel_print + idx_z * 1 * 1 * xdim3_calc_dt_kernel_print * ydim3_calc_dt_kernel_print; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_calc_dt_kernel_print + idx_z * 1 * 1 * xdim4_calc_dt_kernel_print * ydim4_calc_dt_kernel_print; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_calc_dt_kernel_print + idx_z * 1 * 1 * xdim5_calc_dt_kernel_print * ydim5_calc_dt_kernel_print; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_calc_dt_kernel_print + idx_z * 1 * 1 * xdim6_calc_dt_kernel_print * ydim6_calc_dt_kernel_print; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { calc_dt_kernel_print_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7_l); } for (int d = 0; d < 28; d++) ops_reduction_cuda<OPS_INC>(&arg7[d + (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y) * 28], arg7_l[d]); } // host stub function void ops_par_loop_calc_dt_kernel_print(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { // Timing double t1, t2, c1, c2; ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #ifdef 
CHECKPOINTING if (!ops_checkpointing_before(args, 8, range, 40)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(40, "calc_dt_kernel_print"); OPS_kernels[40].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 != xdim0_calc_dt_kernel_print_h || ydim0 != ydim0_calc_dt_kernel_print_h || xdim1 != xdim1_calc_dt_kernel_print_h || ydim1 != ydim1_calc_dt_kernel_print_h || xdim2 != xdim2_calc_dt_kernel_print_h || ydim2 != ydim2_calc_dt_kernel_print_h || xdim3 != xdim3_calc_dt_kernel_print_h || ydim3 != ydim3_calc_dt_kernel_print_h || xdim4 != 
xdim4_calc_dt_kernel_print_h || ydim4 != ydim4_calc_dt_kernel_print_h || xdim5 != xdim5_calc_dt_kernel_print_h || ydim5 != ydim5_calc_dt_kernel_print_h || xdim6 != xdim6_calc_dt_kernel_print_h || ydim6 != ydim6_calc_dt_kernel_print_h) { hipMemcpyToSymbol(xdim0_calc_dt_kernel_print, &xdim0, sizeof(int)); xdim0_calc_dt_kernel_print_h = xdim0; hipMemcpyToSymbol(ydim0_calc_dt_kernel_print, &ydim0, sizeof(int)); ydim0_calc_dt_kernel_print_h = ydim0; hipMemcpyToSymbol(xdim1_calc_dt_kernel_print, &xdim1, sizeof(int)); xdim1_calc_dt_kernel_print_h = xdim1; hipMemcpyToSymbol(ydim1_calc_dt_kernel_print, &ydim1, sizeof(int)); ydim1_calc_dt_kernel_print_h = ydim1; hipMemcpyToSymbol(xdim2_calc_dt_kernel_print, &xdim2, sizeof(int)); xdim2_calc_dt_kernel_print_h = xdim2; hipMemcpyToSymbol(ydim2_calc_dt_kernel_print, &ydim2, sizeof(int)); ydim2_calc_dt_kernel_print_h = ydim2; hipMemcpyToSymbol(xdim3_calc_dt_kernel_print, &xdim3, sizeof(int)); xdim3_calc_dt_kernel_print_h = xdim3; hipMemcpyToSymbol(ydim3_calc_dt_kernel_print, &ydim3, sizeof(int)); ydim3_calc_dt_kernel_print_h = ydim3; hipMemcpyToSymbol(xdim4_calc_dt_kernel_print, &xdim4, sizeof(int)); xdim4_calc_dt_kernel_print_h = xdim4; hipMemcpyToSymbol(ydim4_calc_dt_kernel_print, &ydim4, sizeof(int)); ydim4_calc_dt_kernel_print_h = ydim4; hipMemcpyToSymbol(xdim5_calc_dt_kernel_print, &xdim5, sizeof(int)); xdim5_calc_dt_kernel_print_h = xdim5; hipMemcpyToSymbol(ydim5_calc_dt_kernel_print, &ydim5, sizeof(int)); ydim5_calc_dt_kernel_print_h = ydim5; hipMemcpyToSymbol(xdim6_calc_dt_kernel_print, &xdim6, sizeof(int)); xdim6_calc_dt_kernel_print_h = xdim6; hipMemcpyToSymbol(ydim6_calc_dt_kernel_print, &ydim6, sizeof(int)); ydim6_calc_dt_kernel_print_h = ydim6; } #ifdef OPS_MPI double *arg7h = (double *)(((ops_reduction)args[7].data)->data + ((ops_reduction)args[7].data)->size * block->index); #else double *arg7h = (double *)(((ops_reduction)args[7].data)->data); #endif dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / 
OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int nblocks = ((x_size - 1) / OPS_block_size_x + 1) * ((y_size - 1) / OPS_block_size_y + 1) * z_size; int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks * 28 * sizeof(double)); reduct_size = MAX(reduct_size, sizeof(double) * 28); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg7.data = OPS_reduct_h + reduct_bytes; arg7.data_d = OPS_reduct_d + reduct_bytes; for (int b = 0; b < maxblocks; b++) for (int d = 0; d < 28; d++) ((double *)arg7.data)[d + b * 28] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks * 28 * sizeof(double)); mvReductArraysToDevice(reduct_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; char *p_a[8]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - 
args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; 
#ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args, 8, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[40].mpi_time += t2 - t1; } int nshared = 0; int nthread = OPS_block_size_x * OPS_block_size_y; nshared = MAX(nshared, sizeof(double) * 28); nshared = MAX(nshared * nthread, reduct_size * nthread); // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_calc_dt_kernel_print), dim3(grid), dim3(tblock), nshared, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)arg7.data_d, x_size, y_size, z_size); mvReductArraysToHost(reduct_bytes); for (int b = 0; b < maxblocks; b++) { for (int d = 0; d < 28; d++) { arg7h[d] = arg7h[d] + ((double *)arg7.data)[d + b * 28]; 
} } arg7.data = (char *)arg7h; if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[40].time += t1 - t2; } ops_set_dirtybit_device(args, 8); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[40].mpi_time += t2 - t1; OPS_kernels[40].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[40].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[40].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[40].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[40].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[40].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[40].transfer += ops_compute_transfer(dim, start, end, &arg6); } }
145fc81d1c86a676d70995685014682405747b2b.cu
// // auto-generated by ops.py // __constant__ int xdim0_calc_dt_kernel_print; int xdim0_calc_dt_kernel_print_h = -1; __constant__ int ydim0_calc_dt_kernel_print; int ydim0_calc_dt_kernel_print_h = -1; __constant__ int xdim1_calc_dt_kernel_print; int xdim1_calc_dt_kernel_print_h = -1; __constant__ int ydim1_calc_dt_kernel_print; int ydim1_calc_dt_kernel_print_h = -1; __constant__ int xdim2_calc_dt_kernel_print; int xdim2_calc_dt_kernel_print_h = -1; __constant__ int ydim2_calc_dt_kernel_print; int ydim2_calc_dt_kernel_print_h = -1; __constant__ int xdim3_calc_dt_kernel_print; int xdim3_calc_dt_kernel_print_h = -1; __constant__ int ydim3_calc_dt_kernel_print; int ydim3_calc_dt_kernel_print_h = -1; __constant__ int xdim4_calc_dt_kernel_print; int xdim4_calc_dt_kernel_print_h = -1; __constant__ int ydim4_calc_dt_kernel_print; int ydim4_calc_dt_kernel_print_h = -1; __constant__ int xdim5_calc_dt_kernel_print; int xdim5_calc_dt_kernel_print_h = -1; __constant__ int ydim5_calc_dt_kernel_print; int ydim5_calc_dt_kernel_print_h = -1; __constant__ int xdim6_calc_dt_kernel_print; int xdim6_calc_dt_kernel_print_h = -1; __constant__ int ydim6_calc_dt_kernel_print; int ydim6_calc_dt_kernel_print_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #define OPS_ACC0(x, y, z) \ (x + xdim0_calc_dt_kernel_print * (y) + \ xdim0_calc_dt_kernel_print * ydim0_calc_dt_kernel_print * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_calc_dt_kernel_print * (y) + \ xdim1_calc_dt_kernel_print * ydim1_calc_dt_kernel_print * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_calc_dt_kernel_print * (y) + \ xdim2_calc_dt_kernel_print * ydim2_calc_dt_kernel_print * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_calc_dt_kernel_print * (y) + \ xdim3_calc_dt_kernel_print * ydim3_calc_dt_kernel_print * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_calc_dt_kernel_print * (y) + \ xdim4_calc_dt_kernel_print * ydim4_calc_dt_kernel_print * (z)) #define 
OPS_ACC5(x, y, z) \ (x + xdim5_calc_dt_kernel_print * (y) + \ xdim5_calc_dt_kernel_print * ydim5_calc_dt_kernel_print * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_calc_dt_kernel_print * (y) + \ xdim6_calc_dt_kernel_print * ydim6_calc_dt_kernel_print * (z)) // user function __device__ void calc_dt_kernel_print_gpu(const double *xvel0, const double *yvel0, const double *zvel0, const double *density0, const double *energy0, const double *pressure, const double *soundspeed, double *output) { output[0] = xvel0[OPS_ACC0(0, 0, 0)]; output[1] = yvel0[OPS_ACC1(0, 0, 0)]; output[2] = zvel0[OPS_ACC2(0, 0, 0)]; output[3] = xvel0[OPS_ACC0(1, 0, 0)]; output[4] = yvel0[OPS_ACC1(1, 0, 0)]; output[5] = zvel0[OPS_ACC2(0, 0, 0)]; output[6] = xvel0[OPS_ACC0(1, 1, 0)]; output[7] = yvel0[OPS_ACC1(1, 1, 0)]; output[8] = zvel0[OPS_ACC2(0, 0, 0)]; output[9] = xvel0[OPS_ACC0(0, 1, 0)]; output[10] = yvel0[OPS_ACC1(0, 1, 0)]; output[11] = zvel0[OPS_ACC2(0, 0, 0)]; output[12] = xvel0[OPS_ACC0(0, 0, 1)]; output[13] = yvel0[OPS_ACC1(0, 0, 1)]; output[14] = zvel0[OPS_ACC2(0, 0, 1)]; output[15] = xvel0[OPS_ACC0(1, 0, 1)]; output[16] = yvel0[OPS_ACC1(1, 0, 1)]; output[17] = zvel0[OPS_ACC2(0, 0, 1)]; output[18] = xvel0[OPS_ACC0(1, 1, 1)]; output[19] = yvel0[OPS_ACC1(1, 1, 1)]; output[20] = zvel0[OPS_ACC2(0, 0, 1)]; output[21] = xvel0[OPS_ACC0(0, 1, 1)]; output[22] = yvel0[OPS_ACC1(0, 1, 1)]; output[23] = zvel0[OPS_ACC2(0, 0, 1)]; output[24] = density0[OPS_ACC3(0, 0, 0)]; output[25] = energy0[OPS_ACC4(0, 0, 0)]; output[26] = pressure[OPS_ACC5(0, 0, 0)]; output[27] = soundspeed[OPS_ACC6(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 __global__ void ops_calc_dt_kernel_print( const double *__restrict arg0, const double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, const double *__restrict arg6, double *__restrict arg7, int size0, 
int size1, int size2) { double arg7_l[28]; for (int d = 0; d < 28; d++) arg7_l[d] = ZERO_double; int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_calc_dt_kernel_print + idx_z * 1 * 1 * xdim0_calc_dt_kernel_print * ydim0_calc_dt_kernel_print; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_calc_dt_kernel_print + idx_z * 1 * 1 * xdim1_calc_dt_kernel_print * ydim1_calc_dt_kernel_print; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_calc_dt_kernel_print + idx_z * 1 * 1 * xdim2_calc_dt_kernel_print * ydim2_calc_dt_kernel_print; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_calc_dt_kernel_print + idx_z * 1 * 1 * xdim3_calc_dt_kernel_print * ydim3_calc_dt_kernel_print; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_calc_dt_kernel_print + idx_z * 1 * 1 * xdim4_calc_dt_kernel_print * ydim4_calc_dt_kernel_print; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_calc_dt_kernel_print + idx_z * 1 * 1 * xdim5_calc_dt_kernel_print * ydim5_calc_dt_kernel_print; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_calc_dt_kernel_print + idx_z * 1 * 1 * xdim6_calc_dt_kernel_print * ydim6_calc_dt_kernel_print; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { calc_dt_kernel_print_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7_l); } for (int d = 0; d < 28; d++) ops_reduction_cuda<OPS_INC>(&arg7[d + (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y) * 28], arg7_l[d]); } // host stub function void ops_par_loop_calc_dt_kernel_print(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { // Timing double t1, t2, c1, c2; ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 8, range, 40)) return; #endif if (OPS_diags > 1) { 
ops_timing_realloc(40, "calc_dt_kernel_print"); OPS_kernels[40].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 != xdim0_calc_dt_kernel_print_h || ydim0 != ydim0_calc_dt_kernel_print_h || xdim1 != xdim1_calc_dt_kernel_print_h || ydim1 != ydim1_calc_dt_kernel_print_h || xdim2 != xdim2_calc_dt_kernel_print_h || ydim2 != ydim2_calc_dt_kernel_print_h || xdim3 != xdim3_calc_dt_kernel_print_h || ydim3 != ydim3_calc_dt_kernel_print_h || xdim4 != xdim4_calc_dt_kernel_print_h || ydim4 != ydim4_calc_dt_kernel_print_h || xdim5 != xdim5_calc_dt_kernel_print_h || 
ydim5 != ydim5_calc_dt_kernel_print_h || xdim6 != xdim6_calc_dt_kernel_print_h || ydim6 != ydim6_calc_dt_kernel_print_h) { cudaMemcpyToSymbol(xdim0_calc_dt_kernel_print, &xdim0, sizeof(int)); xdim0_calc_dt_kernel_print_h = xdim0; cudaMemcpyToSymbol(ydim0_calc_dt_kernel_print, &ydim0, sizeof(int)); ydim0_calc_dt_kernel_print_h = ydim0; cudaMemcpyToSymbol(xdim1_calc_dt_kernel_print, &xdim1, sizeof(int)); xdim1_calc_dt_kernel_print_h = xdim1; cudaMemcpyToSymbol(ydim1_calc_dt_kernel_print, &ydim1, sizeof(int)); ydim1_calc_dt_kernel_print_h = ydim1; cudaMemcpyToSymbol(xdim2_calc_dt_kernel_print, &xdim2, sizeof(int)); xdim2_calc_dt_kernel_print_h = xdim2; cudaMemcpyToSymbol(ydim2_calc_dt_kernel_print, &ydim2, sizeof(int)); ydim2_calc_dt_kernel_print_h = ydim2; cudaMemcpyToSymbol(xdim3_calc_dt_kernel_print, &xdim3, sizeof(int)); xdim3_calc_dt_kernel_print_h = xdim3; cudaMemcpyToSymbol(ydim3_calc_dt_kernel_print, &ydim3, sizeof(int)); ydim3_calc_dt_kernel_print_h = ydim3; cudaMemcpyToSymbol(xdim4_calc_dt_kernel_print, &xdim4, sizeof(int)); xdim4_calc_dt_kernel_print_h = xdim4; cudaMemcpyToSymbol(ydim4_calc_dt_kernel_print, &ydim4, sizeof(int)); ydim4_calc_dt_kernel_print_h = ydim4; cudaMemcpyToSymbol(xdim5_calc_dt_kernel_print, &xdim5, sizeof(int)); xdim5_calc_dt_kernel_print_h = xdim5; cudaMemcpyToSymbol(ydim5_calc_dt_kernel_print, &ydim5, sizeof(int)); ydim5_calc_dt_kernel_print_h = ydim5; cudaMemcpyToSymbol(xdim6_calc_dt_kernel_print, &xdim6, sizeof(int)); xdim6_calc_dt_kernel_print_h = xdim6; cudaMemcpyToSymbol(ydim6_calc_dt_kernel_print, &ydim6, sizeof(int)); ydim6_calc_dt_kernel_print_h = ydim6; } #ifdef OPS_MPI double *arg7h = (double *)(((ops_reduction)args[7].data)->data + ((ops_reduction)args[7].data)->size * block->index); #else double *arg7h = (double *)(((ops_reduction)args[7].data)->data); #endif dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int nblocks = 
((x_size - 1) / OPS_block_size_x + 1) * ((y_size - 1) / OPS_block_size_y + 1) * z_size; int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks * 28 * sizeof(double)); reduct_size = MAX(reduct_size, sizeof(double) * 28); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg7.data = OPS_reduct_h + reduct_bytes; arg7.data_d = OPS_reduct_d + reduct_bytes; for (int b = 0; b < maxblocks; b++) for (int d = 0; d < 28; d++) ((double *)arg7.data)[d + b * 28] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks * 28 * sizeof(double)); mvReductArraysToDevice(reduct_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; char *p_a[8]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * 
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + 
OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args, 8, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[40].mpi_time += t2 - t1; } int nshared = 0; int nthread = OPS_block_size_x * OPS_block_size_y; nshared = MAX(nshared, sizeof(double) * 28); nshared = MAX(nshared * nthread, reduct_size * nthread); // call kernel wrapper function, passing in pointers to data ops_calc_dt_kernel_print<<<grid, tblock, nshared>>>( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)arg7.data_d, x_size, y_size, z_size); mvReductArraysToHost(reduct_bytes); for (int b = 0; b < maxblocks; b++) { for (int d = 0; d < 28; d++) { arg7h[d] = arg7h[d] + ((double *)arg7.data)[d + b * 28]; } } arg7.data = (char *)arg7h; if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); 
ops_timers_core(&c1, &t1); OPS_kernels[40].time += t1 - t2; } ops_set_dirtybit_device(args, 8); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[40].mpi_time += t2 - t1; OPS_kernels[40].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[40].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[40].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[40].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[40].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[40].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[40].transfer += ops_compute_transfer(dim, start, end, &arg6); } }
cf6a211967a49f799fdd284dfb012a18e6ede3e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation and/or // other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON // ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Author's note: // This file was distributed as part of the Nature Biotechnology // supplementary software release for DeepBind. 
Users of DeepBind // are encouraged to instead use the latest source code and binaries // for scoring sequences at // http://tools.genes.toronto.edu/deepbind/ // #include <smat_cuda/cuda_errors.h> #include <smat_cuda/cuda_context.h> #include <smat_cuda/launch_util.h> #include <smat/vm/instruction_db.h> using namespace sm; __global__ void kernel_dropoutord_fp_tr(hiprandState_t* state, const uint8_t* X, uint8_t* Z, bool* M, usize_t n, float rate) { DECL_KERNEL_VARS unsigned tid = bdx*bx + tx; hiprandState_t local_state = state[tid]; for (usize_t i = (usize_t)tid; i < n; i += bdx*gdx) { bool mask = (X[i] == 255) || (hiprand_uniform(&local_state) >= rate); M[i] = mask; Z[i] = mask ? X[i] : 254; } state[tid] = local_state; } void launch_dropoutord_fp_tr(hipStream_t stream, const uint8_t* X, uint8_t* Z, bool* M, usize_t n, float rate) { launchcfg cfg = make_elemwise_launchcfg(n); hipLaunchKernelGGL(( kernel_dropoutord_fp_tr), dim3(cfg.gdim),dim3(cfg.bdim),cfg.smem,cfg.stream, thread_cudactx().curand_state(),X,Z,M,n,rate); }
cf6a211967a49f799fdd284dfb012a18e6ede3e4.cu
// Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation and/or // other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON // ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Author's note: // This file was distributed as part of the Nature Biotechnology // supplementary software release for DeepBind. 
Users of DeepBind // are encouraged to instead use the latest source code and binaries // for scoring sequences at // http://tools.genes.toronto.edu/deepbind/ // #include <smat_cuda/cuda_errors.h> #include <smat_cuda/cuda_context.h> #include <smat_cuda/launch_util.h> #include <smat/vm/instruction_db.h> using namespace sm; __global__ void kernel_dropoutord_fp_tr(curandState_t* state, const uint8_t* X, uint8_t* Z, bool* M, usize_t n, float rate) { DECL_KERNEL_VARS unsigned tid = bdx*bx + tx; curandState local_state = state[tid]; for (usize_t i = (usize_t)tid; i < n; i += bdx*gdx) { bool mask = (X[i] == 255) || (curand_uniform(&local_state) >= rate); M[i] = mask; Z[i] = mask ? X[i] : 254; } state[tid] = local_state; } void launch_dropoutord_fp_tr(cudaStream_t stream, const uint8_t* X, uint8_t* Z, bool* M, usize_t n, float rate) { launchcfg cfg = make_elemwise_launchcfg(n); kernel_dropoutord_fp_tr<<<cfg.gdim,cfg.bdim,cfg.smem,cfg.stream>>>(thread_cudactx().curand_state(),X,Z,M,n,rate); }
b39d2d624eced07aecabb034a706c9f0b6e40cbc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hip/hip_fp16.h> #include <cassert> #include <algorithm> #include "ResizeBilinear.hpp" // TODO: Move this to a common header inline bool is_CHW(nvinfer1::Dims const& dims) { return (dims.nbDims == 3 && dims.type[0] == nvinfer1::DimensionType::kCHANNEL && dims.type[1] == nvinfer1::DimensionType::kSPATIAL && dims.type[2] == nvinfer1::DimensionType::kSPATIAL); } nvinfer1::Dims ResizeBilinearPlugin::getOutputDimensions(int index, const nvinfer1::Dims *inputDims, int nbInputs) { assert(nbInputs == 1); nvinfer1::Dims const& input = inputDims[0]; assert(is_CHW(input)); assert(_ndims == 2); assert(index == 0); nvinfer1::Dims output; output.nbDims = input.nbDims; int s = 0; for( int d=0; d<input.nbDims; ++d ) { output.type[d] = input.type[d]; if( input.type[d] == nvinfer1::DimensionType::kSPATIAL ) { output.d[d] = int(input.d[d] * _scale[s++]); } else { output.d[d] = input.d[d]; } } return output; } int ResizeBilinearPlugin::initialize() { _output_dims = this->getOutputDimensions(0, &this->getInputDims(0), 1); assert(is_CHW(this->getInputDims(0))); assert(is_CHW(_output_dims)); assert(_ndims == 2); return 0; } __device__ void area_pixel_compute_source_index(float &rc, float scale, int dst_index, bool align_corners, bool cubic = false) { if (align_corners) { rc = scale * dst_index; return; } else { float src_idx = scale * (dst_index + 0.5) - 0.5; rc = (!cubic && src_idx < 0) ? 
float(0.0) : src_idx; return; } } template <typename Data> __global__ void resize_bilinear_kernel_2d(int n, int batchsize, int channels, int height1, int width1, int height2, int width2, float rheight, float rwidth, bool align_corners, Data const* idata, Data* odata) { const int in_batchsize_stride = channels * height1 * width1; const int in_channels_stride = height1 * width1; const int out_batchsize_stride = channels * height2 * width2; const int out_channels_stride = height2 * width2; int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < n) { const int w2 = index % width2; const int h2 = index / width2; if (height1 == height2 && width1 == width2) { const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; c++) { odata[n * out_batchsize_stride + c * out_channels_stride + h2 * width2 + w2] = idata[n * in_batchsize_stride + c * in_channels_stride + h1 * width1 + w1]; } } return; } // float h1r; area_pixel_compute_source_index(h1r, rheight, h2, align_corners, /*cubic=*/false); const int h1 = h1r; const int h1p = (h1 < height1 - 1) ? 1 : 0; const float h1lambda = h1r - h1; const float h0lambda = static_cast<float>(1) - h1lambda; // float w1r; area_pixel_compute_source_index(w1r, rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const float w1lambda = w1r - w1; const float w0lambda = static_cast<float>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const float val = h0lambda * (w0lambda * idata[n * in_batchsize_stride + c * in_channels_stride + h1 * width1 + w1] + w1lambda * idata[n * in_batchsize_stride + c * in_channels_stride + h1 * width1 + (w1 + w1p)]) + h1lambda * (w0lambda * idata[n * in_batchsize_stride + c * in_channels_stride + (h1 + h1p) * width1 + w1] + w1lambda * idata[n * in_batchsize_stride + c * in_channels_stride + (h1 + h1p) * width1 + (w1 + w1p)]); odata[n * out_batchsize_stride + c * out_channels_stride + h2 * width2 + w2] = val; } } } } float ResizeBilinearPlugin::area_pixel_compute_scale(int input_size, int output_size) { if(output_size > 1) { return _align_corners ? float(input_size - 1) / (output_size - 1) : float(input_size) / output_size; } else { return 0.0; } } int ResizeBilinearPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, hipStream_t stream) { auto const& input_dims = this->getInputDims(0); switch( _ndims ) { case 2: { const int channels = input_dims.d[0]; const int input_height = input_dims.d[1]; const int input_width = input_dims.d[2]; const int output_height = _output_dims.d[1]; const int output_width = _output_dims.d[2]; int obatchstride = _output_dims.d[1] * _output_dims.d[2]; int num_kernels = obatchstride; int num_threads = 512; int blocks = int((num_kernels + num_threads - 1) / num_threads); int grid = num_threads; float rheight = area_pixel_compute_scale(input_height, output_height); float rwidth = area_pixel_compute_scale(input_width, output_width); hipLaunchKernelGGL(( resize_bilinear_kernel_2d), dim3(blocks), dim3(grid), 0, stream, num_kernels, batchSize, channels, input_height, input_width, output_height, output_width, rheight, rwidth, _align_corners, static_cast<float const*>( inputs[0]), static_cast<float*>(outputs[0])); return hipGetLastError() != 
hipSuccess; } default: return -1; } }
b39d2d624eced07aecabb034a706c9f0b6e40cbc.cu
#include <cuda_fp16.h> #include <cassert> #include <algorithm> #include "ResizeBilinear.hpp" // TODO: Move this to a common header inline bool is_CHW(nvinfer1::Dims const& dims) { return (dims.nbDims == 3 && dims.type[0] == nvinfer1::DimensionType::kCHANNEL && dims.type[1] == nvinfer1::DimensionType::kSPATIAL && dims.type[2] == nvinfer1::DimensionType::kSPATIAL); } nvinfer1::Dims ResizeBilinearPlugin::getOutputDimensions(int index, const nvinfer1::Dims *inputDims, int nbInputs) { assert(nbInputs == 1); nvinfer1::Dims const& input = inputDims[0]; assert(is_CHW(input)); assert(_ndims == 2); assert(index == 0); nvinfer1::Dims output; output.nbDims = input.nbDims; int s = 0; for( int d=0; d<input.nbDims; ++d ) { output.type[d] = input.type[d]; if( input.type[d] == nvinfer1::DimensionType::kSPATIAL ) { output.d[d] = int(input.d[d] * _scale[s++]); } else { output.d[d] = input.d[d]; } } return output; } int ResizeBilinearPlugin::initialize() { _output_dims = this->getOutputDimensions(0, &this->getInputDims(0), 1); assert(is_CHW(this->getInputDims(0))); assert(is_CHW(_output_dims)); assert(_ndims == 2); return 0; } __device__ void area_pixel_compute_source_index(float &rc, float scale, int dst_index, bool align_corners, bool cubic = false) { if (align_corners) { rc = scale * dst_index; return; } else { float src_idx = scale * (dst_index + 0.5) - 0.5; rc = (!cubic && src_idx < 0) ? 
float(0.0) : src_idx; return; } } template <typename Data> __global__ void resize_bilinear_kernel_2d(int n, int batchsize, int channels, int height1, int width1, int height2, int width2, float rheight, float rwidth, bool align_corners, Data const* idata, Data* odata) { const int in_batchsize_stride = channels * height1 * width1; const int in_channels_stride = height1 * width1; const int out_batchsize_stride = channels * height2 * width2; const int out_channels_stride = height2 * width2; int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < n) { const int w2 = index % width2; const int h2 = index / width2; if (height1 == height2 && width1 == width2) { const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; c++) { odata[n * out_batchsize_stride + c * out_channels_stride + h2 * width2 + w2] = idata[n * in_batchsize_stride + c * in_channels_stride + h1 * width1 + w1]; } } return; } // float h1r; area_pixel_compute_source_index(h1r, rheight, h2, align_corners, /*cubic=*/false); const int h1 = h1r; const int h1p = (h1 < height1 - 1) ? 1 : 0; const float h1lambda = h1r - h1; const float h0lambda = static_cast<float>(1) - h1lambda; // float w1r; area_pixel_compute_source_index(w1r, rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const float w1lambda = w1r - w1; const float w0lambda = static_cast<float>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const float val = h0lambda * (w0lambda * idata[n * in_batchsize_stride + c * in_channels_stride + h1 * width1 + w1] + w1lambda * idata[n * in_batchsize_stride + c * in_channels_stride + h1 * width1 + (w1 + w1p)]) + h1lambda * (w0lambda * idata[n * in_batchsize_stride + c * in_channels_stride + (h1 + h1p) * width1 + w1] + w1lambda * idata[n * in_batchsize_stride + c * in_channels_stride + (h1 + h1p) * width1 + (w1 + w1p)]); odata[n * out_batchsize_stride + c * out_channels_stride + h2 * width2 + w2] = val; } } } } float ResizeBilinearPlugin::area_pixel_compute_scale(int input_size, int output_size) { if(output_size > 1) { return _align_corners ? float(input_size - 1) / (output_size - 1) : float(input_size) / output_size; } else { return 0.0; } } int ResizeBilinearPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream) { auto const& input_dims = this->getInputDims(0); switch( _ndims ) { case 2: { const int channels = input_dims.d[0]; const int input_height = input_dims.d[1]; const int input_width = input_dims.d[2]; const int output_height = _output_dims.d[1]; const int output_width = _output_dims.d[2]; int obatchstride = _output_dims.d[1] * _output_dims.d[2]; int num_kernels = obatchstride; int num_threads = 512; int blocks = int((num_kernels + num_threads - 1) / num_threads); int grid = num_threads; float rheight = area_pixel_compute_scale(input_height, output_height); float rwidth = area_pixel_compute_scale(input_width, output_width); resize_bilinear_kernel_2d<<<blocks, grid, 0, stream>>>( num_kernels, batchSize, channels, input_height, input_width, output_height, output_width, rheight, rwidth, _align_corners, static_cast<float const*>( inputs[0]), static_cast<float*>(outputs[0])); return cudaGetLastError() != cudaSuccess; } default: 
return -1; } }
6fbc30ba7fc49b47d7ee93bc9524cbb42a09e5f9.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "kmeans_hip.cuh" namespace ML { namespace kmeans { void fit_predict(const ML::cumlHandle &handle, int n_clusters, int metric, kmeans::InitMethod init, int max_iter, double tol, int seed, const float *X, int n_samples, int n_features, float *centroids, int *labels, int verbose) { const ML::cumlHandle_impl &h = handle.getImpl(); ML::detail::streamSyncer _(h); hipStream_t stream = h.getStream(); ML::KMeans<float> kmeans_obj( h, n_clusters, static_cast<MLCommon::Distance::DistanceType>(metric), init, max_iter, tol, seed, verbose); if (kmeans::InitMethod::Array == init) { ASSERT(centroids != nullptr, "centroids array is null (require a valid array of centroids for " "the requested initialization method)"); kmeans_obj.setCentroids(centroids, n_clusters, n_features); } kmeans_obj.fit(X, n_samples, n_features); if (labels) { kmeans_obj.predict(X, n_samples, n_features, labels); } MLCommon::copy(centroids, kmeans_obj.centroids(), n_clusters * n_features, stream); } void fit_predict(const ML::cumlHandle &handle, int n_clusters, int metric, kmeans::InitMethod init, int max_iter, double tol, int seed, const double *X, int n_samples, int n_features, double *centroids, int *labels, int verbose) { const ML::cumlHandle_impl &h = handle.getImpl(); ML::detail::streamSyncer _(h); hipStream_t stream = h.getStream(); ML::KMeans<double> 
kmeans_obj( h, n_clusters, static_cast<MLCommon::Distance::DistanceType>(metric), init, max_iter, tol, seed, verbose); if (kmeans::InitMethod::Array == init) { ASSERT(centroids != nullptr, "centroids array is null (require a valid array of centroids for " "the requested initialization method)"); kmeans_obj.setCentroids(centroids, n_clusters, n_features); } kmeans_obj.fit(X, n_samples, n_features); if (labels) { kmeans_obj.predict(X, n_samples, n_features, labels); } MLCommon::copy(centroids, kmeans_obj.centroids(), n_clusters * n_features, stream); } void fit(const ML::cumlHandle &handle, int n_clusters, int metric, kmeans::InitMethod init, int max_iter, double tol, int seed, const float *X, int n_samples, int n_features, float *centroids, int verbose) { fit_predict(handle, n_clusters, metric, init, max_iter, tol, seed, X, n_samples, n_features, centroids, nullptr, verbose); } void fit(const ML::cumlHandle &handle, int n_clusters, int metric, kmeans::InitMethod init, int max_iter, double tol, int seed, const double *X, int n_samples, int n_features, double *centroids, int verbose) { fit_predict(handle, n_clusters, metric, init, max_iter, tol, seed, X, n_samples, n_features, centroids, nullptr, verbose); } void predict(const ML::cumlHandle &handle, float *centroids, int n_clusters, const float *X, int n_samples, int n_features, int metric, int *labels, double *inertia, int verbose) { const ML::cumlHandle_impl &h = handle.getImpl(); ML::detail::streamSyncer _(h); hipStream_t stream = h.getStream(); ML::KMeans<float> kmeans_obj( h, n_clusters, static_cast<MLCommon::Distance::DistanceType>(metric)); kmeans_obj.setCentroids(centroids, n_clusters, n_features); kmeans_obj.predict(X, n_samples, n_features, labels); const double obj_inertia = -1 * kmeans_obj.getInertia(); std::memcpy(inertia, &obj_inertia, sizeof(double)); } void predict(const ML::cumlHandle &handle, double *centroids, int n_clusters, const double *X, int n_samples, int n_features, int metric, int *labels, 
double *inertia, int verbose) { const ML::cumlHandle_impl &h = handle.getImpl(); ML::detail::streamSyncer _(h); hipStream_t stream = h.getStream(); ML::KMeans<double> kmeans_obj( h, n_clusters, static_cast<MLCommon::Distance::DistanceType>(metric)); kmeans_obj.setCentroids(centroids, n_clusters, n_features); kmeans_obj.predict(X, n_samples, n_features, labels); const double obj_inertia = -1 * kmeans_obj.getInertia(); std::memcpy(inertia, &obj_inertia, sizeof(double)); } void transform(const ML::cumlHandle &handle, const float *centroids, int n_clusters, const float *X, int n_samples, int n_features, int metric, float *X_new, double *inertia, int verbose) { const ML::cumlHandle_impl &h = handle.getImpl(); ML::detail::streamSyncer _(h); hipStream_t stream = h.getStream(); ML::KMeans<float> kmeans_obj( h, n_clusters, static_cast<MLCommon::Distance::DistanceType>(metric)); kmeans_obj.setCentroids(centroids, n_clusters, n_features); kmeans_obj.transform(X, n_samples, n_features, X_new); const double obj_inertia = -1 * kmeans_obj.getInertia(); std::memcpy(inertia, &obj_inertia, sizeof(double)); } void transform(const ML::cumlHandle &handle, const double *centroids, int n_clusters, const double *X, int n_samples, int n_features, int metric, double *X_new, double *inertia, int verbose) { const ML::cumlHandle_impl &h = handle.getImpl(); ML::detail::streamSyncer _(h); hipStream_t stream = h.getStream(); ML::KMeans<double> kmeans_obj( h, n_clusters, static_cast<MLCommon::Distance::DistanceType>(metric)); kmeans_obj.setCentroids(centroids, n_clusters, n_features); kmeans_obj.transform(X, n_samples, n_features, X_new); const double obj_inertia = -1 * kmeans_obj.getInertia(); std::memcpy(inertia, &obj_inertia, sizeof(double)); } void score(const ML::cumlHandle &handle, float *centroids, int n_clusters, const float *X, int n_samples, int n_features, int metric, int *labels, double *inertia, int verbose) { predict(handle, centroids, n_clusters, X, n_samples, n_features, metric, 
labels, inertia, verbose); } void score(const ML::cumlHandle &handle, double *centroids, int n_clusters, const double *X, int n_samples, int n_features, int metric, int *labels, double *inertia, int verbose) { predict(handle, centroids, n_clusters, X, n_samples, n_features, metric, labels, inertia, verbose); } }; // end namespace kmeans }; // end namespace ML
6fbc30ba7fc49b47d7ee93bc9524cbb42a09e5f9.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "kmeans.cuh" namespace ML { namespace kmeans { void fit_predict(const ML::cumlHandle &handle, int n_clusters, int metric, kmeans::InitMethod init, int max_iter, double tol, int seed, const float *X, int n_samples, int n_features, float *centroids, int *labels, int verbose) { const ML::cumlHandle_impl &h = handle.getImpl(); ML::detail::streamSyncer _(h); cudaStream_t stream = h.getStream(); ML::KMeans<float> kmeans_obj( h, n_clusters, static_cast<MLCommon::Distance::DistanceType>(metric), init, max_iter, tol, seed, verbose); if (kmeans::InitMethod::Array == init) { ASSERT(centroids != nullptr, "centroids array is null (require a valid array of centroids for " "the requested initialization method)"); kmeans_obj.setCentroids(centroids, n_clusters, n_features); } kmeans_obj.fit(X, n_samples, n_features); if (labels) { kmeans_obj.predict(X, n_samples, n_features, labels); } MLCommon::copy(centroids, kmeans_obj.centroids(), n_clusters * n_features, stream); } void fit_predict(const ML::cumlHandle &handle, int n_clusters, int metric, kmeans::InitMethod init, int max_iter, double tol, int seed, const double *X, int n_samples, int n_features, double *centroids, int *labels, int verbose) { const ML::cumlHandle_impl &h = handle.getImpl(); ML::detail::streamSyncer _(h); cudaStream_t stream = h.getStream(); ML::KMeans<double> kmeans_obj( h, n_clusters, 
static_cast<MLCommon::Distance::DistanceType>(metric), init, max_iter, tol, seed, verbose); if (kmeans::InitMethod::Array == init) { ASSERT(centroids != nullptr, "centroids array is null (require a valid array of centroids for " "the requested initialization method)"); kmeans_obj.setCentroids(centroids, n_clusters, n_features); } kmeans_obj.fit(X, n_samples, n_features); if (labels) { kmeans_obj.predict(X, n_samples, n_features, labels); } MLCommon::copy(centroids, kmeans_obj.centroids(), n_clusters * n_features, stream); } void fit(const ML::cumlHandle &handle, int n_clusters, int metric, kmeans::InitMethod init, int max_iter, double tol, int seed, const float *X, int n_samples, int n_features, float *centroids, int verbose) { fit_predict(handle, n_clusters, metric, init, max_iter, tol, seed, X, n_samples, n_features, centroids, nullptr, verbose); } void fit(const ML::cumlHandle &handle, int n_clusters, int metric, kmeans::InitMethod init, int max_iter, double tol, int seed, const double *X, int n_samples, int n_features, double *centroids, int verbose) { fit_predict(handle, n_clusters, metric, init, max_iter, tol, seed, X, n_samples, n_features, centroids, nullptr, verbose); } void predict(const ML::cumlHandle &handle, float *centroids, int n_clusters, const float *X, int n_samples, int n_features, int metric, int *labels, double *inertia, int verbose) { const ML::cumlHandle_impl &h = handle.getImpl(); ML::detail::streamSyncer _(h); cudaStream_t stream = h.getStream(); ML::KMeans<float> kmeans_obj( h, n_clusters, static_cast<MLCommon::Distance::DistanceType>(metric)); kmeans_obj.setCentroids(centroids, n_clusters, n_features); kmeans_obj.predict(X, n_samples, n_features, labels); const double obj_inertia = -1 * kmeans_obj.getInertia(); std::memcpy(inertia, &obj_inertia, sizeof(double)); } void predict(const ML::cumlHandle &handle, double *centroids, int n_clusters, const double *X, int n_samples, int n_features, int metric, int *labels, double *inertia, int 
verbose) { const ML::cumlHandle_impl &h = handle.getImpl(); ML::detail::streamSyncer _(h); cudaStream_t stream = h.getStream(); ML::KMeans<double> kmeans_obj( h, n_clusters, static_cast<MLCommon::Distance::DistanceType>(metric)); kmeans_obj.setCentroids(centroids, n_clusters, n_features); kmeans_obj.predict(X, n_samples, n_features, labels); const double obj_inertia = -1 * kmeans_obj.getInertia(); std::memcpy(inertia, &obj_inertia, sizeof(double)); } void transform(const ML::cumlHandle &handle, const float *centroids, int n_clusters, const float *X, int n_samples, int n_features, int metric, float *X_new, double *inertia, int verbose) { const ML::cumlHandle_impl &h = handle.getImpl(); ML::detail::streamSyncer _(h); cudaStream_t stream = h.getStream(); ML::KMeans<float> kmeans_obj( h, n_clusters, static_cast<MLCommon::Distance::DistanceType>(metric)); kmeans_obj.setCentroids(centroids, n_clusters, n_features); kmeans_obj.transform(X, n_samples, n_features, X_new); const double obj_inertia = -1 * kmeans_obj.getInertia(); std::memcpy(inertia, &obj_inertia, sizeof(double)); } void transform(const ML::cumlHandle &handle, const double *centroids, int n_clusters, const double *X, int n_samples, int n_features, int metric, double *X_new, double *inertia, int verbose) { const ML::cumlHandle_impl &h = handle.getImpl(); ML::detail::streamSyncer _(h); cudaStream_t stream = h.getStream(); ML::KMeans<double> kmeans_obj( h, n_clusters, static_cast<MLCommon::Distance::DistanceType>(metric)); kmeans_obj.setCentroids(centroids, n_clusters, n_features); kmeans_obj.transform(X, n_samples, n_features, X_new); const double obj_inertia = -1 * kmeans_obj.getInertia(); std::memcpy(inertia, &obj_inertia, sizeof(double)); } void score(const ML::cumlHandle &handle, float *centroids, int n_clusters, const float *X, int n_samples, int n_features, int metric, int *labels, double *inertia, int verbose) { predict(handle, centroids, n_clusters, X, n_samples, n_features, metric, labels, inertia, 
verbose); } void score(const ML::cumlHandle &handle, double *centroids, int n_clusters, const double *X, int n_samples, int n_features, int metric, int *labels, double *inertia, int verbose) { predict(handle, centroids, n_clusters, X, n_samples, n_features, metric, labels, inertia, verbose); } }; // end namespace kmeans }; // end namespace ML
896d05b3f062a1fc7c35afb67d434ce451503c78.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdlib> #include <time.h> #include <omp.h> #include "device_launch_parameters.h" #include "hip/hip_runtime.h" using namespace std; __global__ void saxpy_kernel(int size, float alpha, float* x, int incx, float* y, int incy) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) if (i * incx > size || i * incy > size) return; else y[i * incy] += alpha * x[i * incx]; } template <typename T> void axpy_cpu(int size, T alpha, const T *x, int incx, T *y, int incy) { for (int i = 0; i < size; i++) y[i * incy] += alpha * x[i * incx]; } template <typename T> void axpy_cpu_omp(int size, T alpha, const T *x, int incx, T *y, int incy) { #pragma omp parallel for for (int i = 0; i < size; i++) y[i * incy] += alpha * x[i * incx]; } int main() { int size = 200000000; float alpha = 1.0f; int incx = 1, incy = 1; float *x = new float[size], *x_gpu; float *y = new float[size], *y_gpu; float *result_gpu = new float[size]; hipMalloc((void**)&x_gpu, size * sizeof(float)); hipMalloc((void**)&y_gpu, size * sizeof(float)); for (int i = 0; i < size; i++) x[i] = y[i] = i; hipMemcpy(x_gpu, x, size * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(y_gpu, y, size * sizeof(float), hipMemcpyHostToDevice); clock_t start_cpu = clock(); axpy_cpu<float>(size, alpha, x, incx, y, incy); clock_t finish_cpu = clock(); float time_cpu = (float)(finish_cpu - start_cpu) / CLOCKS_PER_SEC; clock_t start_cpu_omp = clock(); axpy_cpu_omp<float>(size, alpha, x, incx, y, incy); clock_t finish_cpu_omp = clock(); float time_cpu_omp = (float)(finish_cpu_omp - start_cpu_omp) / CLOCKS_PER_SEC; const int block_size = 256; int num_block = (size + block_size - 1) / block_size; clock_t start_gpu = clock(); hipLaunchKernelGGL(( saxpy_kernel) , dim3(num_block), dim3(block_size) , 0, 0, size, alpha, x_gpu, incx, y_gpu, incy); hipDeviceSynchronize(); clock_t finish_gpu = clock(); float time_gpu = (float)(finish_gpu - start_gpu) / 
CLOCKS_PER_SEC; hipMemcpy(result_gpu, y_gpu, size * sizeof(float), hipMemcpyDeviceToHost); bool flag = false; #pragma omp parallel for for (int i = 0; i < size; i++) if (y[i] != result_gpu[i]) { flag = true; break; } /*if (flag == true) cout << "Not equal" << endl; else cout << "Equal" << endl;*/ cout << "time_cpu = " << time_cpu << endl << "time_cpu_omp = " << time_cpu_omp << endl << "time_gpu = " << time_gpu << endl; system("pause"); delete[]x; delete[]y; hipFree(x_gpu); hipFree(y_gpu); return 0; }
896d05b3f062a1fc7c35afb67d434ce451503c78.cu
#include <iostream> #include <cstdlib> #include <time.h> #include <omp.h> #include "device_launch_parameters.h" #include "cuda_runtime.h" using namespace std; __global__ void saxpy_kernel(int size, float alpha, float* x, int incx, float* y, int incy) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) if (i * incx > size || i * incy > size) return; else y[i * incy] += alpha * x[i * incx]; } template <typename T> void axpy_cpu(int size, T alpha, const T *x, int incx, T *y, int incy) { for (int i = 0; i < size; i++) y[i * incy] += alpha * x[i * incx]; } template <typename T> void axpy_cpu_omp(int size, T alpha, const T *x, int incx, T *y, int incy) { #pragma omp parallel for for (int i = 0; i < size; i++) y[i * incy] += alpha * x[i * incx]; } int main() { int size = 200000000; float alpha = 1.0f; int incx = 1, incy = 1; float *x = new float[size], *x_gpu; float *y = new float[size], *y_gpu; float *result_gpu = new float[size]; cudaMalloc((void**)&x_gpu, size * sizeof(float)); cudaMalloc((void**)&y_gpu, size * sizeof(float)); for (int i = 0; i < size; i++) x[i] = y[i] = i; cudaMemcpy(x_gpu, x, size * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(y_gpu, y, size * sizeof(float), cudaMemcpyHostToDevice); clock_t start_cpu = clock(); axpy_cpu<float>(size, alpha, x, incx, y, incy); clock_t finish_cpu = clock(); float time_cpu = (float)(finish_cpu - start_cpu) / CLOCKS_PER_SEC; clock_t start_cpu_omp = clock(); axpy_cpu_omp<float>(size, alpha, x, incx, y, incy); clock_t finish_cpu_omp = clock(); float time_cpu_omp = (float)(finish_cpu_omp - start_cpu_omp) / CLOCKS_PER_SEC; const int block_size = 256; int num_block = (size + block_size - 1) / block_size; clock_t start_gpu = clock(); saxpy_kernel <<< num_block, block_size >>> (size, alpha, x_gpu, incx, y_gpu, incy); cudaDeviceSynchronize(); clock_t finish_gpu = clock(); float time_gpu = (float)(finish_gpu - start_gpu) / CLOCKS_PER_SEC; cudaMemcpy(result_gpu, y_gpu, size * sizeof(float), cudaMemcpyDeviceToHost); 
bool flag = false; #pragma omp parallel for for (int i = 0; i < size; i++) if (y[i] != result_gpu[i]) { flag = true; break; } /*if (flag == true) cout << "Not equal" << endl; else cout << "Equal" << endl;*/ cout << "time_cpu = " << time_cpu << endl << "time_cpu_omp = " << time_cpu_omp << endl << "time_gpu = " << time_gpu << endl; system("pause"); delete[]x; delete[]y; cudaFree(x_gpu); cudaFree(y_gpu); return 0; }
1dbdedb9dc0559fbcb211c4cb87faa8959e77872.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Code for the equation solver. Author: Naga Kandasamy Date modified: 3/4/2018 */ #include <stdio.h> #include <string.h> #include <malloc.h> #include <time.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #include "grid.h" // This file defines the grid data structure // Include the kernel code during the preprocessing step #include "solver_kernel.cu" extern "C" void compute_gold(GRID_STRUCT *); float cpuTime; /* This function prints the grid on the screen */ void display_grid(GRID_STRUCT *my_grid) { for(int i = 0; i < my_grid->dimension; i++) for(int j = 0; j < my_grid->dimension; j++) printf("%f \t", my_grid->element[i * my_grid->dimension + j]); printf("\n"); } /* This function prints out statistics for the converged values, including min, max, and average. */ void print_statistics(GRID_STRUCT *my_grid) { // Print statistics for the CPU grid float min = INFINITY; float max = 0.0; double sum = 0.0; for(int i = 0; i < my_grid->dimension; i++){ for(int j = 0; j < my_grid->dimension; j++){ sum += my_grid->element[i * my_grid->dimension + j]; // Compute the sum if(my_grid->element[i * my_grid->dimension + j] > max) max = my_grid->element[i * my_grid->dimension + j]; // Determine max if(my_grid->element[i * my_grid->dimension + j] < min) min = my_grid->element[i * my_grid->dimension + j]; // Determine min } } printf("AVG: %f \n", sum/(float)my_grid->num_elements); printf("MIN: %f \n", min); printf("MAX: %f \n", max); printf("\n"); } /* Calculate the differences between grid elements for the various implementations. 
*/ void compute_grid_differences(GRID_STRUCT *grid_1, GRID_STRUCT *grid_2) { double diff; int dimension = grid_1->dimension; int num_elements = dimension*dimension; diff = 0.0; for(int i = 0; i < grid_1->dimension; i++){ for(int j = 0; j < grid_1->dimension; j++){ diff += fabsf(grid_1->element[i * dimension + j] - grid_2->element[i * dimension + j]); } } printf("Average difference in grid elements for Gauss Seidel and Jacobi methods = %f. \n", \ diff/num_elements); } /* This function creates a grid of random floating point values bounded by UPPER_BOUND_ON_GRID_VALUE */ void create_grids(GRID_STRUCT *grid_for_cpu, GRID_STRUCT *grid_for_gpu) { printf("Creating a grid of dimension %d x %d. \n", grid_for_cpu->dimension, grid_for_cpu->dimension); grid_for_cpu->element = (float *)malloc(sizeof(float) * grid_for_cpu->num_elements); grid_for_gpu->element = (float *)malloc(sizeof(float) * grid_for_gpu->num_elements); srand((unsigned)time(NULL)); // Seed the the random number generator float val; for(int i = 0; i < grid_for_cpu->dimension; i++) for(int j = 0; j < grid_for_cpu->dimension; j++){ val = ((float)rand()/(float)RAND_MAX) * UPPER_BOUND_ON_GRID_VALUE; // Obtain a random value grid_for_cpu->element[i * grid_for_cpu->dimension + j] = val; grid_for_gpu->element[i * grid_for_gpu->dimension + j] = val; } } /* Edit this function skeleton to solve the equation on the device. Store the results back in the my_grid->element data structure for comparison with the CPU result. 
*/ void compute_on_device(GRID_STRUCT *src) { //GRID_STRUCT *temp = (GRID_STRUCT *)malloc(sizeof(GRID_STRUCT)); GRID_STRUCT *dest = (GRID_STRUCT *)malloc(sizeof(GRID_STRUCT)); dest->dimension = GRID_DIMENSION; dest->dimension = src->dimension *src->dimension; struct timeval start, stop; double diff = 0; double *Diff_on_device; float *A_on_device; float *B_on_device; float *tmpPtr; //allocate memory for src, dest, and diff on GPU hipMalloc((void**)&A_on_device, GRID_DIMENSION*GRID_DIMENSION*sizeof(float)); hipMalloc((void**)&B_on_device, GRID_DIMENSION*GRID_DIMENSION*sizeof(float)); hipMalloc((void**)&Diff_on_device, sizeof(double)); gettimeofday(&start, NULL); hipMemcpy(A_on_device, src->element, GRID_DIMENSION*GRID_DIMENSION*sizeof(float), hipMemcpyHostToDevice); // setup grid and thread blocks dim3 grid(GRID_SIZE, GRID_SIZE); dim3 thread_block(BLOCK_SIZE, BLOCK_SIZE); printf("Creating grid of size %dx%d blocks\n", GRID_SIZE, GRID_SIZE); printf("Creating block of size %dx%d threads\n", BLOCK_SIZE, BLOCK_SIZE); int done = 0, cnt = 0; while(!done) { //launch the kernel diff = (double)5; hipMemcpy(Diff_on_device, &diff, sizeof(double), hipMemcpyHostToDevice); printf("executing kernel...\n"); hipLaunchKernelGGL(( solver_kernel_naive), dim3(grid), dim3(thread_block), 0, 0, A_on_device, B_on_device, Diff_on_device); //solver_kernel_optimized<<<grid, thread_block>>>(A_on_device, B_on_device, Diff_on_device); hipDeviceSynchronize(); //copy diff from the GPU only a single value hipMemcpy(&diff, Diff_on_device, sizeof(double), hipMemcpyDeviceToHost); printf("GPU iteration %d : diff = %f\n", ++cnt, diff); if( (diff/(GRID_DIMENSION*GRID_DIMENSION)) < TOLERANCE ) { done = 1; } // most boring game of ping-pong I've ever played tmpPtr = A_on_device; A_on_device = B_on_device; B_on_device = tmpPtr; } gettimeofday(&stop, NULL); float gpuTime = (float)(stop.tv_sec - start.tv_sec+(stop.tv_usec - start.tv_usec)/(float)1000000); float speedUp = cpuTime / gpuTime; printf(">> Speedup = 
%f\n", speedUp); //Copy dest from the GPU, because we need the final output hipMemcpy(dest->element, A_on_device, GRID_DIMENSION*GRID_DIMENSION*sizeof(float), hipMemcpyDeviceToHost); src=dest; //free memory on GPU hipFree(A_on_device); hipFree(B_on_device); hipFree(Diff_on_device); } /* The main function */ int main(int argc, char **argv) { /* Generate the grid */ GRID_STRUCT *grid_for_cpu = (GRID_STRUCT *)malloc(sizeof(GRID_STRUCT)); // The grid data structure GRID_STRUCT *grid_for_gpu = (GRID_STRUCT *)malloc(sizeof(GRID_STRUCT)); // The grid data structure grid_for_cpu->dimension = GRID_DIMENSION; grid_for_cpu->num_elements = grid_for_cpu->dimension * grid_for_cpu->dimension; grid_for_gpu->dimension = GRID_DIMENSION; grid_for_gpu->num_elements = grid_for_gpu->dimension * grid_for_gpu->dimension; create_grids(grid_for_cpu, grid_for_gpu); // Create the grids and populate them with the same set of random values printf("Using the cpu to solve the grid. \n"); struct timeval start, stop; gettimeofday(&start, NULL); compute_gold(grid_for_cpu); // Use CPU to solve gettimeofday(&stop, NULL); cpuTime = (float)(stop.tv_sec - start.tv_sec+(stop.tv_usec - start.tv_usec)/(float)1000000); // Use the GPU to solve the equation compute_on_device(grid_for_gpu); // Print key statistics for the converged values printf("CPU: \n"); print_statistics(grid_for_cpu); printf("GPU: \n"); print_statistics(grid_for_gpu); /* Compute grid differences. */ compute_grid_differences(grid_for_cpu, grid_for_gpu); free((void *)grid_for_cpu->element); free((void *)grid_for_cpu); // Free the grid data structure free((void *)grid_for_gpu->element); free((void *)grid_for_gpu); // Free the grid data structure exit(0); }
1dbdedb9dc0559fbcb211c4cb87faa8959e77872.cu
/* Code for the equation solver. Author: Naga Kandasamy Date modified: 3/4/2018 */ #include <stdio.h> #include <string.h> #include <malloc.h> #include <time.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #include "grid.h" // This file defines the grid data structure // Include the kernel code during the preprocessing step #include "solver_kernel.cu" extern "C" void compute_gold(GRID_STRUCT *); float cpuTime; /* This function prints the grid on the screen */ void display_grid(GRID_STRUCT *my_grid) { for(int i = 0; i < my_grid->dimension; i++) for(int j = 0; j < my_grid->dimension; j++) printf("%f \t", my_grid->element[i * my_grid->dimension + j]); printf("\n"); } /* This function prints out statistics for the converged values, including min, max, and average. */ void print_statistics(GRID_STRUCT *my_grid) { // Print statistics for the CPU grid float min = INFINITY; float max = 0.0; double sum = 0.0; for(int i = 0; i < my_grid->dimension; i++){ for(int j = 0; j < my_grid->dimension; j++){ sum += my_grid->element[i * my_grid->dimension + j]; // Compute the sum if(my_grid->element[i * my_grid->dimension + j] > max) max = my_grid->element[i * my_grid->dimension + j]; // Determine max if(my_grid->element[i * my_grid->dimension + j] < min) min = my_grid->element[i * my_grid->dimension + j]; // Determine min } } printf("AVG: %f \n", sum/(float)my_grid->num_elements); printf("MIN: %f \n", min); printf("MAX: %f \n", max); printf("\n"); } /* Calculate the differences between grid elements for the various implementations. */ void compute_grid_differences(GRID_STRUCT *grid_1, GRID_STRUCT *grid_2) { double diff; int dimension = grid_1->dimension; int num_elements = dimension*dimension; diff = 0.0; for(int i = 0; i < grid_1->dimension; i++){ for(int j = 0; j < grid_1->dimension; j++){ diff += fabsf(grid_1->element[i * dimension + j] - grid_2->element[i * dimension + j]); } } printf("Average difference in grid elements for Gauss Seidel and Jacobi methods = %f. 
\n", \ diff/num_elements); } /* This function creates a grid of random floating point values bounded by UPPER_BOUND_ON_GRID_VALUE */ void create_grids(GRID_STRUCT *grid_for_cpu, GRID_STRUCT *grid_for_gpu) { printf("Creating a grid of dimension %d x %d. \n", grid_for_cpu->dimension, grid_for_cpu->dimension); grid_for_cpu->element = (float *)malloc(sizeof(float) * grid_for_cpu->num_elements); grid_for_gpu->element = (float *)malloc(sizeof(float) * grid_for_gpu->num_elements); srand((unsigned)time(NULL)); // Seed the the random number generator float val; for(int i = 0; i < grid_for_cpu->dimension; i++) for(int j = 0; j < grid_for_cpu->dimension; j++){ val = ((float)rand()/(float)RAND_MAX) * UPPER_BOUND_ON_GRID_VALUE; // Obtain a random value grid_for_cpu->element[i * grid_for_cpu->dimension + j] = val; grid_for_gpu->element[i * grid_for_gpu->dimension + j] = val; } } /* Edit this function skeleton to solve the equation on the device. Store the results back in the my_grid->element data structure for comparison with the CPU result. 
*/ void compute_on_device(GRID_STRUCT *src) { //GRID_STRUCT *temp = (GRID_STRUCT *)malloc(sizeof(GRID_STRUCT)); GRID_STRUCT *dest = (GRID_STRUCT *)malloc(sizeof(GRID_STRUCT)); dest->dimension = GRID_DIMENSION; dest->dimension = src->dimension *src->dimension; struct timeval start, stop; double diff = 0; double *Diff_on_device; float *A_on_device; float *B_on_device; float *tmpPtr; //allocate memory for src, dest, and diff on GPU cudaMalloc((void**)&A_on_device, GRID_DIMENSION*GRID_DIMENSION*sizeof(float)); cudaMalloc((void**)&B_on_device, GRID_DIMENSION*GRID_DIMENSION*sizeof(float)); cudaMalloc((void**)&Diff_on_device, sizeof(double)); gettimeofday(&start, NULL); cudaMemcpy(A_on_device, src->element, GRID_DIMENSION*GRID_DIMENSION*sizeof(float), cudaMemcpyHostToDevice); // setup grid and thread blocks dim3 grid(GRID_SIZE, GRID_SIZE); dim3 thread_block(BLOCK_SIZE, BLOCK_SIZE); printf("Creating grid of size %dx%d blocks\n", GRID_SIZE, GRID_SIZE); printf("Creating block of size %dx%d threads\n", BLOCK_SIZE, BLOCK_SIZE); int done = 0, cnt = 0; while(!done) { //launch the kernel diff = (double)5; cudaMemcpy(Diff_on_device, &diff, sizeof(double), cudaMemcpyHostToDevice); printf("executing kernel...\n"); solver_kernel_naive<<<grid, thread_block>>>(A_on_device, B_on_device, Diff_on_device); //solver_kernel_optimized<<<grid, thread_block>>>(A_on_device, B_on_device, Diff_on_device); cudaThreadSynchronize(); //copy diff from the GPU only a single value cudaMemcpy(&diff, Diff_on_device, sizeof(double), cudaMemcpyDeviceToHost); printf("GPU iteration %d : diff = %f\n", ++cnt, diff); if( (diff/(GRID_DIMENSION*GRID_DIMENSION)) < TOLERANCE ) { done = 1; } // most boring game of ping-pong I've ever played tmpPtr = A_on_device; A_on_device = B_on_device; B_on_device = tmpPtr; } gettimeofday(&stop, NULL); float gpuTime = (float)(stop.tv_sec - start.tv_sec+(stop.tv_usec - start.tv_usec)/(float)1000000); float speedUp = cpuTime / gpuTime; printf(">> Speedup = %f\n", speedUp); //Copy 
dest from the GPU, because we need the final output cudaMemcpy(dest->element, A_on_device, GRID_DIMENSION*GRID_DIMENSION*sizeof(float), cudaMemcpyDeviceToHost); src=dest; //free memory on GPU cudaFree(A_on_device); cudaFree(B_on_device); cudaFree(Diff_on_device); } /* The main function */ int main(int argc, char **argv) { /* Generate the grid */ GRID_STRUCT *grid_for_cpu = (GRID_STRUCT *)malloc(sizeof(GRID_STRUCT)); // The grid data structure GRID_STRUCT *grid_for_gpu = (GRID_STRUCT *)malloc(sizeof(GRID_STRUCT)); // The grid data structure grid_for_cpu->dimension = GRID_DIMENSION; grid_for_cpu->num_elements = grid_for_cpu->dimension * grid_for_cpu->dimension; grid_for_gpu->dimension = GRID_DIMENSION; grid_for_gpu->num_elements = grid_for_gpu->dimension * grid_for_gpu->dimension; create_grids(grid_for_cpu, grid_for_gpu); // Create the grids and populate them with the same set of random values printf("Using the cpu to solve the grid. \n"); struct timeval start, stop; gettimeofday(&start, NULL); compute_gold(grid_for_cpu); // Use CPU to solve gettimeofday(&stop, NULL); cpuTime = (float)(stop.tv_sec - start.tv_sec+(stop.tv_usec - start.tv_usec)/(float)1000000); // Use the GPU to solve the equation compute_on_device(grid_for_gpu); // Print key statistics for the converged values printf("CPU: \n"); print_statistics(grid_for_cpu); printf("GPU: \n"); print_statistics(grid_for_gpu); /* Compute grid differences. */ compute_grid_differences(grid_for_cpu, grid_for_gpu); free((void *)grid_for_cpu->element); free((void *)grid_for_cpu); // Free the grid data structure free((void *)grid_for_gpu->element); free((void *)grid_for_gpu); // Free the grid data structure exit(0); }
c1c9afd4cf6eb21242bcbe52370002c6c1c0524f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<math.h> #include<cuda.h> #include<cuda_runtime.h> #include<device_launch_parameters.h> #include<cstdlib> #include"compute_stress.h" //#include<Python.h> //#include<helper_cuda.h> #define BLOCK_X 32 #define BLOCK_Y 32 __global__ void compute_stress_gpu(double *pos, double *dist, double *out, int n_nodes){ int i = threadIdx.x + blockIdx.x * BLOCK_X; int j = threadIdx.y + blockIdx.y * BLOCK_Y; int idxx = threadIdx.x; int idxy = threadIdx.y; __shared__ double shared_nums[BLOCK_X][BLOCK_Y]; if (i == j || i >= n_nodes || j >= n_nodes){ shared_nums[idxx][idxy] = 0; } else{ double d = sqrt((pos[2 * i] - pos[2 * j]) * (pos[2 * i] - pos[2 * j]) + (pos[2 * i + 1] - pos[2 * j + 1]) * (pos[2 * i + 1] - pos[2 * j + 1])); shared_nums[idxx][idxy] = (d / dist[i * n_nodes + j] - 1); shared_nums[idxx][idxy] *= shared_nums[idxx][idxy]; } // reduction on Y for (unsigned int stride = BLOCK_Y / 2; stride > 0; stride /= 2){ __syncthreads(); if (idxy < stride && idxy + stride < n_nodes){ shared_nums[idxx][idxy] += shared_nums[idxx][idxy + stride]; } } // reduction on X if (idxy == 0) { for (unsigned int stride = BLOCK_X / 2; stride > 0; stride /= 2) { __syncthreads(); if (idxx < stride && idxx + stride < n_nodes) { shared_nums[idxx][0] += shared_nums[idxx + stride][0]; } } } // atomic sum __syncthreads(); if (idxx == 0 && idxy == 0){ atomicAdd(out, shared_nums[0][0]); } } double compute_stress(double *pos, double *dist, int n){ int i, j; double ans = 0; for (i = 0; i < n; ++i){ for (j = 0; j < n; ++j){ if (i == j){ continue; } double d = sqrt((pos[2 * i] - pos[2 * j]) * (pos[2 * i] - pos[2 * j]) + (pos[2 * i + 1] - pos[2 * j + 1]) * (pos[2 * i + 1] - pos[2 * j + 1])); d = (d / dist[i * n + j] - 1); d *= d; ans += d; } } return ans; } int f(int n){ double *pos, *dist, out; double *d_pos, *d_dist, *d_out; pos = (double*)malloc(sizeof(double) * n * 2); dist = 
(double*)malloc(sizeof(double) * n * n); hipMalloc((void**)&d_pos, sizeof(double) * n * 2); hipMalloc((void**)&d_dist, sizeof(double) * n * n); hipMalloc((void**)&d_out, sizeof(double)); // initialize for (int i = 0; i < n; ++i){ pos[i * 2] = rand() / double(RAND_MAX); pos[i * 2 + 1] = rand() / (double)(RAND_MAX); } for (int i = 0; i < n * n; ++i){ dist[i] = double(rand() % 10) + 1; } hipMemcpy(d_pos, pos, sizeof(double) * n * 2, hipMemcpyHostToDevice); hipMemcpy(d_dist, dist, sizeof(double) * n * n, hipMemcpyHostToDevice); hipMemset(d_out, 0, sizeof(double)); int bx = 1 + (n - 1) / BLOCK_X; int by = 1 + (n - 1) / BLOCK_Y; dim3 dimGrid(bx, by); dim3 dimBlock(BLOCK_X, BLOCK_Y); hipLaunchKernelGGL(( compute_stress_gpu), dim3(dimGrid), dim3(dimBlock), 0, 0, d_pos, d_dist, d_out, n); hipMemcpy(&out, d_out, sizeof(double), hipMemcpyDeviceToHost); double normal_ans = compute_stress(pos, dist, n); printf("%f\n", out); printf("%f\n", normal_ans); hipFree(d_out); hipFree(d_pos); hipFree(d_dist); free(pos); free(dist); hipDeviceReset(); return 0; }
c1c9afd4cf6eb21242bcbe52370002c6c1c0524f.cu
#include<stdio.h> #include<math.h> #include<cuda.h> #include<cuda_runtime.h> #include<device_launch_parameters.h> #include<cstdlib> #include"compute_stress.h" //#include<Python.h> //#include<helper_cuda.h> #define BLOCK_X 32 #define BLOCK_Y 32 __global__ void compute_stress_gpu(double *pos, double *dist, double *out, int n_nodes){ int i = threadIdx.x + blockIdx.x * BLOCK_X; int j = threadIdx.y + blockIdx.y * BLOCK_Y; int idxx = threadIdx.x; int idxy = threadIdx.y; __shared__ double shared_nums[BLOCK_X][BLOCK_Y]; if (i == j || i >= n_nodes || j >= n_nodes){ shared_nums[idxx][idxy] = 0; } else{ double d = sqrt((pos[2 * i] - pos[2 * j]) * (pos[2 * i] - pos[2 * j]) + (pos[2 * i + 1] - pos[2 * j + 1]) * (pos[2 * i + 1] - pos[2 * j + 1])); shared_nums[idxx][idxy] = (d / dist[i * n_nodes + j] - 1); shared_nums[idxx][idxy] *= shared_nums[idxx][idxy]; } // reduction on Y for (unsigned int stride = BLOCK_Y / 2; stride > 0; stride /= 2){ __syncthreads(); if (idxy < stride && idxy + stride < n_nodes){ shared_nums[idxx][idxy] += shared_nums[idxx][idxy + stride]; } } // reduction on X if (idxy == 0) { for (unsigned int stride = BLOCK_X / 2; stride > 0; stride /= 2) { __syncthreads(); if (idxx < stride && idxx + stride < n_nodes) { shared_nums[idxx][0] += shared_nums[idxx + stride][0]; } } } // atomic sum __syncthreads(); if (idxx == 0 && idxy == 0){ atomicAdd(out, shared_nums[0][0]); } } double compute_stress(double *pos, double *dist, int n){ int i, j; double ans = 0; for (i = 0; i < n; ++i){ for (j = 0; j < n; ++j){ if (i == j){ continue; } double d = sqrt((pos[2 * i] - pos[2 * j]) * (pos[2 * i] - pos[2 * j]) + (pos[2 * i + 1] - pos[2 * j + 1]) * (pos[2 * i + 1] - pos[2 * j + 1])); d = (d / dist[i * n + j] - 1); d *= d; ans += d; } } return ans; } int f(int n){ double *pos, *dist, out; double *d_pos, *d_dist, *d_out; pos = (double*)malloc(sizeof(double) * n * 2); dist = (double*)malloc(sizeof(double) * n * n); cudaMalloc((void**)&d_pos, sizeof(double) * n * 2); 
cudaMalloc((void**)&d_dist, sizeof(double) * n * n); cudaMalloc((void**)&d_out, sizeof(double)); // initialize for (int i = 0; i < n; ++i){ pos[i * 2] = rand() / double(RAND_MAX); pos[i * 2 + 1] = rand() / (double)(RAND_MAX); } for (int i = 0; i < n * n; ++i){ dist[i] = double(rand() % 10) + 1; } cudaMemcpy(d_pos, pos, sizeof(double) * n * 2, cudaMemcpyHostToDevice); cudaMemcpy(d_dist, dist, sizeof(double) * n * n, cudaMemcpyHostToDevice); cudaMemset(d_out, 0, sizeof(double)); int bx = 1 + (n - 1) / BLOCK_X; int by = 1 + (n - 1) / BLOCK_Y; dim3 dimGrid(bx, by); dim3 dimBlock(BLOCK_X, BLOCK_Y); compute_stress_gpu<<<dimGrid, dimBlock>>>(d_pos, d_dist, d_out, n); cudaMemcpy(&out, d_out, sizeof(double), cudaMemcpyDeviceToHost); double normal_ans = compute_stress(pos, dist, n); printf("%f\n", out); printf("%f\n", normal_ans); cudaFree(d_out); cudaFree(d_pos); cudaFree(d_dist); free(pos); free(dist); cudaDeviceReset(); return 0; }
db9f587e52f31f7fee7d6b9611e999f050d6ef1f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2021 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for * license information. */ #ifdef USE_ROCM #include "cuda_regression_objective.hpp" #include <LightGBM/cuda/cuda_algorithms.hpp> namespace LightGBM { template <typename HOST_OBJECTIVE> void CUDARegressionObjectiveInterface<HOST_OBJECTIVE>::Init(const Metadata& metadata, data_size_t num_data) { CUDAObjectiveInterface<HOST_OBJECTIVE>::Init(metadata, num_data); const data_size_t num_get_gradients_blocks = (this->num_data_ + GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; cuda_block_buffer_.Resize(static_cast<size_t>(num_get_gradients_blocks)); if (this->sqrt_) { cuda_trans_label_.Resize(this->trans_label_.size()); CopyFromHostToCUDADevice<label_t>(cuda_trans_label_.RawData(), this->trans_label_.data(), this->trans_label_.size(), __FILE__, __LINE__); this->cuda_labels_ = cuda_trans_label_.RawData(); } } template void CUDARegressionObjectiveInterface<RegressionL2loss>::Init(const Metadata& metadata, data_size_t num_data); template void CUDARegressionObjectiveInterface<RegressionL1loss>::Init(const Metadata& metadata, data_size_t num_data); template void CUDARegressionObjectiveInterface<RegressionHuberLoss>::Init(const Metadata& metadata, data_size_t num_data); template void CUDARegressionObjectiveInterface<RegressionFairLoss>::Init(const Metadata& metadata, data_size_t num_data); template void CUDARegressionObjectiveInterface<RegressionPoissonLoss>::Init(const Metadata& metadata, data_size_t num_data); template void CUDARegressionObjectiveInterface<RegressionQuantileloss>::Init(const Metadata& metadata, data_size_t num_data); template <typename HOST_OBJECTIVE> double CUDARegressionObjectiveInterface<HOST_OBJECTIVE>::LaunchCalcInitScoreKernel(const int /*class_id*/) const { double label_sum = 0.0f, weight_sum = 
0.0f; if (this->cuda_weights_ == nullptr) { ShuffleReduceSumGlobal<label_t, double>(this->cuda_labels_, static_cast<size_t>(this->num_data_), cuda_block_buffer_.RawData()); CopyFromCUDADeviceToHost<double>(&label_sum, cuda_block_buffer_.RawData(), 1, __FILE__, __LINE__); weight_sum = static_cast<double>(this->num_data_); } else { ShuffleReduceDotProdGlobal<label_t, double>(this->cuda_labels_, this->cuda_weights_, static_cast<size_t>(this->num_data_), cuda_block_buffer_.RawData()); CopyFromCUDADeviceToHost<double>(&label_sum, cuda_block_buffer_.RawData(), 1, __FILE__, __LINE__); ShuffleReduceSumGlobal<label_t, double>(this->cuda_weights_, static_cast<size_t>(this->num_data_), cuda_block_buffer_.RawData()); CopyFromCUDADeviceToHost<double>(&weight_sum, cuda_block_buffer_.RawData(), 1, __FILE__, __LINE__); } return label_sum / weight_sum; } template double CUDARegressionObjectiveInterface<RegressionL2loss>::LaunchCalcInitScoreKernel(const int class_id) const; template double CUDARegressionObjectiveInterface<RegressionL1loss>::LaunchCalcInitScoreKernel(const int class_id) const; template double CUDARegressionObjectiveInterface<RegressionHuberLoss>::LaunchCalcInitScoreKernel(const int class_id) const; template double CUDARegressionObjectiveInterface<RegressionFairLoss>::LaunchCalcInitScoreKernel(const int class_id) const; template double CUDARegressionObjectiveInterface<RegressionPoissonLoss>::LaunchCalcInitScoreKernel(const int class_id) const; template double CUDARegressionObjectiveInterface<RegressionQuantileloss>::LaunchCalcInitScoreKernel(const int class_id) const; __global__ void ConvertOutputCUDAKernel_Regression(const bool sqrt, const data_size_t num_data, const double* input, double* output) { const int data_index = static_cast<data_size_t>(blockIdx.x * blockDim.x + threadIdx.x); if (data_index < num_data) { if (sqrt) { const double sign = input[data_index] >= 0.0f ? 
1 : -1; output[data_index] = sign * input[data_index] * input[data_index]; } else { output[data_index] = input[data_index]; } } } const double* CUDARegressionL2loss::LaunchConvertOutputCUDAKernel(const data_size_t num_data, const double* input, double* output) const { const int num_blocks = (num_data + GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; if (sqrt_) { hipLaunchKernelGGL(( ConvertOutputCUDAKernel_Regression), dim3(num_blocks), dim3(GET_GRADIENTS_BLOCK_SIZE_REGRESSION), 0, 0, sqrt_, num_data, input, output); return output; } else { return input; } } template <bool USE_WEIGHT> __global__ void GetGradientsKernel_RegressionL2(const double* cuda_scores, const label_t* cuda_labels, const label_t* cuda_weights, const data_size_t num_data, score_t* cuda_out_gradients, score_t* cuda_out_hessians) { const data_size_t data_index = static_cast<data_size_t>(blockDim.x * blockIdx.x + threadIdx.x); if (data_index < num_data) { if (!USE_WEIGHT) { cuda_out_gradients[data_index] = static_cast<score_t>(cuda_scores[data_index] - cuda_labels[data_index]); cuda_out_hessians[data_index] = 1.0f; } else { const score_t weight = static_cast<score_t>(cuda_weights[data_index]); cuda_out_gradients[data_index] = static_cast<score_t>(cuda_scores[data_index] - cuda_labels[data_index]) * weight; cuda_out_hessians[data_index] = weight; } } } void CUDARegressionL2loss::LaunchGetGradientsKernel(const double* score, score_t* gradients, score_t* hessians) const { const int num_blocks = (num_data_ + GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; if (cuda_weights_ == nullptr) { hipLaunchKernelGGL(( GetGradientsKernel_RegressionL2<false>), dim3(num_blocks), dim3(GET_GRADIENTS_BLOCK_SIZE_REGRESSION), 0, 0, score, cuda_labels_, nullptr, num_data_, gradients, hessians); } else { hipLaunchKernelGGL(( GetGradientsKernel_RegressionL2<true>), dim3(num_blocks), dim3(GET_GRADIENTS_BLOCK_SIZE_REGRESSION), 0, 0, score, cuda_labels_, 
cuda_weights_, num_data_, gradients, hessians); } } double CUDARegressionL1loss::LaunchCalcInitScoreKernel(const int /*class_id*/) const { const double alpha = 0.5f; if (cuda_weights_ == nullptr) { PercentileGlobal<label_t, data_size_t, label_t, double, false, false>( cuda_labels_, nullptr, cuda_data_indices_buffer_.RawData(), nullptr, nullptr, alpha, num_data_, cuda_percentile_result_.RawData()); } else { PercentileGlobal<label_t, data_size_t, label_t, double, false, true>( cuda_labels_, cuda_weights_, cuda_data_indices_buffer_.RawData(), cuda_weights_prefix_sum_.RawData(), cuda_weights_prefix_sum_buffer_.RawData(), alpha, num_data_, cuda_percentile_result_.RawData()); } label_t percentile_result = 0.0f; CopyFromCUDADeviceToHost<label_t>(&percentile_result, cuda_percentile_result_.RawData(), 1, __FILE__, __LINE__); SynchronizeCUDADevice(__FILE__, __LINE__); return static_cast<label_t>(percentile_result); } template <bool USE_WEIGHT> __global__ void GetGradientsKernel_RegressionL1(const double* cuda_scores, const label_t* cuda_labels, const label_t* cuda_weights, const data_size_t num_data, score_t* cuda_out_gradients, score_t* cuda_out_hessians) { const data_size_t data_index = static_cast<data_size_t>(blockDim.x * blockIdx.x + threadIdx.x); if (data_index < num_data) { if (!USE_WEIGHT) { const double diff = cuda_scores[data_index] - static_cast<double>(cuda_labels[data_index]); cuda_out_gradients[data_index] = static_cast<score_t>((diff > 0.0f) - (diff < 0.0f)); cuda_out_hessians[data_index] = 1.0f; } else { const double diff = cuda_scores[data_index] - static_cast<double>(cuda_labels[data_index]); const score_t weight = static_cast<score_t>(cuda_weights[data_index]); cuda_out_gradients[data_index] = static_cast<score_t>((diff > 0.0f) - (diff < 0.0f)) * weight; cuda_out_hessians[data_index] = weight; } } } void CUDARegressionL1loss::LaunchGetGradientsKernel(const double* score, score_t* gradients, score_t* hessians) const { const int num_blocks = (num_data_ + 
GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; if (cuda_weights_ == nullptr) { hipLaunchKernelGGL(( GetGradientsKernel_RegressionL1<false>), dim3(num_blocks), dim3(GET_GRADIENTS_BLOCK_SIZE_REGRESSION), 0, 0, score, cuda_labels_, nullptr, num_data_, gradients, hessians); } else { hipLaunchKernelGGL(( GetGradientsKernel_RegressionL1<true>), dim3(num_blocks), dim3(GET_GRADIENTS_BLOCK_SIZE_REGRESSION), 0, 0, score, cuda_labels_, cuda_weights_, num_data_, gradients, hessians); } } template <bool USE_WEIGHT> __global__ void RenewTreeOutputCUDAKernel_RegressionL1( const double* score, const label_t* label, const label_t* weight, double* residual_buffer, label_t* weight_by_leaf, double* weight_prefix_sum_buffer, const data_size_t* data_indices_in_leaf, const data_size_t* num_data_in_leaf, const data_size_t* data_start_in_leaf, data_size_t* data_indices_buffer, double* leaf_value) { const int leaf_index = static_cast<int>(blockIdx.x); const data_size_t data_start = data_start_in_leaf[leaf_index]; const data_size_t num_data = num_data_in_leaf[leaf_index]; data_size_t* data_indices_buffer_pointer = data_indices_buffer + data_start; const label_t* weight_by_leaf_pointer = weight_by_leaf + data_start; double* weight_prefix_sum_buffer_pointer = weight_prefix_sum_buffer + data_start; const double* residual_buffer_pointer = residual_buffer + data_start; const double alpha = 0.5f; for (data_size_t inner_data_index = data_start + static_cast<data_size_t>(threadIdx.x); inner_data_index < data_start + num_data; inner_data_index += static_cast<data_size_t>(blockDim.x)) { const data_size_t data_index = data_indices_in_leaf[inner_data_index]; const label_t data_label = label[data_index]; const double data_score = score[data_index]; residual_buffer[inner_data_index] = static_cast<double>(data_label) - data_score; if (USE_WEIGHT) { weight_by_leaf[inner_data_index] = weight[data_index]; } } __syncthreads(); const double renew_leaf_value = 
PercentileDevice<double, data_size_t, label_t, double, false, USE_WEIGHT>( residual_buffer_pointer, weight_by_leaf_pointer, data_indices_buffer_pointer, weight_prefix_sum_buffer_pointer, alpha, num_data); if (threadIdx.x == 0) { leaf_value[leaf_index] = renew_leaf_value; } } void CUDARegressionL1loss::LaunchRenewTreeOutputCUDAKernel( const double* score, const data_size_t* data_indices_in_leaf, const data_size_t* num_data_in_leaf, const data_size_t* data_start_in_leaf, const int num_leaves, double* leaf_value) const { if (cuda_weights_ == nullptr) { hipLaunchKernelGGL(( RenewTreeOutputCUDAKernel_RegressionL1<false>), dim3(num_leaves), dim3(GET_GRADIENTS_BLOCK_SIZE_REGRESSION / 2), 0, 0, score, cuda_labels_, cuda_weights_, cuda_residual_buffer_.RawData(), cuda_weight_by_leaf_buffer_.RawData(), cuda_weights_prefix_sum_.RawData(), data_indices_in_leaf, num_data_in_leaf, data_start_in_leaf, cuda_data_indices_buffer_.RawData(), leaf_value); } else { hipLaunchKernelGGL(( RenewTreeOutputCUDAKernel_RegressionL1<true>), dim3(num_leaves), dim3(GET_GRADIENTS_BLOCK_SIZE_REGRESSION / 4), 0, 0, score, cuda_labels_, cuda_weights_, cuda_residual_buffer_.RawData(), cuda_weight_by_leaf_buffer_.RawData(), cuda_weights_prefix_sum_.RawData(), data_indices_in_leaf, num_data_in_leaf, data_start_in_leaf, cuda_data_indices_buffer_.RawData(), leaf_value); } SynchronizeCUDADevice(__FILE__, __LINE__); } template <bool USE_WEIGHT> __global__ void GetGradientsKernel_Huber(const double* cuda_scores, const label_t* cuda_labels, const label_t* cuda_weights, const data_size_t num_data, const double alpha, score_t* cuda_out_gradients, score_t* cuda_out_hessians) { const data_size_t data_index = static_cast<data_size_t>(blockDim.x * blockIdx.x + threadIdx.x); if (data_index < num_data) { if (!USE_WEIGHT) { const double diff = cuda_scores[data_index] - static_cast<double>(cuda_labels[data_index]); if (fabs(diff) <= alpha) { cuda_out_gradients[data_index] = static_cast<score_t>(diff); } else { const 
score_t sign = static_cast<score_t>((diff > 0.0f) - (diff < 0.0f)); cuda_out_gradients[data_index] = static_cast<score_t>(sign * alpha); } cuda_out_hessians[data_index] = 1.0f; } else { const double diff = cuda_scores[data_index] - static_cast<double>(cuda_labels[data_index]); const score_t weight = static_cast<score_t>(cuda_weights[data_index]); if (fabs(diff) <= alpha) { cuda_out_gradients[data_index] = static_cast<score_t>(diff) * weight; } else { const score_t sign = static_cast<score_t>((diff > 0.0f) - (diff < 0.0f)); cuda_out_gradients[data_index] = static_cast<score_t>(sign * alpha) * weight; } cuda_out_hessians[data_index] = weight; } } } void CUDARegressionHuberLoss::LaunchGetGradientsKernel(const double* score, score_t* gradients, score_t* hessians) const { const int num_blocks = (num_data_ + GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; if (cuda_weights_ == nullptr) { hipLaunchKernelGGL(( GetGradientsKernel_Huber<false>), dim3(num_blocks), dim3(GET_GRADIENTS_BLOCK_SIZE_REGRESSION), 0, 0, score, cuda_labels_, nullptr, num_data_, alpha_, gradients, hessians); } else { hipLaunchKernelGGL(( GetGradientsKernel_Huber<true>), dim3(num_blocks), dim3(GET_GRADIENTS_BLOCK_SIZE_REGRESSION), 0, 0, score, cuda_labels_, cuda_weights_, num_data_, alpha_, gradients, hessians); } } template <bool USE_WEIGHT> __global__ void GetGradientsKernel_Fair(const double* cuda_scores, const label_t* cuda_labels, const label_t* cuda_weights, const data_size_t num_data, const double c, score_t* cuda_out_gradients, score_t* cuda_out_hessians) { const data_size_t data_index = static_cast<data_size_t>(blockDim.x * blockIdx.x + threadIdx.x); if (data_index < num_data) { if (!USE_WEIGHT) { const double diff = cuda_scores[data_index] - static_cast<double>(cuda_labels[data_index]); cuda_out_gradients[data_index] = static_cast<score_t>(c * diff / (fabs(diff) + c)); cuda_out_hessians[data_index] = static_cast<score_t>(c * c / ((fabs(diff) + c) * (fabs(diff) + 
c))); } else { const double diff = cuda_scores[data_index] - static_cast<double>(cuda_labels[data_index]); const score_t weight = static_cast<score_t>(cuda_weights[data_index]); cuda_out_gradients[data_index] = static_cast<score_t>(c * diff / (fabs(diff) + c) * weight); cuda_out_hessians[data_index] = static_cast<score_t>(c * c / ((fabs(diff) + c) * (fabs(diff) + c)) * weight); } } } void CUDARegressionFairLoss::LaunchGetGradientsKernel(const double* score, score_t* gradients, score_t* hessians) const { const int num_blocks = (num_data_ + GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; if (cuda_weights_ == nullptr) { hipLaunchKernelGGL(( GetGradientsKernel_Fair<false>), dim3(num_blocks), dim3(GET_GRADIENTS_BLOCK_SIZE_REGRESSION), 0, 0, score, cuda_labels_, nullptr, num_data_, c_, gradients, hessians); } else { hipLaunchKernelGGL(( GetGradientsKernel_Fair<true>), dim3(num_blocks), dim3(GET_GRADIENTS_BLOCK_SIZE_REGRESSION), 0, 0, score, cuda_labels_, cuda_weights_, num_data_, c_, gradients, hessians); } } void CUDARegressionPoissonLoss::LaunchCheckLabelKernel() const { ShuffleReduceSumGlobal<label_t, double>(cuda_labels_, static_cast<size_t>(num_data_), cuda_block_buffer_.RawData()); double label_sum = 0.0f; CopyFromCUDADeviceToHost<double>(&label_sum, cuda_block_buffer_.RawData(), 1, __FILE__, __LINE__); ShuffleReduceMinGlobal<label_t, double>(cuda_labels_, static_cast<size_t>(num_data_), cuda_block_buffer_.RawData()); double label_min = 0.0f; CopyFromCUDADeviceToHost<double>(&label_min, cuda_block_buffer_.RawData(), 1, __FILE__, __LINE__); if (label_min < 0.0f) { Log::Fatal("[%s]: at least one target label is negative", GetName()); } if (label_sum == 0.0f) { Log::Fatal("[%s]: sum of labels is zero", GetName()); } } template <bool USE_WEIGHT> __global__ void GetGradientsKernel_Poisson(const double* cuda_scores, const label_t* cuda_labels, const label_t* cuda_weights, const data_size_t num_data, const double max_delta_step, score_t* 
cuda_out_gradients, score_t* cuda_out_hessians) { const data_size_t data_index = static_cast<data_size_t>(blockDim.x * blockIdx.x + threadIdx.x); const double exp_max_delta_step = ::exp(max_delta_step); if (data_index < num_data) { if (!USE_WEIGHT) { const double exp_score = exp(cuda_scores[data_index]); cuda_out_gradients[data_index] = static_cast<score_t>(exp_score - cuda_labels[data_index]); cuda_out_hessians[data_index] = static_cast<score_t>(exp_score * exp_max_delta_step); } else { const double exp_score = exp(cuda_scores[data_index]); const score_t weight = static_cast<score_t>(cuda_weights[data_index]); cuda_out_gradients[data_index] = static_cast<score_t>((exp_score - cuda_labels[data_index]) * weight); cuda_out_hessians[data_index] = static_cast<score_t>(exp_score * exp_max_delta_step * weight); } } } void CUDARegressionPoissonLoss::LaunchGetGradientsKernel(const double* score, score_t* gradients, score_t* hessians) const { const int num_blocks = (num_data_ + GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; if (cuda_weights_ == nullptr) { hipLaunchKernelGGL(( GetGradientsKernel_Poisson<false>), dim3(num_blocks), dim3(GET_GRADIENTS_BLOCK_SIZE_REGRESSION), 0, 0, score, cuda_labels_, nullptr, num_data_, max_delta_step_, gradients, hessians); } else { hipLaunchKernelGGL(( GetGradientsKernel_Poisson<true>), dim3(num_blocks), dim3(GET_GRADIENTS_BLOCK_SIZE_REGRESSION), 0, 0, score, cuda_labels_, cuda_weights_, num_data_, max_delta_step_, gradients, hessians); } } __global__ void ConvertOutputCUDAKernel_Regression_Poisson(const data_size_t num_data, const double* input, double* output) { const int data_index = static_cast<data_size_t>(blockIdx.x * blockDim.x + threadIdx.x); if (data_index < num_data) { output[data_index] = exp(input[data_index]); } } const double* CUDARegressionPoissonLoss::LaunchConvertOutputCUDAKernel(const data_size_t num_data, const double* input, double* output) const { const int num_blocks = (num_data + 
GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; hipLaunchKernelGGL(( ConvertOutputCUDAKernel_Regression_Poisson), dim3(num_blocks), dim3(GET_GRADIENTS_BLOCK_SIZE_REGRESSION), 0, 0, num_data, input, output); return output; } } // namespace LightGBM #endif // USE_ROCM
db9f587e52f31f7fee7d6b9611e999f050d6ef1f.cu
/*! * Copyright (c) 2021 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for * license information. */ #ifdef USE_CUDA #include "cuda_regression_objective.hpp" #include <LightGBM/cuda/cuda_algorithms.hpp> namespace LightGBM { template <typename HOST_OBJECTIVE> void CUDARegressionObjectiveInterface<HOST_OBJECTIVE>::Init(const Metadata& metadata, data_size_t num_data) { CUDAObjectiveInterface<HOST_OBJECTIVE>::Init(metadata, num_data); const data_size_t num_get_gradients_blocks = (this->num_data_ + GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; cuda_block_buffer_.Resize(static_cast<size_t>(num_get_gradients_blocks)); if (this->sqrt_) { cuda_trans_label_.Resize(this->trans_label_.size()); CopyFromHostToCUDADevice<label_t>(cuda_trans_label_.RawData(), this->trans_label_.data(), this->trans_label_.size(), __FILE__, __LINE__); this->cuda_labels_ = cuda_trans_label_.RawData(); } } template void CUDARegressionObjectiveInterface<RegressionL2loss>::Init(const Metadata& metadata, data_size_t num_data); template void CUDARegressionObjectiveInterface<RegressionL1loss>::Init(const Metadata& metadata, data_size_t num_data); template void CUDARegressionObjectiveInterface<RegressionHuberLoss>::Init(const Metadata& metadata, data_size_t num_data); template void CUDARegressionObjectiveInterface<RegressionFairLoss>::Init(const Metadata& metadata, data_size_t num_data); template void CUDARegressionObjectiveInterface<RegressionPoissonLoss>::Init(const Metadata& metadata, data_size_t num_data); template void CUDARegressionObjectiveInterface<RegressionQuantileloss>::Init(const Metadata& metadata, data_size_t num_data); template <typename HOST_OBJECTIVE> double CUDARegressionObjectiveInterface<HOST_OBJECTIVE>::LaunchCalcInitScoreKernel(const int /*class_id*/) const { double label_sum = 0.0f, weight_sum = 0.0f; if (this->cuda_weights_ == nullptr) { ShuffleReduceSumGlobal<label_t, 
double>(this->cuda_labels_, static_cast<size_t>(this->num_data_), cuda_block_buffer_.RawData()); CopyFromCUDADeviceToHost<double>(&label_sum, cuda_block_buffer_.RawData(), 1, __FILE__, __LINE__); weight_sum = static_cast<double>(this->num_data_); } else { ShuffleReduceDotProdGlobal<label_t, double>(this->cuda_labels_, this->cuda_weights_, static_cast<size_t>(this->num_data_), cuda_block_buffer_.RawData()); CopyFromCUDADeviceToHost<double>(&label_sum, cuda_block_buffer_.RawData(), 1, __FILE__, __LINE__); ShuffleReduceSumGlobal<label_t, double>(this->cuda_weights_, static_cast<size_t>(this->num_data_), cuda_block_buffer_.RawData()); CopyFromCUDADeviceToHost<double>(&weight_sum, cuda_block_buffer_.RawData(), 1, __FILE__, __LINE__); } return label_sum / weight_sum; } template double CUDARegressionObjectiveInterface<RegressionL2loss>::LaunchCalcInitScoreKernel(const int class_id) const; template double CUDARegressionObjectiveInterface<RegressionL1loss>::LaunchCalcInitScoreKernel(const int class_id) const; template double CUDARegressionObjectiveInterface<RegressionHuberLoss>::LaunchCalcInitScoreKernel(const int class_id) const; template double CUDARegressionObjectiveInterface<RegressionFairLoss>::LaunchCalcInitScoreKernel(const int class_id) const; template double CUDARegressionObjectiveInterface<RegressionPoissonLoss>::LaunchCalcInitScoreKernel(const int class_id) const; template double CUDARegressionObjectiveInterface<RegressionQuantileloss>::LaunchCalcInitScoreKernel(const int class_id) const; __global__ void ConvertOutputCUDAKernel_Regression(const bool sqrt, const data_size_t num_data, const double* input, double* output) { const int data_index = static_cast<data_size_t>(blockIdx.x * blockDim.x + threadIdx.x); if (data_index < num_data) { if (sqrt) { const double sign = input[data_index] >= 0.0f ? 
1 : -1; output[data_index] = sign * input[data_index] * input[data_index]; } else { output[data_index] = input[data_index]; } } } const double* CUDARegressionL2loss::LaunchConvertOutputCUDAKernel(const data_size_t num_data, const double* input, double* output) const { const int num_blocks = (num_data + GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; if (sqrt_) { ConvertOutputCUDAKernel_Regression<<<num_blocks, GET_GRADIENTS_BLOCK_SIZE_REGRESSION>>>(sqrt_, num_data, input, output); return output; } else { return input; } } template <bool USE_WEIGHT> __global__ void GetGradientsKernel_RegressionL2(const double* cuda_scores, const label_t* cuda_labels, const label_t* cuda_weights, const data_size_t num_data, score_t* cuda_out_gradients, score_t* cuda_out_hessians) { const data_size_t data_index = static_cast<data_size_t>(blockDim.x * blockIdx.x + threadIdx.x); if (data_index < num_data) { if (!USE_WEIGHT) { cuda_out_gradients[data_index] = static_cast<score_t>(cuda_scores[data_index] - cuda_labels[data_index]); cuda_out_hessians[data_index] = 1.0f; } else { const score_t weight = static_cast<score_t>(cuda_weights[data_index]); cuda_out_gradients[data_index] = static_cast<score_t>(cuda_scores[data_index] - cuda_labels[data_index]) * weight; cuda_out_hessians[data_index] = weight; } } } void CUDARegressionL2loss::LaunchGetGradientsKernel(const double* score, score_t* gradients, score_t* hessians) const { const int num_blocks = (num_data_ + GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; if (cuda_weights_ == nullptr) { GetGradientsKernel_RegressionL2<false><<<num_blocks, GET_GRADIENTS_BLOCK_SIZE_REGRESSION>>>(score, cuda_labels_, nullptr, num_data_, gradients, hessians); } else { GetGradientsKernel_RegressionL2<true><<<num_blocks, GET_GRADIENTS_BLOCK_SIZE_REGRESSION>>>(score, cuda_labels_, cuda_weights_, num_data_, gradients, hessians); } } double CUDARegressionL1loss::LaunchCalcInitScoreKernel(const int 
/*class_id*/) const { const double alpha = 0.5f; if (cuda_weights_ == nullptr) { PercentileGlobal<label_t, data_size_t, label_t, double, false, false>( cuda_labels_, nullptr, cuda_data_indices_buffer_.RawData(), nullptr, nullptr, alpha, num_data_, cuda_percentile_result_.RawData()); } else { PercentileGlobal<label_t, data_size_t, label_t, double, false, true>( cuda_labels_, cuda_weights_, cuda_data_indices_buffer_.RawData(), cuda_weights_prefix_sum_.RawData(), cuda_weights_prefix_sum_buffer_.RawData(), alpha, num_data_, cuda_percentile_result_.RawData()); } label_t percentile_result = 0.0f; CopyFromCUDADeviceToHost<label_t>(&percentile_result, cuda_percentile_result_.RawData(), 1, __FILE__, __LINE__); SynchronizeCUDADevice(__FILE__, __LINE__); return static_cast<label_t>(percentile_result); } template <bool USE_WEIGHT> __global__ void GetGradientsKernel_RegressionL1(const double* cuda_scores, const label_t* cuda_labels, const label_t* cuda_weights, const data_size_t num_data, score_t* cuda_out_gradients, score_t* cuda_out_hessians) { const data_size_t data_index = static_cast<data_size_t>(blockDim.x * blockIdx.x + threadIdx.x); if (data_index < num_data) { if (!USE_WEIGHT) { const double diff = cuda_scores[data_index] - static_cast<double>(cuda_labels[data_index]); cuda_out_gradients[data_index] = static_cast<score_t>((diff > 0.0f) - (diff < 0.0f)); cuda_out_hessians[data_index] = 1.0f; } else { const double diff = cuda_scores[data_index] - static_cast<double>(cuda_labels[data_index]); const score_t weight = static_cast<score_t>(cuda_weights[data_index]); cuda_out_gradients[data_index] = static_cast<score_t>((diff > 0.0f) - (diff < 0.0f)) * weight; cuda_out_hessians[data_index] = weight; } } } void CUDARegressionL1loss::LaunchGetGradientsKernel(const double* score, score_t* gradients, score_t* hessians) const { const int num_blocks = (num_data_ + GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; if (cuda_weights_ == nullptr) { 
GetGradientsKernel_RegressionL1<false><<<num_blocks, GET_GRADIENTS_BLOCK_SIZE_REGRESSION>>>(score, cuda_labels_, nullptr, num_data_, gradients, hessians); } else { GetGradientsKernel_RegressionL1<true><<<num_blocks, GET_GRADIENTS_BLOCK_SIZE_REGRESSION>>>(score, cuda_labels_, cuda_weights_, num_data_, gradients, hessians); } } template <bool USE_WEIGHT> __global__ void RenewTreeOutputCUDAKernel_RegressionL1( const double* score, const label_t* label, const label_t* weight, double* residual_buffer, label_t* weight_by_leaf, double* weight_prefix_sum_buffer, const data_size_t* data_indices_in_leaf, const data_size_t* num_data_in_leaf, const data_size_t* data_start_in_leaf, data_size_t* data_indices_buffer, double* leaf_value) { const int leaf_index = static_cast<int>(blockIdx.x); const data_size_t data_start = data_start_in_leaf[leaf_index]; const data_size_t num_data = num_data_in_leaf[leaf_index]; data_size_t* data_indices_buffer_pointer = data_indices_buffer + data_start; const label_t* weight_by_leaf_pointer = weight_by_leaf + data_start; double* weight_prefix_sum_buffer_pointer = weight_prefix_sum_buffer + data_start; const double* residual_buffer_pointer = residual_buffer + data_start; const double alpha = 0.5f; for (data_size_t inner_data_index = data_start + static_cast<data_size_t>(threadIdx.x); inner_data_index < data_start + num_data; inner_data_index += static_cast<data_size_t>(blockDim.x)) { const data_size_t data_index = data_indices_in_leaf[inner_data_index]; const label_t data_label = label[data_index]; const double data_score = score[data_index]; residual_buffer[inner_data_index] = static_cast<double>(data_label) - data_score; if (USE_WEIGHT) { weight_by_leaf[inner_data_index] = weight[data_index]; } } __syncthreads(); const double renew_leaf_value = PercentileDevice<double, data_size_t, label_t, double, false, USE_WEIGHT>( residual_buffer_pointer, weight_by_leaf_pointer, data_indices_buffer_pointer, weight_prefix_sum_buffer_pointer, alpha, num_data); 
if (threadIdx.x == 0) { leaf_value[leaf_index] = renew_leaf_value; } } void CUDARegressionL1loss::LaunchRenewTreeOutputCUDAKernel( const double* score, const data_size_t* data_indices_in_leaf, const data_size_t* num_data_in_leaf, const data_size_t* data_start_in_leaf, const int num_leaves, double* leaf_value) const { if (cuda_weights_ == nullptr) { RenewTreeOutputCUDAKernel_RegressionL1<false><<<num_leaves, GET_GRADIENTS_BLOCK_SIZE_REGRESSION / 2>>>( score, cuda_labels_, cuda_weights_, cuda_residual_buffer_.RawData(), cuda_weight_by_leaf_buffer_.RawData(), cuda_weights_prefix_sum_.RawData(), data_indices_in_leaf, num_data_in_leaf, data_start_in_leaf, cuda_data_indices_buffer_.RawData(), leaf_value); } else { RenewTreeOutputCUDAKernel_RegressionL1<true><<<num_leaves, GET_GRADIENTS_BLOCK_SIZE_REGRESSION / 4>>>( score, cuda_labels_, cuda_weights_, cuda_residual_buffer_.RawData(), cuda_weight_by_leaf_buffer_.RawData(), cuda_weights_prefix_sum_.RawData(), data_indices_in_leaf, num_data_in_leaf, data_start_in_leaf, cuda_data_indices_buffer_.RawData(), leaf_value); } SynchronizeCUDADevice(__FILE__, __LINE__); } template <bool USE_WEIGHT> __global__ void GetGradientsKernel_Huber(const double* cuda_scores, const label_t* cuda_labels, const label_t* cuda_weights, const data_size_t num_data, const double alpha, score_t* cuda_out_gradients, score_t* cuda_out_hessians) { const data_size_t data_index = static_cast<data_size_t>(blockDim.x * blockIdx.x + threadIdx.x); if (data_index < num_data) { if (!USE_WEIGHT) { const double diff = cuda_scores[data_index] - static_cast<double>(cuda_labels[data_index]); if (fabs(diff) <= alpha) { cuda_out_gradients[data_index] = static_cast<score_t>(diff); } else { const score_t sign = static_cast<score_t>((diff > 0.0f) - (diff < 0.0f)); cuda_out_gradients[data_index] = static_cast<score_t>(sign * alpha); } cuda_out_hessians[data_index] = 1.0f; } else { const double diff = cuda_scores[data_index] - static_cast<double>(cuda_labels[data_index]); 
const score_t weight = static_cast<score_t>(cuda_weights[data_index]); if (fabs(diff) <= alpha) { cuda_out_gradients[data_index] = static_cast<score_t>(diff) * weight; } else { const score_t sign = static_cast<score_t>((diff > 0.0f) - (diff < 0.0f)); cuda_out_gradients[data_index] = static_cast<score_t>(sign * alpha) * weight; } cuda_out_hessians[data_index] = weight; } } } void CUDARegressionHuberLoss::LaunchGetGradientsKernel(const double* score, score_t* gradients, score_t* hessians) const { const int num_blocks = (num_data_ + GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; if (cuda_weights_ == nullptr) { GetGradientsKernel_Huber<false><<<num_blocks, GET_GRADIENTS_BLOCK_SIZE_REGRESSION>>>(score, cuda_labels_, nullptr, num_data_, alpha_, gradients, hessians); } else { GetGradientsKernel_Huber<true><<<num_blocks, GET_GRADIENTS_BLOCK_SIZE_REGRESSION>>>(score, cuda_labels_, cuda_weights_, num_data_, alpha_, gradients, hessians); } } template <bool USE_WEIGHT> __global__ void GetGradientsKernel_Fair(const double* cuda_scores, const label_t* cuda_labels, const label_t* cuda_weights, const data_size_t num_data, const double c, score_t* cuda_out_gradients, score_t* cuda_out_hessians) { const data_size_t data_index = static_cast<data_size_t>(blockDim.x * blockIdx.x + threadIdx.x); if (data_index < num_data) { if (!USE_WEIGHT) { const double diff = cuda_scores[data_index] - static_cast<double>(cuda_labels[data_index]); cuda_out_gradients[data_index] = static_cast<score_t>(c * diff / (fabs(diff) + c)); cuda_out_hessians[data_index] = static_cast<score_t>(c * c / ((fabs(diff) + c) * (fabs(diff) + c))); } else { const double diff = cuda_scores[data_index] - static_cast<double>(cuda_labels[data_index]); const score_t weight = static_cast<score_t>(cuda_weights[data_index]); cuda_out_gradients[data_index] = static_cast<score_t>(c * diff / (fabs(diff) + c) * weight); cuda_out_hessians[data_index] = static_cast<score_t>(c * c / ((fabs(diff) + c) * 
(fabs(diff) + c)) * weight); } } } void CUDARegressionFairLoss::LaunchGetGradientsKernel(const double* score, score_t* gradients, score_t* hessians) const { const int num_blocks = (num_data_ + GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; if (cuda_weights_ == nullptr) { GetGradientsKernel_Fair<false><<<num_blocks, GET_GRADIENTS_BLOCK_SIZE_REGRESSION>>>(score, cuda_labels_, nullptr, num_data_, c_, gradients, hessians); } else { GetGradientsKernel_Fair<true><<<num_blocks, GET_GRADIENTS_BLOCK_SIZE_REGRESSION>>>(score, cuda_labels_, cuda_weights_, num_data_, c_, gradients, hessians); } } void CUDARegressionPoissonLoss::LaunchCheckLabelKernel() const { ShuffleReduceSumGlobal<label_t, double>(cuda_labels_, static_cast<size_t>(num_data_), cuda_block_buffer_.RawData()); double label_sum = 0.0f; CopyFromCUDADeviceToHost<double>(&label_sum, cuda_block_buffer_.RawData(), 1, __FILE__, __LINE__); ShuffleReduceMinGlobal<label_t, double>(cuda_labels_, static_cast<size_t>(num_data_), cuda_block_buffer_.RawData()); double label_min = 0.0f; CopyFromCUDADeviceToHost<double>(&label_min, cuda_block_buffer_.RawData(), 1, __FILE__, __LINE__); if (label_min < 0.0f) { Log::Fatal("[%s]: at least one target label is negative", GetName()); } if (label_sum == 0.0f) { Log::Fatal("[%s]: sum of labels is zero", GetName()); } } template <bool USE_WEIGHT> __global__ void GetGradientsKernel_Poisson(const double* cuda_scores, const label_t* cuda_labels, const label_t* cuda_weights, const data_size_t num_data, const double max_delta_step, score_t* cuda_out_gradients, score_t* cuda_out_hessians) { const data_size_t data_index = static_cast<data_size_t>(blockDim.x * blockIdx.x + threadIdx.x); const double exp_max_delta_step = std::exp(max_delta_step); if (data_index < num_data) { if (!USE_WEIGHT) { const double exp_score = exp(cuda_scores[data_index]); cuda_out_gradients[data_index] = static_cast<score_t>(exp_score - cuda_labels[data_index]); 
cuda_out_hessians[data_index] = static_cast<score_t>(exp_score * exp_max_delta_step); } else { const double exp_score = exp(cuda_scores[data_index]); const score_t weight = static_cast<score_t>(cuda_weights[data_index]); cuda_out_gradients[data_index] = static_cast<score_t>((exp_score - cuda_labels[data_index]) * weight); cuda_out_hessians[data_index] = static_cast<score_t>(exp_score * exp_max_delta_step * weight); } } } void CUDARegressionPoissonLoss::LaunchGetGradientsKernel(const double* score, score_t* gradients, score_t* hessians) const { const int num_blocks = (num_data_ + GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; if (cuda_weights_ == nullptr) { GetGradientsKernel_Poisson<false><<<num_blocks, GET_GRADIENTS_BLOCK_SIZE_REGRESSION>>>( score, cuda_labels_, nullptr, num_data_, max_delta_step_, gradients, hessians); } else { GetGradientsKernel_Poisson<true><<<num_blocks, GET_GRADIENTS_BLOCK_SIZE_REGRESSION>>>( score, cuda_labels_, cuda_weights_, num_data_, max_delta_step_, gradients, hessians); } } __global__ void ConvertOutputCUDAKernel_Regression_Poisson(const data_size_t num_data, const double* input, double* output) { const int data_index = static_cast<data_size_t>(blockIdx.x * blockDim.x + threadIdx.x); if (data_index < num_data) { output[data_index] = exp(input[data_index]); } } const double* CUDARegressionPoissonLoss::LaunchConvertOutputCUDAKernel(const data_size_t num_data, const double* input, double* output) const { const int num_blocks = (num_data + GET_GRADIENTS_BLOCK_SIZE_REGRESSION - 1) / GET_GRADIENTS_BLOCK_SIZE_REGRESSION; ConvertOutputCUDAKernel_Regression_Poisson<<<num_blocks, GET_GRADIENTS_BLOCK_SIZE_REGRESSION>>>(num_data, input, output); return output; } } // namespace LightGBM #endif // USE_CUDA
cdaec8fc6e6306832e220e32074cb1b4b5de84d9.hip
// !!! This is a file automatically generated by hipify!!! #include <THH/THHTensorMathCompareT.cuh> #include <THH/THHTensor.hpp> #include <THH/generic/THHTensorMathCompareT.hip> #include <THH/THHGenerateShortType.h>
cdaec8fc6e6306832e220e32074cb1b4b5de84d9.cu
#include <THC/THCTensorMathCompareT.cuh> #include <THC/THCTensor.hpp> #include <THC/generic/THCTensorMathCompareT.cu> #include <THC/THCGenerateShortType.h>
e256ea373311d74c070df7d8e876c8a4635dfece.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "archivos_csv.c" #include <time.h> __global__ void KernelGPU(float *d_a, float *d_b, float *d_c,int f1, int M, int c2){ int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if(i < f1 && j < c2){ float Pvalue = 0.0; for(int k = 0; k < M; k++){ Pvalue += d_a[i*M+k] * d_b[k*c2+j]; } d_c[i * c2 + j] = Pvalue; } } void createVector(float *a, int f1, float *b, int c2, int inteM, float *c){ // definiendo variables y reservando memoria float *d_a, *d_b, *d_c; int blocksize = 32; hipMalloc((void**)&d_a, f1 * inteM * sizeof(float)); hipMemcpy(d_a, a, f1 * inteM * sizeof(float), hipMemcpyHostToDevice); hipMalloc((void**)&d_b, inteM * c2 * sizeof(float)); hipMemcpy(d_b, b, inteM * c2 * sizeof(float), hipMemcpyHostToDevice); hipMalloc((void**)&d_c, f1 * c2 * sizeof(float)); dim3 dimBlock(blocksize , blocksize , 1); dim3 dimGrid(ceil(inteM/float(blocksize)),ceil(f1/float(blocksize)),1); hipLaunchKernelGGL(( KernelGPU), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a,d_b,d_c,f1,inteM,c2); hipMemcpy(c,d_c, f1 * c2 * sizeof(float), hipMemcpyDeviceToHost); hipFree(d_a); hipFree(d_b); hipFree(d_c); } int main(int argc, char *argv[]){ if(argc != 3){ printf("no se han ingresado los archivos necesarios\n"); } FILE *fp; fp = fopen(argv[1], "r"); if(fp==NULL){ fputs("File error",stderr); return 1; } fclose(fp); fp = fopen(argv[2], "r"); if(fp == NULL){ fputs("file error", stderr); return 1; } fclose(fp); int Rows_1 = Detected_rows(fp,argv[1]); int Columns_1 = Detected_columns(fp,argv[1]); int Rows_2 = Detected_rows(fp,argv[2]); int Columns_2 = Detected_columns(fp,argv[2]); if(Columns_1 != Rows_2){ printf("las matrizes no cumplen los requisitos para la multiplicacion entre estas"); return 1; } int inteM = Columns_1; float *Matriz1 = (float*)malloc(Rows_1 * inteM * sizeof(float)); float *Matriz2 = (float*)malloc(inteM * 
Columns_2 * sizeof(float)); float *MatrizR = (float*)malloc(Rows_1 * Columns_2 * sizeof(float)); ExtracData(fp, Matriz1,argv[1],Rows_1,inteM); ExtracData(fp, Matriz2,argv[2],inteM,Columns_2); printf("Matriz 1: \n"); printMatriz(Matriz1,Rows_1, inteM); printf("\n"); printf("matriz 2: \n"); printMatriz(Matriz2,inteM,Columns_2); printf("\n"); time_inicial = clock(); createVector(Matriz1,Rows_1,Matriz2,Columns_2,inteM,MatrizR); time_final = clock(); printf("tiempo de ejecucion: %f\n", (time_final-time_inicial/CLOCKS_PER_SEC)); printf("matriz Respuesta: \n"); printMatriz(MatrizR, Rows_1, Columns_2); printf("\n"); free(Matriz1); free(Matriz2); free(MatrizR); return 0; }
e256ea373311d74c070df7d8e876c8a4635dfece.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include "archivos_csv.c" #include <time.h> __global__ void KernelGPU(float *d_a, float *d_b, float *d_c,int f1, int M, int c2){ int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if(i < f1 && j < c2){ float Pvalue = 0.0; for(int k = 0; k < M; k++){ Pvalue += d_a[i*M+k] * d_b[k*c2+j]; } d_c[i * c2 + j] = Pvalue; } } void createVector(float *a, int f1, float *b, int c2, int inteM, float *c){ // definiendo variables y reservando memoria float *d_a, *d_b, *d_c; int blocksize = 32; cudaMalloc((void**)&d_a, f1 * inteM * sizeof(float)); cudaMemcpy(d_a, a, f1 * inteM * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_b, inteM * c2 * sizeof(float)); cudaMemcpy(d_b, b, inteM * c2 * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_c, f1 * c2 * sizeof(float)); dim3 dimBlock(blocksize , blocksize , 1); dim3 dimGrid(ceil(inteM/float(blocksize)),ceil(f1/float(blocksize)),1); KernelGPU<<< dimGrid, dimBlock>>>(d_a,d_b,d_c,f1,inteM,c2); cudaMemcpy(c,d_c, f1 * c2 * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); } int main(int argc, char *argv[]){ if(argc != 3){ printf("no se han ingresado los archivos necesarios\n"); } FILE *fp; fp = fopen(argv[1], "r"); if(fp==NULL){ fputs("File error",stderr); return 1; } fclose(fp); fp = fopen(argv[2], "r"); if(fp == NULL){ fputs("file error", stderr); return 1; } fclose(fp); int Rows_1 = Detected_rows(fp,argv[1]); int Columns_1 = Detected_columns(fp,argv[1]); int Rows_2 = Detected_rows(fp,argv[2]); int Columns_2 = Detected_columns(fp,argv[2]); if(Columns_1 != Rows_2){ printf("las matrizes no cumplen los requisitos para la multiplicacion entre estas"); return 1; } int inteM = Columns_1; float *Matriz1 = (float*)malloc(Rows_1 * inteM * sizeof(float)); float *Matriz2 = (float*)malloc(inteM * Columns_2 * sizeof(float)); float *MatrizR = (float*)malloc(Rows_1 * Columns_2 * sizeof(float)); 
ExtracData(fp, Matriz1,argv[1],Rows_1,inteM); ExtracData(fp, Matriz2,argv[2],inteM,Columns_2); printf("Matriz 1: \n"); printMatriz(Matriz1,Rows_1, inteM); printf("\n"); printf("matriz 2: \n"); printMatriz(Matriz2,inteM,Columns_2); printf("\n"); time_inicial = clock(); createVector(Matriz1,Rows_1,Matriz2,Columns_2,inteM,MatrizR); time_final = clock(); printf("tiempo de ejecucion: %f\n", (time_final-time_inicial/CLOCKS_PER_SEC)); printf("matriz Respuesta: \n"); printMatriz(MatrizR, Rows_1, Columns_2); printf("\n"); free(Matriz1); free(Matriz2); free(MatrizR); return 0; }
c0d2ed140c6f3c796f67b8b38dc98debd9e9302f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Returns the incomplete gamma function P(a,x), evaluated by its series * representation. Assumes a > 0 and x >= 0. */ __device__ float gser(float a, float x){ int GAMMA_ITMAX = 200; float GAMMA_EPS = 2.22e-16; float ap, del, sum; int i; ap = a; del = 1.0/a; sum = del; for (i = 1; i <= GAMMA_ITMAX; ++ i) { ap += 1.0; del *= x/ap; sum += del; if (fabsf(del) < fabsf(sum)*GAMMA_EPS) { return sum*expf(-x + a*logf(x) - lgammaf(a)); } } return 1.0; // Too many iterations } /** * Returns the complementary incomplete gamma function Q(a,x), evaluated by its * continued fraction representation. Assumes a > 0 and x >= 0. */ __device__ float gcf(float a, float x){ int GAMMA_ITMAX = 200; float GAMMA_EPS = 2.22e-16; float GAMMA_FPMIN = (1.18e-38/GAMMA_EPS); float b, c, d, h, an, del; int i; b = x + 1.0 - a; c = 1.0/GAMMA_FPMIN; d = 1.0/b; h = d; for (i = 1; i <= GAMMA_ITMAX; ++ i) { an = -i*(i - a); b += 2.0; d = an*d + b; if (fabsf(d) < GAMMA_FPMIN) d = GAMMA_FPMIN; c = b + an/c; if (fabsf(c) < GAMMA_FPMIN) c = GAMMA_FPMIN; d = 1.0/d; del = d*c; h *= del; if (fabsf(del - 1.0) < GAMMA_EPS) { return expf(-x + a*logf(x) - lgammaf(a))*h; } } return 0.0; // Too many iterations } /** * Returns the incomplete gamma function P(a,x). */ __device__ float gammp(float a, float x){ //if (a <= 0.0) die ("gammp(): a illegal"); //if (x < 0.0) die ("gammp(): x illegal"); return x == 0.0 ? 0.0 : x < a + 1.0 ? gser(a,x) : 1.0 - gcf(a,x); } /** * Returns the complementary incomplete gamma function Q(a,x) = 1 - P(a,x). */ __device__ float gammq(float a, float x){ //if (a <= 0.0) die ("gammq(): a illegal"); //if (x < 0.0) die ("gammq(): x illegal"); if (x <= powf(2.0,-3.33)) { return x == 0.0 ? 1.0 : x < a + 1.0 ? 1.0 - gser(a,powf(2.0,-3.33)) : gcf(a, powf(2.0,-3.33)); } else { return x == 0.0 ? 1.0 : x < a + 1.0 ? 
1.0 - gser(a,x) : gcf(a,x); } } __device__ void ensure_appropriate_values(float e_value, float lognormval, float* bvalue){ if (!isnan(e_value) && !isinf(e_value)) { if (e_value<=powf(2.0,-3.33)) { *bvalue = 0; } else { *bvalue = floorf(logf(e_value)/lognormval)+11; } } *bvalue = (float) min((int) *bvalue, HiCCUPS_W1_MAX_INDX ); } __device__ void process_masks_lr(int i_start, int i_max_p1, int msize, int t_col, float *c,float *d, int diff, float* evalue_d, float* evalue_dist_d, float* evalue_v, float* evalue_dist_v){ for (int i = i_start; i < i_max_p1; i++) { int index = i * msize + t_col; if (!isnan(c[index])) { *evalue_d -= c[index]; *evalue_dist_d -= d[abs(i+diff-t_col)]; } for (int j = -1; j < 2; j++) { *evalue_v += c[index + j]; *evalue_dist_v += d[abs(i+diff-t_col-j)]; } } } __device__ void process_masks_tb(int j_start, int j_max_p1, int msize, int t_row, float *c,float *d, int diff, float* evalue_d, float* evalue_dist_d, float* evalue_h, float* evalue_dist_h){ for (int j = j_start; j < j_max_p1; j++) { int index = t_row * msize + j; if (!isnan(c[index])) { *evalue_d -= c[index]; *evalue_dist_d -= d[abs(t_row+diff-j)]; } for (int i = -1; i < 2; i++) { *evalue_h += c[(t_row+i) * msize + j]; *evalue_dist_h += d[abs(t_row+i+diff-j)]; } } } extern "C" __global__ void BasicPeakCallingKernel(float *c, float *expectedbl, float *expecteddonut, float *expectedh, float *expectedv, float *observed, float *b_bl, float *b_donut, float *b_h, float *b_v, float *p, float *p_bl, float *p_donut, float *p_h, float *p_v, float *tbl, float *td, float *th, float *tv, float *d, float *kr1, float *kr2, float *bound1, float *bound3) { // 2D Thread ID int t_col = threadIdx.x + blockIdx.x * blockDim.x; int t_row = threadIdx.y + blockIdx.y * blockDim.y; // Evalue is used to store the element of the matrix // that is computed by the thread float Evalue_bl = 0; float Edistvalue_bl = 0; float Evalue_donut = 0; float Edistvalue_donut = 0; float Evalue_h = 0; float Edistvalue_h = 0; float 
Evalue_v = 0; float Edistvalue_v = 0; float e_bl = 0; float e_donut = 0; float e_h = 0; float e_v = 0; float o = 0; float bvalue_bl = 0; float bvalue_donut = 0; float bvalue_h = 0; float bvalue_v = 0; float pvalue_bl = 1; float pvalue_donut = 1; float pvalue_h = 1; float pvalue_v = 1; int wsize = HiCCUPS_WINDOW; int msize = HiCCUPS_MATRIX_SIZE; int pwidth = HiCCUPS_PEAK_WIDTH; int buffer_width = HiCCUPS_REGION_MARGIN; int diff = bound1[0] - bound3[0]; int diagDist = abs(t_row+diff-t_col); int maxIndex = msize-buffer_width; wsize = min(wsize, (abs(t_row+diff-t_col)-1)/2); if (wsize <= pwidth) { wsize = pwidth + 1; } wsize = min(wsize, buffer_width); // only run if within central window (not in data buffer margins) if (t_row >= buffer_width && t_row<maxIndex && t_col>= buffer_width && t_col<maxIndex) { // calculate initial bottom left box for (int i = t_row+1; i <= t_row+wsize; i++) { for (int j = t_col-wsize; j < t_col; j++) { int index = i * msize + j; if (!isnan(c[index])) { if (i+diff-j<0) { Evalue_bl += c[index]; Edistvalue_bl += d[abs(i+diff-j)]; } } } } //Subtract off the middle peak for (int i = t_row+1; i <= t_row+pwidth; i++) { for (int j = t_col-pwidth; j < t_col; j++) { int index = i * msize + j; if (!isnan(c[index])) { if (i+diff-j<0) { Evalue_bl -= c[index]; Edistvalue_bl -= d[abs(i+diff-j)]; } } } } //fix box dimensions while (Evalue_bl<16) { Evalue_bl =0; Edistvalue_bl =0; wsize+=1; for (int i = t_row+1; i <= t_row+wsize; i++) { for (int j = t_col-wsize; j < t_col; j++) { int index = i * msize + j; if (!isnan(c[index]) && i+diff-j<0) { Evalue_bl += c[index]; Edistvalue_bl += d[abs(i+diff-j)]; if (i > t_row && i < t_row+pwidth+1 && j > t_col-pwidth-1 && j < t_col) { Evalue_bl -= c[index]; Edistvalue_bl -= d[abs(i+diff-j)]; } } } } if (wsize >= buffer_width) { break; } if (2*wsize>= abs(t_row+diff-t_col)) { break; } } // calculate donut for (int i = t_row-wsize; i <= t_row+wsize; ++i) { for (int j = t_col-wsize; j <= t_col+wsize; ++j) { int index = i * 
msize + j; if (!isnan(c[index])) { if (i+diff-j<0) { Evalue_donut += c[index]; Edistvalue_donut += d[abs(i+diff-j)]; } } } } //Subtract off the middle peak for (int i = t_row-pwidth; i <= t_row+pwidth; ++i) { for (int j = t_col-pwidth; j <= t_col+pwidth; ++j) { int index = i * msize + j; if (!isnan(c[index])) { if (i+diff-j<0) { Evalue_donut -= c[index]; Edistvalue_donut -= d[abs(i+diff-j)]; } } } } //Subtract off the cross hairs left side process_masks_lr(t_row-wsize, t_row-pwidth, msize, t_col, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_v, &Edistvalue_v); //Subtract off the cross hairs right side process_masks_lr(t_row+pwidth+1, t_row+wsize+1, msize, t_col, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_v, &Edistvalue_v); //Subtract off the cross hairs top side process_masks_tb(t_col-wsize, t_col-pwidth, msize, t_row, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_h, &Edistvalue_h); //Subtract off the cross hairs bottom side process_masks_tb(t_col+pwidth+1, t_col+wsize+1, msize, t_row, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_h, &Edistvalue_h); e_bl = ((Evalue_bl*d[diagDist])/Edistvalue_bl)*kr1[t_row]*kr2[t_col]; e_donut = ((Evalue_donut*d[diagDist])/Edistvalue_donut)*kr1[t_row]*kr2[t_col]; e_h = ((Evalue_h*d[diagDist])/Edistvalue_h)*kr1[t_row]*kr2[t_col]; e_v = ((Evalue_v*d[diagDist])/Edistvalue_v)*kr1[t_row]*kr2[t_col]; float lognorm = logf(powf(2.0,0.33)); ensure_appropriate_values(e_bl, lognorm, &bvalue_bl); ensure_appropriate_values(e_donut, lognorm, &bvalue_donut); ensure_appropriate_values(e_h, lognorm, &bvalue_h); ensure_appropriate_values(e_v, lognorm, &bvalue_v); int val_index = t_row * msize + t_col; o = roundf(c[val_index]*kr1[t_row]*kr2[t_col]); pvalue_bl = 1 - gammq(o, e_bl); pvalue_donut = 1 - gammq(o, e_donut); pvalue_h = 1 - gammq(o, e_h); pvalue_v = 1 - gammq(o, e_v); // Write the matrix to device memory; // each thread writes one element expectedbl[val_index] = e_bl; expecteddonut[val_index] = e_donut; 
expectedh[val_index] = e_h; expectedv[val_index] = e_v; observed[val_index] = o; b_bl[val_index] = bvalue_bl; b_donut[val_index] = bvalue_donut; b_h[val_index] = bvalue_h; b_v[val_index] = bvalue_v; if (pvalue_bl <= tbl[(int) bvalue_bl] && pvalue_donut <= td[(int) bvalue_donut] && pvalue_h <= th[(int) bvalue_h] && pvalue_v <= tv[(int) bvalue_v]) { p[val_index] = 1; } else { p[val_index] = 0; } p_bl[val_index] = pvalue_bl; p_donut[val_index] = pvalue_donut; p_h[val_index] = pvalue_h; p_v[val_index] = pvalue_v; } }
c0d2ed140c6f3c796f67b8b38dc98debd9e9302f.cu
/** * Returns the incomplete gamma function P(a,x), evaluated by its series * representation. Assumes a > 0 and x >= 0. */ __device__ float gser(float a, float x){ int GAMMA_ITMAX = 200; float GAMMA_EPS = 2.22e-16; float ap, del, sum; int i; ap = a; del = 1.0/a; sum = del; for (i = 1; i <= GAMMA_ITMAX; ++ i) { ap += 1.0; del *= x/ap; sum += del; if (fabsf(del) < fabsf(sum)*GAMMA_EPS) { return sum*expf(-x + a*logf(x) - lgammaf(a)); } } return 1.0; // Too many iterations } /** * Returns the complementary incomplete gamma function Q(a,x), evaluated by its * continued fraction representation. Assumes a > 0 and x >= 0. */ __device__ float gcf(float a, float x){ int GAMMA_ITMAX = 200; float GAMMA_EPS = 2.22e-16; float GAMMA_FPMIN = (1.18e-38/GAMMA_EPS); float b, c, d, h, an, del; int i; b = x + 1.0 - a; c = 1.0/GAMMA_FPMIN; d = 1.0/b; h = d; for (i = 1; i <= GAMMA_ITMAX; ++ i) { an = -i*(i - a); b += 2.0; d = an*d + b; if (fabsf(d) < GAMMA_FPMIN) d = GAMMA_FPMIN; c = b + an/c; if (fabsf(c) < GAMMA_FPMIN) c = GAMMA_FPMIN; d = 1.0/d; del = d*c; h *= del; if (fabsf(del - 1.0) < GAMMA_EPS) { return expf(-x + a*logf(x) - lgammaf(a))*h; } } return 0.0; // Too many iterations } /** * Returns the incomplete gamma function P(a,x). */ __device__ float gammp(float a, float x){ //if (a <= 0.0) die ("gammp(): a illegal"); //if (x < 0.0) die ("gammp(): x illegal"); return x == 0.0 ? 0.0 : x < a + 1.0 ? gser(a,x) : 1.0 - gcf(a,x); } /** * Returns the complementary incomplete gamma function Q(a,x) = 1 - P(a,x). */ __device__ float gammq(float a, float x){ //if (a <= 0.0) die ("gammq(): a illegal"); //if (x < 0.0) die ("gammq(): x illegal"); if (x <= powf(2.0,-3.33)) { return x == 0.0 ? 1.0 : x < a + 1.0 ? 1.0 - gser(a,powf(2.0,-3.33)) : gcf(a, powf(2.0,-3.33)); } else { return x == 0.0 ? 1.0 : x < a + 1.0 ? 
1.0 - gser(a,x) : gcf(a,x); } } __device__ void ensure_appropriate_values(float e_value, float lognormval, float* bvalue){ if (!isnan(e_value) && !isinf(e_value)) { if (e_value<=powf(2.0,-3.33)) { *bvalue = 0; } else { *bvalue = floorf(logf(e_value)/lognormval)+11; } } *bvalue = (float) min((int) *bvalue, HiCCUPS_W1_MAX_INDX ); } __device__ void process_masks_lr(int i_start, int i_max_p1, int msize, int t_col, float *c,float *d, int diff, float* evalue_d, float* evalue_dist_d, float* evalue_v, float* evalue_dist_v){ for (int i = i_start; i < i_max_p1; i++) { int index = i * msize + t_col; if (!isnan(c[index])) { *evalue_d -= c[index]; *evalue_dist_d -= d[abs(i+diff-t_col)]; } for (int j = -1; j < 2; j++) { *evalue_v += c[index + j]; *evalue_dist_v += d[abs(i+diff-t_col-j)]; } } } __device__ void process_masks_tb(int j_start, int j_max_p1, int msize, int t_row, float *c,float *d, int diff, float* evalue_d, float* evalue_dist_d, float* evalue_h, float* evalue_dist_h){ for (int j = j_start; j < j_max_p1; j++) { int index = t_row * msize + j; if (!isnan(c[index])) { *evalue_d -= c[index]; *evalue_dist_d -= d[abs(t_row+diff-j)]; } for (int i = -1; i < 2; i++) { *evalue_h += c[(t_row+i) * msize + j]; *evalue_dist_h += d[abs(t_row+i+diff-j)]; } } } extern "C" __global__ void BasicPeakCallingKernel(float *c, float *expectedbl, float *expecteddonut, float *expectedh, float *expectedv, float *observed, float *b_bl, float *b_donut, float *b_h, float *b_v, float *p, float *p_bl, float *p_donut, float *p_h, float *p_v, float *tbl, float *td, float *th, float *tv, float *d, float *kr1, float *kr2, float *bound1, float *bound3) { // 2D Thread ID int t_col = threadIdx.x + blockIdx.x * blockDim.x; int t_row = threadIdx.y + blockIdx.y * blockDim.y; // Evalue is used to store the element of the matrix // that is computed by the thread float Evalue_bl = 0; float Edistvalue_bl = 0; float Evalue_donut = 0; float Edistvalue_donut = 0; float Evalue_h = 0; float Edistvalue_h = 0; float 
Evalue_v = 0; float Edistvalue_v = 0; float e_bl = 0; float e_donut = 0; float e_h = 0; float e_v = 0; float o = 0; float bvalue_bl = 0; float bvalue_donut = 0; float bvalue_h = 0; float bvalue_v = 0; float pvalue_bl = 1; float pvalue_donut = 1; float pvalue_h = 1; float pvalue_v = 1; int wsize = HiCCUPS_WINDOW; int msize = HiCCUPS_MATRIX_SIZE; int pwidth = HiCCUPS_PEAK_WIDTH; int buffer_width = HiCCUPS_REGION_MARGIN; int diff = bound1[0] - bound3[0]; int diagDist = abs(t_row+diff-t_col); int maxIndex = msize-buffer_width; wsize = min(wsize, (abs(t_row+diff-t_col)-1)/2); if (wsize <= pwidth) { wsize = pwidth + 1; } wsize = min(wsize, buffer_width); // only run if within central window (not in data buffer margins) if (t_row >= buffer_width && t_row<maxIndex && t_col>= buffer_width && t_col<maxIndex) { // calculate initial bottom left box for (int i = t_row+1; i <= t_row+wsize; i++) { for (int j = t_col-wsize; j < t_col; j++) { int index = i * msize + j; if (!isnan(c[index])) { if (i+diff-j<0) { Evalue_bl += c[index]; Edistvalue_bl += d[abs(i+diff-j)]; } } } } //Subtract off the middle peak for (int i = t_row+1; i <= t_row+pwidth; i++) { for (int j = t_col-pwidth; j < t_col; j++) { int index = i * msize + j; if (!isnan(c[index])) { if (i+diff-j<0) { Evalue_bl -= c[index]; Edistvalue_bl -= d[abs(i+diff-j)]; } } } } //fix box dimensions while (Evalue_bl<16) { Evalue_bl =0; Edistvalue_bl =0; wsize+=1; for (int i = t_row+1; i <= t_row+wsize; i++) { for (int j = t_col-wsize; j < t_col; j++) { int index = i * msize + j; if (!isnan(c[index]) && i+diff-j<0) { Evalue_bl += c[index]; Edistvalue_bl += d[abs(i+diff-j)]; if (i > t_row && i < t_row+pwidth+1 && j > t_col-pwidth-1 && j < t_col) { Evalue_bl -= c[index]; Edistvalue_bl -= d[abs(i+diff-j)]; } } } } if (wsize >= buffer_width) { break; } if (2*wsize>= abs(t_row+diff-t_col)) { break; } } // calculate donut for (int i = t_row-wsize; i <= t_row+wsize; ++i) { for (int j = t_col-wsize; j <= t_col+wsize; ++j) { int index = i * 
msize + j; if (!isnan(c[index])) { if (i+diff-j<0) { Evalue_donut += c[index]; Edistvalue_donut += d[abs(i+diff-j)]; } } } } //Subtract off the middle peak for (int i = t_row-pwidth; i <= t_row+pwidth; ++i) { for (int j = t_col-pwidth; j <= t_col+pwidth; ++j) { int index = i * msize + j; if (!isnan(c[index])) { if (i+diff-j<0) { Evalue_donut -= c[index]; Edistvalue_donut -= d[abs(i+diff-j)]; } } } } //Subtract off the cross hairs left side process_masks_lr(t_row-wsize, t_row-pwidth, msize, t_col, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_v, &Edistvalue_v); //Subtract off the cross hairs right side process_masks_lr(t_row+pwidth+1, t_row+wsize+1, msize, t_col, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_v, &Edistvalue_v); //Subtract off the cross hairs top side process_masks_tb(t_col-wsize, t_col-pwidth, msize, t_row, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_h, &Edistvalue_h); //Subtract off the cross hairs bottom side process_masks_tb(t_col+pwidth+1, t_col+wsize+1, msize, t_row, c, d, diff, &Evalue_donut, &Edistvalue_donut, &Evalue_h, &Edistvalue_h); e_bl = ((Evalue_bl*d[diagDist])/Edistvalue_bl)*kr1[t_row]*kr2[t_col]; e_donut = ((Evalue_donut*d[diagDist])/Edistvalue_donut)*kr1[t_row]*kr2[t_col]; e_h = ((Evalue_h*d[diagDist])/Edistvalue_h)*kr1[t_row]*kr2[t_col]; e_v = ((Evalue_v*d[diagDist])/Edistvalue_v)*kr1[t_row]*kr2[t_col]; float lognorm = logf(powf(2.0,0.33)); ensure_appropriate_values(e_bl, lognorm, &bvalue_bl); ensure_appropriate_values(e_donut, lognorm, &bvalue_donut); ensure_appropriate_values(e_h, lognorm, &bvalue_h); ensure_appropriate_values(e_v, lognorm, &bvalue_v); int val_index = t_row * msize + t_col; o = roundf(c[val_index]*kr1[t_row]*kr2[t_col]); pvalue_bl = 1 - gammq(o, e_bl); pvalue_donut = 1 - gammq(o, e_donut); pvalue_h = 1 - gammq(o, e_h); pvalue_v = 1 - gammq(o, e_v); // Write the matrix to device memory; // each thread writes one element expectedbl[val_index] = e_bl; expecteddonut[val_index] = e_donut; 
expectedh[val_index] = e_h; expectedv[val_index] = e_v; observed[val_index] = o; b_bl[val_index] = bvalue_bl; b_donut[val_index] = bvalue_donut; b_h[val_index] = bvalue_h; b_v[val_index] = bvalue_v; if (pvalue_bl <= tbl[(int) bvalue_bl] && pvalue_donut <= td[(int) bvalue_donut] && pvalue_h <= th[(int) bvalue_h] && pvalue_v <= tv[(int) bvalue_v]) { p[val_index] = 1; } else { p[val_index] = 0; } p_bl[val_index] = pvalue_bl; p_donut[val_index] = pvalue_donut; p_h[val_index] = pvalue_h; p_v[val_index] = pvalue_v; } }
a0b477917ff3dcb159c7c9bfb76cdb70491c5ecc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: jglaser #include "ParticleData.cuh" /*! \file ParticleData.cu \brief ImplementsGPU kernel code and data structure functions used by ParticleData */ #ifdef ENABLE_MPI #include <iterator> #include <thrust/iterator/zip_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/scatter.h> #include <thrust/device_ptr.h> #include "hoomd/extern/kernels/scan.cuh" //! Kernel to partition particle data __global__ void gpu_scatter_particle_data_kernel( const unsigned int N, const Scalar4 *d_pos, const Scalar4 *d_vel, const Scalar3 *d_accel, const Scalar *d_charge, const Scalar *d_diameter, const int3 *d_image, const unsigned int *d_body, const Scalar4 *d_orientation, const Scalar4 *d_angmom, const Scalar3 *d_inertia, const Scalar4 *d_net_force, const Scalar4 *d_net_torque, const Scalar *d_net_virial, unsigned int net_virial_pitch, const unsigned int *d_tag, unsigned int *d_rtag, Scalar4 *d_pos_alt, Scalar4 *d_vel_alt, Scalar3 *d_accel_alt, Scalar *d_charge_alt, Scalar *d_diameter_alt, int3 *d_image_alt, unsigned int *d_body_alt, Scalar4 *d_orientation_alt, Scalar4 *d_angmom_alt, Scalar3 *d_inertia_alt, Scalar4 *d_net_force_alt, Scalar4 *d_net_torque_alt, Scalar *d_net_virial_alt, unsigned int *d_tag_alt, pdata_element *d_out, unsigned int *d_comm_flags, unsigned int *d_comm_flags_out, const unsigned int *d_scan) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= N) return; bool remove = d_comm_flags[idx]; unsigned int scan_remove = d_scan[idx]; unsigned int scan_keep = idx - scan_remove; if (remove) { pdata_element p; p.pos = d_pos[idx]; p.vel = d_vel[idx]; p.accel = d_accel[idx]; p.charge = d_charge[idx]; p.diameter = d_diameter[idx]; p.image = d_image[idx]; p.body = d_body[idx]; 
p.orientation = d_orientation[idx]; p.angmom = d_angmom[idx]; p.inertia = d_inertia[idx]; p.net_force = d_net_force[idx]; p.net_torque = d_net_torque[idx]; for (unsigned int j = 0; j < 6; ++j) p.net_virial[j] = d_net_virial[j*net_virial_pitch+idx]; p.tag = d_tag[idx]; d_out[scan_remove] = p; d_comm_flags_out[scan_remove] = d_comm_flags[idx]; // reset communication flags d_comm_flags[idx] = 0; // reset rtag d_rtag[p.tag] = NOT_LOCAL; } else { d_pos_alt[scan_keep] = d_pos[idx]; d_vel_alt[scan_keep] = d_vel[idx]; d_accel_alt[scan_keep] = d_accel[idx]; d_charge_alt[scan_keep] = d_charge[idx]; d_diameter_alt[scan_keep] = d_diameter[idx]; d_image_alt[scan_keep] = d_image[idx]; d_body_alt[scan_keep] = d_body[idx]; d_orientation_alt[scan_keep] = d_orientation[idx]; d_angmom_alt[scan_keep] = d_angmom[idx]; d_inertia_alt[scan_keep] = d_inertia[idx]; d_net_force_alt[scan_keep] = d_net_force[idx]; d_net_torque_alt[scan_keep] = d_net_torque[idx]; for (unsigned int j = 0; j < 6; ++j) d_net_virial_alt[j*net_virial_pitch+scan_keep] = d_net_virial[j*net_virial_pitch+idx]; unsigned int tag = d_tag[idx]; d_tag_alt[scan_keep] = tag; // update rtag d_rtag[tag] = scan_keep; } } __global__ void gpu_select_sent_particles( unsigned int N, unsigned int *d_comm_flags, unsigned int *d_tmp) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= N) return; d_tmp[idx] = d_comm_flags[idx] ? 1 : 0; } /*! 
\param N Number of local particles \param d_pos Device array of particle positions \param d_vel Device array of particle velocities \param d_accel Device array of particle accelerations \param d_charge Device array of particle charges \param d_diameter Device array of particle diameters \param d_image Device array of particle images \param d_body Device array of particle body tags \param d_orientation Device array of particle orientations \param d_angmom Device array of particle angular momenta \param d_inertia Device array of particle moments of inertia \param d_net_force Net force \param d_net_torque Net torque \param d_net_virial Net virial \param net_virial_pitch Pitch of net virial array \param d_tag Device array of particle tags \param d_rtag Device array for reverse-lookup table \param d_pos_alt Device array of particle positions (output) \param d_vel_alt Device array of particle velocities (output) \param d_accel_alt Device array of particle accelerations (output) \param d_charge_alt Device array of particle charges (output) \param d_diameter_alt Device array of particle diameters (output) \param d_image_alt Device array of particle images (output) \param d_body_alt Device array of particle body tags (output) \param d_orientation_alt Device array of particle orientations (output) \param d_angmom_alt Device array of particle angular momenta (output) \param d_inertia Device array of particle moments of inertia (output) \param d_net_force Net force (output) \param d_net_torque Net torque (output) \param d_net_virial Net virial (output) \param d_out Output array for packed particle data \param max_n_out Maximum number of elements to write to output array \returns Number of elements marked for removal */ unsigned int gpu_pdata_remove(const unsigned int N, const Scalar4 *d_pos, const Scalar4 *d_vel, const Scalar3 *d_accel, const Scalar *d_charge, const Scalar *d_diameter, const int3 *d_image, const unsigned int *d_body, const Scalar4 *d_orientation, const Scalar4 
*d_angmom, const Scalar3 *d_inertia, const Scalar4 *d_net_force, const Scalar4 *d_net_torque, const Scalar *d_net_virial, unsigned int net_virial_pitch, const unsigned int *d_tag, unsigned int *d_rtag, Scalar4 *d_pos_alt, Scalar4 *d_vel_alt, Scalar3 *d_accel_alt, Scalar *d_charge_alt, Scalar *d_diameter_alt, int3 *d_image_alt, unsigned int *d_body_alt, Scalar4 *d_orientation_alt, Scalar4 *d_angmom_alt, Scalar3 *d_inertia_alt, Scalar4 *d_net_force_alt, Scalar4 *d_net_torque_alt, Scalar *d_net_virial_alt, unsigned int *d_tag_alt, pdata_element *d_out, unsigned int *d_comm_flags, unsigned int *d_comm_flags_out, unsigned int max_n_out, unsigned int *d_tmp, mgpu::ContextPtr mgpu_context) { unsigned int n_out; // partition particle data into local and removed particles unsigned int block_size =512; unsigned int n_blocks = N/block_size+1; // select nonzero communication flags hipLaunchKernelGGL(( gpu_select_sent_particles), dim3(n_blocks), dim3(block_size), 0, 0, N, d_comm_flags, d_tmp); // perform a scan over the array of ones and zeroes mgpu::Scan<mgpu::MgpuScanTypeExc>(d_tmp, N, (unsigned int) 0, mgpu::plus<unsigned int>(), (unsigned int *)NULL, &n_out, d_tmp, *mgpu_context); // Don't write past end of buffer if (n_out <= max_n_out) { // partition particle data into local and removed particles unsigned int block_size =512; unsigned int n_blocks = N/block_size+1; hipLaunchKernelGGL(( gpu_scatter_particle_data_kernel), dim3(n_blocks), dim3(block_size), 0, 0, N, d_pos, d_vel, d_accel, d_charge, d_diameter, d_image, d_body, d_orientation, d_angmom, d_inertia, d_net_force, d_net_torque, d_net_virial, net_virial_pitch, d_tag, d_rtag, d_pos_alt, d_vel_alt, d_accel_alt, d_charge_alt, d_diameter_alt, d_image_alt, d_body_alt, d_orientation_alt, d_angmom_alt, d_inertia_alt, d_net_force_alt, d_net_torque_alt, d_net_virial_alt, d_tag_alt, d_out, d_comm_flags, d_comm_flags_out, d_tmp); } // return elements written to output stream return n_out; } __global__ void 
gpu_pdata_add_particles_kernel(unsigned int old_nparticles, unsigned int num_add_ptls, Scalar4 *d_pos, Scalar4 *d_vel, Scalar3 *d_accel, Scalar *d_charge, Scalar *d_diameter, int3 *d_image, unsigned int *d_body, Scalar4 *d_orientation, Scalar4 *d_angmom, Scalar3 *d_inertia, Scalar4 *d_net_force, Scalar4 *d_net_torque, Scalar *d_net_virial, unsigned int net_virial_pitch, unsigned int *d_tag, unsigned int *d_rtag, const pdata_element *d_in, unsigned int *d_comm_flags) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= num_add_ptls) return; pdata_element p = d_in[idx]; unsigned int add_idx = old_nparticles + idx; d_pos[add_idx] = p.pos; d_vel[add_idx] = p.vel; d_accel[add_idx] = p.accel; d_charge[add_idx] = p.charge; d_diameter[add_idx] = p.diameter; d_image[add_idx] = p.image; d_body[add_idx] = p.body; d_orientation[add_idx] = p.orientation; d_angmom[add_idx] = p.angmom; d_inertia[add_idx] = p.inertia; d_net_force[add_idx] = p.net_force; d_net_torque[add_idx] = p.net_torque; for (unsigned int j = 0; j < 6; ++j) d_net_virial[j*net_virial_pitch+add_idx] = p.net_virial[j]; d_tag[add_idx] = p.tag; d_rtag[p.tag] = add_idx; d_comm_flags[add_idx] = 0; } /*! 
\param old_nparticles old local particle count \param num_add_ptls Number of particles in input array \param d_pos Device array of particle positions \param d_vel Device iarray of particle velocities \param d_accel Device array of particle accelerations \param d_charge Device array of particle charges \param d_diameter Device array of particle diameters \param d_image Device array of particle images \param d_body Device array of particle body tags \param d_orientation Device array of particle orientations \param d_angmom Device array of particle angular momenta \param d_inertia Device array of particle moments of inertia \param d_net_force Net force \param d_net_torque Net torque \param d_net_virial Net virial \param d_tag Device array of particle tags \param d_rtag Device array for reverse-lookup table \param d_in Device array of packed input particle data \param d_comm_flags Device array of communication flags (pdata) */ void gpu_pdata_add_particles(const unsigned int old_nparticles, const unsigned int num_add_ptls, Scalar4 *d_pos, Scalar4 *d_vel, Scalar3 *d_accel, Scalar *d_charge, Scalar *d_diameter, int3 *d_image, unsigned int *d_body, Scalar4 *d_orientation, Scalar4 *d_angmom, Scalar3 *d_inertia, Scalar4 *d_net_force, Scalar4 *d_net_torque, Scalar *d_net_virial, unsigned int net_virial_pitch, unsigned int *d_tag, unsigned int *d_rtag, const pdata_element *d_in, unsigned int *d_comm_flags) { unsigned int block_size = 512; unsigned int n_blocks = num_add_ptls/block_size + 1; hipLaunchKernelGGL(( gpu_pdata_add_particles_kernel), dim3(n_blocks), dim3(block_size), 0, 0, old_nparticles, num_add_ptls, d_pos, d_vel, d_accel, d_charge, d_diameter, d_image, d_body, d_orientation, d_angmom, d_inertia, d_net_force, d_net_torque, d_net_virial, net_virial_pitch, d_tag, d_rtag, d_in, d_comm_flags); } #endif // ENABLE_MPI
a0b477917ff3dcb159c7c9bfb76cdb70491c5ecc.cu
// Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: jglaser #include "ParticleData.cuh" /*! \file ParticleData.cu \brief ImplementsGPU kernel code and data structure functions used by ParticleData */ #ifdef ENABLE_MPI #include <iterator> #include <thrust/iterator/zip_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/scatter.h> #include <thrust/device_ptr.h> #include "hoomd/extern/kernels/scan.cuh" //! Kernel to partition particle data __global__ void gpu_scatter_particle_data_kernel( const unsigned int N, const Scalar4 *d_pos, const Scalar4 *d_vel, const Scalar3 *d_accel, const Scalar *d_charge, const Scalar *d_diameter, const int3 *d_image, const unsigned int *d_body, const Scalar4 *d_orientation, const Scalar4 *d_angmom, const Scalar3 *d_inertia, const Scalar4 *d_net_force, const Scalar4 *d_net_torque, const Scalar *d_net_virial, unsigned int net_virial_pitch, const unsigned int *d_tag, unsigned int *d_rtag, Scalar4 *d_pos_alt, Scalar4 *d_vel_alt, Scalar3 *d_accel_alt, Scalar *d_charge_alt, Scalar *d_diameter_alt, int3 *d_image_alt, unsigned int *d_body_alt, Scalar4 *d_orientation_alt, Scalar4 *d_angmom_alt, Scalar3 *d_inertia_alt, Scalar4 *d_net_force_alt, Scalar4 *d_net_torque_alt, Scalar *d_net_virial_alt, unsigned int *d_tag_alt, pdata_element *d_out, unsigned int *d_comm_flags, unsigned int *d_comm_flags_out, const unsigned int *d_scan) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= N) return; bool remove = d_comm_flags[idx]; unsigned int scan_remove = d_scan[idx]; unsigned int scan_keep = idx - scan_remove; if (remove) { pdata_element p; p.pos = d_pos[idx]; p.vel = d_vel[idx]; p.accel = d_accel[idx]; p.charge = d_charge[idx]; p.diameter = d_diameter[idx]; p.image = d_image[idx]; p.body = d_body[idx]; p.orientation = d_orientation[idx]; p.angmom = d_angmom[idx]; p.inertia = d_inertia[idx]; 
p.net_force = d_net_force[idx]; p.net_torque = d_net_torque[idx]; for (unsigned int j = 0; j < 6; ++j) p.net_virial[j] = d_net_virial[j*net_virial_pitch+idx]; p.tag = d_tag[idx]; d_out[scan_remove] = p; d_comm_flags_out[scan_remove] = d_comm_flags[idx]; // reset communication flags d_comm_flags[idx] = 0; // reset rtag d_rtag[p.tag] = NOT_LOCAL; } else { d_pos_alt[scan_keep] = d_pos[idx]; d_vel_alt[scan_keep] = d_vel[idx]; d_accel_alt[scan_keep] = d_accel[idx]; d_charge_alt[scan_keep] = d_charge[idx]; d_diameter_alt[scan_keep] = d_diameter[idx]; d_image_alt[scan_keep] = d_image[idx]; d_body_alt[scan_keep] = d_body[idx]; d_orientation_alt[scan_keep] = d_orientation[idx]; d_angmom_alt[scan_keep] = d_angmom[idx]; d_inertia_alt[scan_keep] = d_inertia[idx]; d_net_force_alt[scan_keep] = d_net_force[idx]; d_net_torque_alt[scan_keep] = d_net_torque[idx]; for (unsigned int j = 0; j < 6; ++j) d_net_virial_alt[j*net_virial_pitch+scan_keep] = d_net_virial[j*net_virial_pitch+idx]; unsigned int tag = d_tag[idx]; d_tag_alt[scan_keep] = tag; // update rtag d_rtag[tag] = scan_keep; } } __global__ void gpu_select_sent_particles( unsigned int N, unsigned int *d_comm_flags, unsigned int *d_tmp) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= N) return; d_tmp[idx] = d_comm_flags[idx] ? 1 : 0; } /*! 
\param N Number of local particles \param d_pos Device array of particle positions \param d_vel Device array of particle velocities \param d_accel Device array of particle accelerations \param d_charge Device array of particle charges \param d_diameter Device array of particle diameters \param d_image Device array of particle images \param d_body Device array of particle body tags \param d_orientation Device array of particle orientations \param d_angmom Device array of particle angular momenta \param d_inertia Device array of particle moments of inertia \param d_net_force Net force \param d_net_torque Net torque \param d_net_virial Net virial \param net_virial_pitch Pitch of net virial array \param d_tag Device array of particle tags \param d_rtag Device array for reverse-lookup table \param d_pos_alt Device array of particle positions (output) \param d_vel_alt Device array of particle velocities (output) \param d_accel_alt Device array of particle accelerations (output) \param d_charge_alt Device array of particle charges (output) \param d_diameter_alt Device array of particle diameters (output) \param d_image_alt Device array of particle images (output) \param d_body_alt Device array of particle body tags (output) \param d_orientation_alt Device array of particle orientations (output) \param d_angmom_alt Device array of particle angular momenta (output) \param d_inertia Device array of particle moments of inertia (output) \param d_net_force Net force (output) \param d_net_torque Net torque (output) \param d_net_virial Net virial (output) \param d_out Output array for packed particle data \param max_n_out Maximum number of elements to write to output array \returns Number of elements marked for removal */ unsigned int gpu_pdata_remove(const unsigned int N, const Scalar4 *d_pos, const Scalar4 *d_vel, const Scalar3 *d_accel, const Scalar *d_charge, const Scalar *d_diameter, const int3 *d_image, const unsigned int *d_body, const Scalar4 *d_orientation, const Scalar4 
*d_angmom, const Scalar3 *d_inertia, const Scalar4 *d_net_force, const Scalar4 *d_net_torque, const Scalar *d_net_virial, unsigned int net_virial_pitch, const unsigned int *d_tag, unsigned int *d_rtag, Scalar4 *d_pos_alt, Scalar4 *d_vel_alt, Scalar3 *d_accel_alt, Scalar *d_charge_alt, Scalar *d_diameter_alt, int3 *d_image_alt, unsigned int *d_body_alt, Scalar4 *d_orientation_alt, Scalar4 *d_angmom_alt, Scalar3 *d_inertia_alt, Scalar4 *d_net_force_alt, Scalar4 *d_net_torque_alt, Scalar *d_net_virial_alt, unsigned int *d_tag_alt, pdata_element *d_out, unsigned int *d_comm_flags, unsigned int *d_comm_flags_out, unsigned int max_n_out, unsigned int *d_tmp, mgpu::ContextPtr mgpu_context) { unsigned int n_out; // partition particle data into local and removed particles unsigned int block_size =512; unsigned int n_blocks = N/block_size+1; // select nonzero communication flags gpu_select_sent_particles<<<n_blocks, block_size>>>( N, d_comm_flags, d_tmp); // perform a scan over the array of ones and zeroes mgpu::Scan<mgpu::MgpuScanTypeExc>(d_tmp, N, (unsigned int) 0, mgpu::plus<unsigned int>(), (unsigned int *)NULL, &n_out, d_tmp, *mgpu_context); // Don't write past end of buffer if (n_out <= max_n_out) { // partition particle data into local and removed particles unsigned int block_size =512; unsigned int n_blocks = N/block_size+1; gpu_scatter_particle_data_kernel<<<n_blocks, block_size>>>( N, d_pos, d_vel, d_accel, d_charge, d_diameter, d_image, d_body, d_orientation, d_angmom, d_inertia, d_net_force, d_net_torque, d_net_virial, net_virial_pitch, d_tag, d_rtag, d_pos_alt, d_vel_alt, d_accel_alt, d_charge_alt, d_diameter_alt, d_image_alt, d_body_alt, d_orientation_alt, d_angmom_alt, d_inertia_alt, d_net_force_alt, d_net_torque_alt, d_net_virial_alt, d_tag_alt, d_out, d_comm_flags, d_comm_flags_out, d_tmp); } // return elements written to output stream return n_out; } __global__ void gpu_pdata_add_particles_kernel(unsigned int old_nparticles, unsigned int num_add_ptls, 
Scalar4 *d_pos, Scalar4 *d_vel, Scalar3 *d_accel, Scalar *d_charge, Scalar *d_diameter, int3 *d_image, unsigned int *d_body, Scalar4 *d_orientation, Scalar4 *d_angmom, Scalar3 *d_inertia, Scalar4 *d_net_force, Scalar4 *d_net_torque, Scalar *d_net_virial, unsigned int net_virial_pitch, unsigned int *d_tag, unsigned int *d_rtag, const pdata_element *d_in, unsigned int *d_comm_flags) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= num_add_ptls) return; pdata_element p = d_in[idx]; unsigned int add_idx = old_nparticles + idx; d_pos[add_idx] = p.pos; d_vel[add_idx] = p.vel; d_accel[add_idx] = p.accel; d_charge[add_idx] = p.charge; d_diameter[add_idx] = p.diameter; d_image[add_idx] = p.image; d_body[add_idx] = p.body; d_orientation[add_idx] = p.orientation; d_angmom[add_idx] = p.angmom; d_inertia[add_idx] = p.inertia; d_net_force[add_idx] = p.net_force; d_net_torque[add_idx] = p.net_torque; for (unsigned int j = 0; j < 6; ++j) d_net_virial[j*net_virial_pitch+add_idx] = p.net_virial[j]; d_tag[add_idx] = p.tag; d_rtag[p.tag] = add_idx; d_comm_flags[add_idx] = 0; } /*! 
\param old_nparticles old local particle count \param num_add_ptls Number of particles in input array \param d_pos Device array of particle positions \param d_vel Device iarray of particle velocities \param d_accel Device array of particle accelerations \param d_charge Device array of particle charges \param d_diameter Device array of particle diameters \param d_image Device array of particle images \param d_body Device array of particle body tags \param d_orientation Device array of particle orientations \param d_angmom Device array of particle angular momenta \param d_inertia Device array of particle moments of inertia \param d_net_force Net force \param d_net_torque Net torque \param d_net_virial Net virial \param d_tag Device array of particle tags \param d_rtag Device array for reverse-lookup table \param d_in Device array of packed input particle data \param d_comm_flags Device array of communication flags (pdata) */ void gpu_pdata_add_particles(const unsigned int old_nparticles, const unsigned int num_add_ptls, Scalar4 *d_pos, Scalar4 *d_vel, Scalar3 *d_accel, Scalar *d_charge, Scalar *d_diameter, int3 *d_image, unsigned int *d_body, Scalar4 *d_orientation, Scalar4 *d_angmom, Scalar3 *d_inertia, Scalar4 *d_net_force, Scalar4 *d_net_torque, Scalar *d_net_virial, unsigned int net_virial_pitch, unsigned int *d_tag, unsigned int *d_rtag, const pdata_element *d_in, unsigned int *d_comm_flags) { unsigned int block_size = 512; unsigned int n_blocks = num_add_ptls/block_size + 1; gpu_pdata_add_particles_kernel<<<n_blocks, block_size>>>(old_nparticles, num_add_ptls, d_pos, d_vel, d_accel, d_charge, d_diameter, d_image, d_body, d_orientation, d_angmom, d_inertia, d_net_force, d_net_torque, d_net_virial, net_virial_pitch, d_tag, d_rtag, d_in, d_comm_flags); } #endif // ENABLE_MPI
aa4e1c524d9dafdb5e0e6e2fb822523cdb75b7b6.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/core.hpp> #include <opencv2/imgcodecs.hpp> #include <iostream> #include <cmath> // Standard CUDA API functions #include <hip/hip_runtime_api.h> // Error checking macro #define cudaCheckError(code) \ { \ if ((code) != hipSuccess) { \ fprintf(stderr, "Cuda failure %s:%d: '%s' \n", __FILE__, __LINE__, \ hipGetErrorString(code)); \ } \ } struct light { float x; float y; float radius; float brightness; }; __device__ float light_brightness( float x, float y, size_t width, size_t height, const light &light) { float norm_x = x / width; float norm_y = y / height; float dx = norm_x - light.x; float dy = norm_y - light.y; float distance_squared = dx * dx + dy * dy; if (distance_squared > light.radius * light.radius) { return 0; } float distance = sqrtf(distance_squared); float scaled_distance = distance / light.radius; if (scaled_distance > 0.8) { return (1.0f - (scaled_distance - 0.8f) * 5.0f) * light.brightness; } else { return light.brightness; } } template <typename T> __device__ T *pointer2d(T *base_pointer, int x, int y, size_t pitch) { return (T *)((uchar *)base_pointer + y * pitch) + x; } __device__ float clamp(float value) {return value > 1.0f ? 
1.0f : value;} __global__ void spotlights( uchar* input_data, uchar* output_data, size_t width, size_t height, size_t pitch, float ambient, light light1, light light2, light light3, light light4) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; float brightness = ambient + light_brightness(x, y, width, height, light1) + light_brightness(x, y, width, height, light2) + light_brightness(x, y, width, height, light3) + light_brightness(x, y, width, height, light4); *pointer2d(output_data, x, y, pitch) = *pointer2d(input_data, x, y, pitch) * clamp(brightness); } int main() { std::string image_path = "../data/starry_night.png"; cv::Mat img = cv::imread(image_path, cv::IMREAD_COLOR); if(img.empty()) { std::cout << "Could not read the image: " << image_path << std::endl; return 1; } cv::Mat output_img; output_img.create(img.rows, img.cols, CV_8UC3); light light1 = {0.2, 0.1, 0.1, 4.0}; light light2 = {0.25, 0.2, 0.075, 2.0}; light light3 = {0.5, 0.5, 0.3, 0.3}; light light4 = {0.7, 0.65, 0.15, 0.8}; size_t channels = img.channels(); size_t width = img.cols * channels; size_t height = img.rows; size_t byte_width = width * sizeof(uchar); size_t pitch; uchar *input_data_2d, *output_data_2d; // Allocate 2D aligned image cudaCheckError( hipMallocPitch(&input_data_2d, &pitch, byte_width, height)); cudaCheckError( hipMallocPitch(&output_data_2d, &pitch, byte_width, height)); cudaCheckError( hipMemcpy2D( input_data_2d, pitch, img.data, byte_width, byte_width, height, hipMemcpyHostToDevice)); std::cout << "byte width: " << byte_width << std::endl; std::cout << "pitch: " << pitch << std::endl; dim3 block_dim(32, 16); dim3 grid_dim( (width + block_dim.x - 1) / block_dim.x, (height + block_dim.y - 1) / block_dim.y ); hipLaunchKernelGGL(( spotlights), dim3(grid_dim), dim3(block_dim), 0, 0, input_data_2d, output_data_2d, width, height, pitch, 0.3, light1, light2, light3, light4); cudaCheckError( 
hipMemcpy2D( output_img.data, byte_width, output_data_2d, pitch, byte_width, height, hipMemcpyDeviceToHost)); cudaCheckError(hipFree(input_data_2d)); cudaCheckError(hipFree(output_data_2d)); cv::imwrite("test.png", output_img); return 0; }
aa4e1c524d9dafdb5e0e6e2fb822523cdb75b7b6.cu
#include <opencv2/core.hpp> #include <opencv2/imgcodecs.hpp> #include <iostream> #include <cmath> // Standard CUDA API functions #include <cuda_runtime_api.h> // Error checking macro #define cudaCheckError(code) \ { \ if ((code) != cudaSuccess) { \ fprintf(stderr, "Cuda failure %s:%d: '%s' \n", __FILE__, __LINE__, \ cudaGetErrorString(code)); \ } \ } struct light { float x; float y; float radius; float brightness; }; __device__ float light_brightness( float x, float y, size_t width, size_t height, const light &light) { float norm_x = x / width; float norm_y = y / height; float dx = norm_x - light.x; float dy = norm_y - light.y; float distance_squared = dx * dx + dy * dy; if (distance_squared > light.radius * light.radius) { return 0; } float distance = sqrtf(distance_squared); float scaled_distance = distance / light.radius; if (scaled_distance > 0.8) { return (1.0f - (scaled_distance - 0.8f) * 5.0f) * light.brightness; } else { return light.brightness; } } template <typename T> __device__ T *pointer2d(T *base_pointer, int x, int y, size_t pitch) { return (T *)((uchar *)base_pointer + y * pitch) + x; } __device__ float clamp(float value) {return value > 1.0f ? 
1.0f : value;} __global__ void spotlights( uchar* input_data, uchar* output_data, size_t width, size_t height, size_t pitch, float ambient, light light1, light light2, light light3, light light4) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; float brightness = ambient + light_brightness(x, y, width, height, light1) + light_brightness(x, y, width, height, light2) + light_brightness(x, y, width, height, light3) + light_brightness(x, y, width, height, light4); *pointer2d(output_data, x, y, pitch) = *pointer2d(input_data, x, y, pitch) * clamp(brightness); } int main() { std::string image_path = "../data/starry_night.png"; cv::Mat img = cv::imread(image_path, cv::IMREAD_COLOR); if(img.empty()) { std::cout << "Could not read the image: " << image_path << std::endl; return 1; } cv::Mat output_img; output_img.create(img.rows, img.cols, CV_8UC3); light light1 = {0.2, 0.1, 0.1, 4.0}; light light2 = {0.25, 0.2, 0.075, 2.0}; light light3 = {0.5, 0.5, 0.3, 0.3}; light light4 = {0.7, 0.65, 0.15, 0.8}; size_t channels = img.channels(); size_t width = img.cols * channels; size_t height = img.rows; size_t byte_width = width * sizeof(uchar); size_t pitch; uchar *input_data_2d, *output_data_2d; // Allocate 2D aligned image cudaCheckError( cudaMallocPitch(&input_data_2d, &pitch, byte_width, height)); cudaCheckError( cudaMallocPitch(&output_data_2d, &pitch, byte_width, height)); cudaCheckError( cudaMemcpy2D( input_data_2d, pitch, img.data, byte_width, byte_width, height, cudaMemcpyHostToDevice)); std::cout << "byte width: " << byte_width << std::endl; std::cout << "pitch: " << pitch << std::endl; dim3 block_dim(32, 16); dim3 grid_dim( (width + block_dim.x - 1) / block_dim.x, (height + block_dim.y - 1) / block_dim.y ); spotlights<<<grid_dim, block_dim>>>( input_data_2d, output_data_2d, width, height, pitch, 0.3, light1, light2, light3, light4); cudaCheckError( cudaMemcpy2D( output_img.data, 
byte_width, output_data_2d, pitch, byte_width, height, cudaMemcpyDeviceToHost)); cudaCheckError(cudaFree(input_data_2d)); cudaCheckError(cudaFree(output_data_2d)); cv::imwrite("test.png", output_img); return 0; }
a95929a75a4ca6f1dfbfbf0db1dadd0e371e00ea.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ex ess or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <benchmark/benchmark.h> #include <hip/hip_runtime_api.h> #include <rmm/thrust_rmm_allocator.h> #include <rmm/device_uvector.hpp> #include "rmm/mr/device/cnmem_memory_resource.hpp" #include "rmm/mr/device/default_memory_resource.hpp" static void BM_UvectorSizeConstruction(benchmark::State& state) { rmm::mr::cnmem_memory_resource mr{}; rmm::mr::set_default_resource(&mr); for (auto _ : state) { rmm::device_uvector<int32_t>(state.range(0), hipStream_t{0}); hipDeviceSynchronize(); } } BENCHMARK(BM_UvectorSizeConstruction) ->RangeMultiplier(10) ->Range(10'000, 1'000'000'000) ->Unit(benchmark::kMicrosecond); static void BM_ThrustVectorSizeConstruction(benchmark::State& state) { rmm::mr::cnmem_memory_resource mr{}; rmm::mr::set_default_resource(&mr); for (auto _ : state) { rmm::device_vector<int32_t>(state.range(0)); hipDeviceSynchronize(); } } BENCHMARK(BM_ThrustVectorSizeConstruction) ->RangeMultiplier(10) ->Range(10'000, 1'000'000'000) ->Unit(benchmark::kMicrosecond); BENCHMARK_MAIN();
a95929a75a4ca6f1dfbfbf0db1dadd0e371e00ea.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ex ess or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <benchmark/benchmark.h> #include <cuda_runtime_api.h> #include <rmm/thrust_rmm_allocator.h> #include <rmm/device_uvector.hpp> #include "rmm/mr/device/cnmem_memory_resource.hpp" #include "rmm/mr/device/default_memory_resource.hpp" static void BM_UvectorSizeConstruction(benchmark::State& state) { rmm::mr::cnmem_memory_resource mr{}; rmm::mr::set_default_resource(&mr); for (auto _ : state) { rmm::device_uvector<int32_t>(state.range(0), cudaStream_t{0}); cudaDeviceSynchronize(); } } BENCHMARK(BM_UvectorSizeConstruction) ->RangeMultiplier(10) ->Range(10'000, 1'000'000'000) ->Unit(benchmark::kMicrosecond); static void BM_ThrustVectorSizeConstruction(benchmark::State& state) { rmm::mr::cnmem_memory_resource mr{}; rmm::mr::set_default_resource(&mr); for (auto _ : state) { rmm::device_vector<int32_t>(state.range(0)); cudaDeviceSynchronize(); } } BENCHMARK(BM_ThrustVectorSizeConstruction) ->RangeMultiplier(10) ->Range(10'000, 1'000'000'000) ->Unit(benchmark::kMicrosecond); BENCHMARK_MAIN();
7a7b3ad5f17e0bfe5c5095c069dca00cfec788ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample evaluates fair call and put prices for a * given set of European options by Black-Scholes formula. * See supplied whitepaper for more explanations. */ #include <helper_functions.h> // helper functions for string parsing #include <helper_cuda.h> // helper functions CUDA error checking and initialization //////////////////////////////////////////////////////////////////////////////// // Process an array of optN options on CPU //////////////////////////////////////////////////////////////////////////////// extern "C" void BlackScholesCPU( float *h_CallResult, float *h_PutResult, float *h_StockPrice, float *h_OptionStrike, float *h_OptionYears, float Riskfree, float Volatility, int optN ); //////////////////////////////////////////////////////////////////////////////// // Process an array of OptN options on GPU //////////////////////////////////////////////////////////////////////////////// #include "BlackScholes_kernel.cuh" //////////////////////////////////////////////////////////////////////////////// // Helper function, returning uniformly distributed // random float in [low, high] range //////////////////////////////////////////////////////////////////////////////// float RandFloat(float low, float high) { float t = (float)rand() / (float)RAND_MAX; return (1.0f - t) * low + t * high; } //////////////////////////////////////////////////////////////////////////////// // Data configuration //////////////////////////////////////////////////////////////////////////////// const int 
OPT_N = 4000000; //const int NUM_ITERATIONS = 512; const int NUM_ITERATIONS = 10; const int OPT_SZ = OPT_N * sizeof(float); const float RISKFREE = 0.02f; const float VOLATILITY = 0.30f; #define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) ) //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // Start logs printf("[%s] - Starting...\n", argv[0]); //'h_' prefix - CPU (host) memory space float //Results calculated by CPU for reference *h_CallResultCPU, *h_PutResultCPU, //CPU copy of GPU results *h_CallResultGPU, *h_PutResultGPU, //CPU instance of input data *h_StockPrice, *h_OptionStrike, *h_OptionYears; //'d_' prefix - GPU (device) memory space float //Results calculated by GPU *d_CallResult, *d_PutResult, //GPU instance of input data *d_StockPrice, *d_OptionStrike, *d_OptionYears; double delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime; StopWatchInterface *hTimer = NULL; int i; findCudaDevice(argc, (const char **)argv); sdkCreateTimer(&hTimer); printf("Initializing data...\n"); printf("...allocating CPU memory for options.\n"); h_CallResultCPU = (float *)malloc(OPT_SZ); h_PutResultCPU = (float *)malloc(OPT_SZ); h_CallResultGPU = (float *)malloc(OPT_SZ); h_PutResultGPU = (float *)malloc(OPT_SZ); h_StockPrice = (float *)malloc(OPT_SZ); h_OptionStrike = (float *)malloc(OPT_SZ); h_OptionYears = (float *)malloc(OPT_SZ); printf("...allocating GPU memory for options.\n"); checkCudaErrors(hipMalloc((void **)&d_CallResult, OPT_SZ)); checkCudaErrors(hipMalloc((void **)&d_PutResult, OPT_SZ)); checkCudaErrors(hipMalloc((void **)&d_StockPrice, OPT_SZ)); checkCudaErrors(hipMalloc((void **)&d_OptionStrike, OPT_SZ)); checkCudaErrors(hipMalloc((void **)&d_OptionYears, OPT_SZ)); printf("...generating input data in CPU mem.\n"); srand(5347); //Generate options set for (i = 0; i < OPT_N; i++) { h_CallResultCPU[i] = 0.0f; 
h_PutResultCPU[i] = -1.0f; h_StockPrice[i] = RandFloat(5.0f, 30.0f); h_OptionStrike[i] = RandFloat(1.0f, 100.0f); h_OptionYears[i] = RandFloat(0.25f, 10.0f); } printf("...copying input data to GPU mem.\n"); //Copy options data to GPU memory for further processing checkCudaErrors(hipMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, hipMemcpyHostToDevice)); printf("Data init done.\n\n"); printf("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS); checkCudaErrors(hipDeviceSynchronize()); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); for (i = 0; i < NUM_ITERATIONS; i++) { hipLaunchKernelGGL(( BlackScholesGPU), dim3(DIV_UP((OPT_N/2), 64)), dim3(64/*480), 128*/, 0, 0, (float2 *)d_CallResult, (float2 *)d_PutResult, (float2 *)d_StockPrice, (float2 *)d_OptionStrike, (float2 *)d_OptionYears, RISKFREE, VOLATILITY, OPT_N ); getLastCudaError("BlackScholesGPU() execution failed\n"); } checkCudaErrors(hipDeviceSynchronize()); sdkStopTimer(&hTimer); gpuTime = sdkGetTimerValue(&hTimer) / NUM_ITERATIONS; //Both call and put is calculated printf("Options count : %i \n", 2 * OPT_N); printf("BlackScholesGPU() time : %f msec\n", gpuTime); printf("Effective memory bandwidth: %f GB/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (gpuTime * 1E-3)); printf("Gigaoptions per second : %f \n\n", ((double)(2 * OPT_N) * 1E-9) / (gpuTime * 1E-3)); printf("BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n", (((double)(2.0 * OPT_N) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * OPT_N), 1, 64); printf("\nReading back GPU results...\n"); //Read back GPU results to compare them to CPU results checkCudaErrors(hipMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_PutResultGPU, d_PutResult, 
OPT_SZ, hipMemcpyDeviceToHost)); printf("Checking the results...\n"); printf("...running CPU calculations.\n\n"); //Calculate options values on CPU BlackScholesCPU( h_CallResultCPU, h_PutResultCPU, h_StockPrice, h_OptionStrike, h_OptionYears, RISKFREE, VOLATILITY, OPT_N ); printf("Comparing the results...\n"); //Calculate max absolute difference and L1 distance //between CPU and GPU results sum_delta = 0; sum_ref = 0; max_delta = 0; for (i = 0; i < OPT_N; i++) { ref = h_CallResultCPU[i]; delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]); if (delta > max_delta) { max_delta = delta; } sum_delta += delta; sum_ref += fabs(ref); } L1norm = sum_delta / sum_ref; printf("L1 norm: %E\n", L1norm); printf("Max absolute error: %E\n\n", max_delta); printf("Shutting down...\n"); printf("...releasing GPU memory.\n"); checkCudaErrors(hipFree(d_OptionYears)); checkCudaErrors(hipFree(d_OptionStrike)); checkCudaErrors(hipFree(d_StockPrice)); checkCudaErrors(hipFree(d_PutResult)); checkCudaErrors(hipFree(d_CallResult)); printf("...releasing CPU memory.\n"); free(h_OptionYears); free(h_OptionStrike); free(h_StockPrice); free(h_PutResultGPU); free(h_CallResultGPU); free(h_PutResultCPU); free(h_CallResultCPU); sdkDeleteTimer(&hTimer); printf("Shutdown done.\n"); printf("\n[BlackScholes] - Test Summary\n"); if (L1norm > 1e-6) { printf("Test failed!\n"); exit(EXIT_FAILURE); } printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n\n"); printf("Test passed\n"); exit(EXIT_SUCCESS); }
7a7b3ad5f17e0bfe5c5095c069dca00cfec788ec.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample evaluates fair call and put prices for a * given set of European options by Black-Scholes formula. * See supplied whitepaper for more explanations. */ #include <helper_functions.h> // helper functions for string parsing #include <helper_cuda.h> // helper functions CUDA error checking and initialization //////////////////////////////////////////////////////////////////////////////// // Process an array of optN options on CPU //////////////////////////////////////////////////////////////////////////////// extern "C" void BlackScholesCPU( float *h_CallResult, float *h_PutResult, float *h_StockPrice, float *h_OptionStrike, float *h_OptionYears, float Riskfree, float Volatility, int optN ); //////////////////////////////////////////////////////////////////////////////// // Process an array of OptN options on GPU //////////////////////////////////////////////////////////////////////////////// #include "BlackScholes_kernel.cuh" //////////////////////////////////////////////////////////////////////////////// // Helper function, returning uniformly distributed // random float in [low, high] range //////////////////////////////////////////////////////////////////////////////// float RandFloat(float low, float high) { float t = (float)rand() / (float)RAND_MAX; return (1.0f - t) * low + t * high; } //////////////////////////////////////////////////////////////////////////////// // Data configuration //////////////////////////////////////////////////////////////////////////////// const int OPT_N = 4000000; //const int NUM_ITERATIONS = 512; const int NUM_ITERATIONS = 10; const int 
OPT_SZ = OPT_N * sizeof(float); const float RISKFREE = 0.02f; const float VOLATILITY = 0.30f; #define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) ) //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // Start logs printf("[%s] - Starting...\n", argv[0]); //'h_' prefix - CPU (host) memory space float //Results calculated by CPU for reference *h_CallResultCPU, *h_PutResultCPU, //CPU copy of GPU results *h_CallResultGPU, *h_PutResultGPU, //CPU instance of input data *h_StockPrice, *h_OptionStrike, *h_OptionYears; //'d_' prefix - GPU (device) memory space float //Results calculated by GPU *d_CallResult, *d_PutResult, //GPU instance of input data *d_StockPrice, *d_OptionStrike, *d_OptionYears; double delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime; StopWatchInterface *hTimer = NULL; int i; findCudaDevice(argc, (const char **)argv); sdkCreateTimer(&hTimer); printf("Initializing data...\n"); printf("...allocating CPU memory for options.\n"); h_CallResultCPU = (float *)malloc(OPT_SZ); h_PutResultCPU = (float *)malloc(OPT_SZ); h_CallResultGPU = (float *)malloc(OPT_SZ); h_PutResultGPU = (float *)malloc(OPT_SZ); h_StockPrice = (float *)malloc(OPT_SZ); h_OptionStrike = (float *)malloc(OPT_SZ); h_OptionYears = (float *)malloc(OPT_SZ); printf("...allocating GPU memory for options.\n"); checkCudaErrors(cudaMalloc((void **)&d_CallResult, OPT_SZ)); checkCudaErrors(cudaMalloc((void **)&d_PutResult, OPT_SZ)); checkCudaErrors(cudaMalloc((void **)&d_StockPrice, OPT_SZ)); checkCudaErrors(cudaMalloc((void **)&d_OptionStrike, OPT_SZ)); checkCudaErrors(cudaMalloc((void **)&d_OptionYears, OPT_SZ)); printf("...generating input data in CPU mem.\n"); srand(5347); //Generate options set for (i = 0; i < OPT_N; i++) { h_CallResultCPU[i] = 0.0f; h_PutResultCPU[i] = -1.0f; h_StockPrice[i] = RandFloat(5.0f, 30.0f); h_OptionStrike[i] = 
RandFloat(1.0f, 100.0f); h_OptionYears[i] = RandFloat(0.25f, 10.0f); } printf("...copying input data to GPU mem.\n"); //Copy options data to GPU memory for further processing checkCudaErrors(cudaMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, cudaMemcpyHostToDevice)); printf("Data init done.\n\n"); printf("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS); checkCudaErrors(cudaDeviceSynchronize()); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); for (i = 0; i < NUM_ITERATIONS; i++) { BlackScholesGPU<<<DIV_UP((OPT_N/2), 64), 64/*480, 128*/>>>( (float2 *)d_CallResult, (float2 *)d_PutResult, (float2 *)d_StockPrice, (float2 *)d_OptionStrike, (float2 *)d_OptionYears, RISKFREE, VOLATILITY, OPT_N ); getLastCudaError("BlackScholesGPU() execution failed\n"); } checkCudaErrors(cudaDeviceSynchronize()); sdkStopTimer(&hTimer); gpuTime = sdkGetTimerValue(&hTimer) / NUM_ITERATIONS; //Both call and put is calculated printf("Options count : %i \n", 2 * OPT_N); printf("BlackScholesGPU() time : %f msec\n", gpuTime); printf("Effective memory bandwidth: %f GB/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (gpuTime * 1E-3)); printf("Gigaoptions per second : %f \n\n", ((double)(2 * OPT_N) * 1E-9) / (gpuTime * 1E-3)); printf("BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n", (((double)(2.0 * OPT_N) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * OPT_N), 1, 64); printf("\nReading back GPU results...\n"); //Read back GPU results to compare them to CPU results checkCudaErrors(cudaMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, cudaMemcpyDeviceToHost)); printf("Checking the results...\n"); printf("...running CPU calculations.\n\n"); 
//Calculate options values on CPU BlackScholesCPU( h_CallResultCPU, h_PutResultCPU, h_StockPrice, h_OptionStrike, h_OptionYears, RISKFREE, VOLATILITY, OPT_N ); printf("Comparing the results...\n"); //Calculate max absolute difference and L1 distance //between CPU and GPU results sum_delta = 0; sum_ref = 0; max_delta = 0; for (i = 0; i < OPT_N; i++) { ref = h_CallResultCPU[i]; delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]); if (delta > max_delta) { max_delta = delta; } sum_delta += delta; sum_ref += fabs(ref); } L1norm = sum_delta / sum_ref; printf("L1 norm: %E\n", L1norm); printf("Max absolute error: %E\n\n", max_delta); printf("Shutting down...\n"); printf("...releasing GPU memory.\n"); checkCudaErrors(cudaFree(d_OptionYears)); checkCudaErrors(cudaFree(d_OptionStrike)); checkCudaErrors(cudaFree(d_StockPrice)); checkCudaErrors(cudaFree(d_PutResult)); checkCudaErrors(cudaFree(d_CallResult)); printf("...releasing CPU memory.\n"); free(h_OptionYears); free(h_OptionStrike); free(h_StockPrice); free(h_PutResultGPU); free(h_CallResultGPU); free(h_PutResultCPU); free(h_CallResultCPU); sdkDeleteTimer(&hTimer); printf("Shutdown done.\n"); printf("\n[BlackScholes] - Test Summary\n"); if (L1norm > 1e-6) { printf("Test failed!\n"); exit(EXIT_FAILURE); } printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n\n"); printf("Test passed\n"); exit(EXIT_SUCCESS); }
ec59fc292cec8cbc860a1e3eb428f787672c8658.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ftl/operators/cuda/gt.hpp> #ifndef WARP_SIZE #define WARP_SIZE 32 #endif #define FULL_MASK 0xffffffff template <bool DISPARITY, bool VISUALISE> __global__ void gt_analysis_kernel( uchar4* __restrict__ colour, int cpitch, int width, int height, const float* __restrict__ depth, int dpitch, const float* __restrict__ gt, int gpitch, const uchar* __restrict__ mask, int mpitch, ftl::cuda::GTAnalysisData *out, ftl::rgbd::Camera cam, float t_min, float t_max, uchar4 colour_value ) { __shared__ int svalid; __shared__ int smissing; __shared__ int smissing_masked; __shared__ int smasked; __shared__ int sgood; __shared__ float serr; __shared__ float serr_sq; if (threadIdx.x == 0 && threadIdx.y == 0) { svalid = 0; smissing = 0; smissing_masked = 0; smasked = 0; sgood = 0; serr = 0.0f; serr_sq = 0.0f; } __syncthreads(); const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; int valid = 0; int missing = 0; int missing_masked = 0; int masked = 0; int good = 0; float err = 0.0f; float err_sq = 0.0f; const float numer = cam.baseline*cam.fx; if (x < width) { const float* __restrict__ gt_ptr = gt+x; const float* __restrict__ d_ptr = depth+x; const uchar* __restrict__ m_ptr = mask+x; for (STRIDE_Y(y, height)) { // TODO: Verify gt and depth pitch are same float gtval = gt_ptr[y*dpitch]; float dval = d_ptr[y*dpitch]; const int tmasked = (m_ptr[y*mpitch] == 0) ? 0 : 1; const int tinvalid = (dval <= cam.minDepth || dval >= cam.maxDepth) ? 1 : 0; const int tgtinvalid = (gtval > cam.minDepth && gtval < cam.maxDepth) ? 
0 : 1; if (tinvalid == 0 && tgtinvalid == 0) { // if there is valid value in both (gt and depth) valid += 1; if (DISPARITY) { dval = (numer / dval); gtval = (numer / gtval); } const float e = fabsf(dval-gtval); if ((t_min < e) && (e <= t_max)) { good += 1; err += e; err_sq += e*e; if (VISUALISE) { colour[x+y*cpitch] = colour_value; } } } else if (tinvalid == 0 && tmasked == 1 && tgtinvalid == 1) { // masked and not missing (but no gt value) if (VISUALISE) { colour[x+y*cpitch] = {192, 0, 192, 255}; } // magenta } else if (tinvalid == 1 && (tmasked == 1 || tgtinvalid == 1)) { // missing and (masked or missing gt) if (VISUALISE) { colour[x+y*cpitch] = {0, 0, 0, 255}; } // black missing_masked += 1; } else if (tinvalid == 1) { // missing value (not masked) if (VISUALISE) { colour[x+y*cpitch] = {224, 32, 32, 255}; } // blue missing += 1; } masked += (tmasked == 1 || tgtinvalid == 1) ? 1 : 0; } } // Warp aggregate #pragma unroll for (int i = WARP_SIZE/2; i > 0; i /= 2) { valid += __shfl_xor_sync(FULL_MASK, valid, i, WARP_SIZE); missing += __shfl_xor_sync(FULL_MASK, missing, i, WARP_SIZE); missing_masked += __shfl_xor_sync(FULL_MASK, missing_masked, i, WARP_SIZE); masked += __shfl_xor_sync(FULL_MASK, masked, i, WARP_SIZE); good += __shfl_xor_sync(FULL_MASK, good, i, WARP_SIZE); err += __shfl_xor_sync(FULL_MASK, err, i, WARP_SIZE); err_sq += __shfl_xor_sync(FULL_MASK, err_sq, i, WARP_SIZE); } // Block aggregate if (threadIdx.x % WARP_SIZE == 0) { atomicAdd(&svalid, valid); atomicAdd(&smissing, missing); atomicAdd(&smissing_masked, missing_masked); atomicAdd(&smasked, masked); atomicAdd(&sgood, good); atomicAdd(&serr, err); atomicAdd(&serr_sq, err_sq); } __syncthreads(); // Global aggregate if (threadIdx.x == 0 && threadIdx.y == 0) { atomicAdd(&out->valid, svalid); atomicAdd(&out->missing, smissing); atomicAdd(&out->missing_masked, smissing_masked); atomicAdd(&out->masked, smasked); atomicAdd(&out->good, sgood); atomicAdd(&out->err, serr); atomicAdd(&out->err_sq, serr_sq); 
} } void ftl::cuda::gt_analysis( ftl::cuda::TextureObject<uchar4> &colour, ftl::cuda::TextureObject<float> &depth, ftl::cuda::TextureObject<float> &gt, ftl::cuda::TextureObject<uchar> &mask, ftl::cuda::GTAnalysisData *out, const ftl::rgbd::Camera &cam, float t_min, float t_max, uchar4 colour_value, bool use_disparity, hipStream_t stream ) { static constexpr int THREADS_X = 128; static constexpr int THREADS_Y = 2; const dim3 gridSize((depth.width() + THREADS_X - 1)/THREADS_X,16); const dim3 blockSize(THREADS_X, THREADS_Y); hipMemsetAsync(out, 0, sizeof(ftl::cuda::GTAnalysisData), stream); if (use_disparity) { hipLaunchKernelGGL(( gt_analysis_kernel<true, true>), dim3(gridSize), dim3(blockSize), 0, stream, colour.devicePtr(), colour.pixelPitch(), colour.width(), colour.height(), depth.devicePtr(), depth.pixelPitch(), gt.devicePtr(), gt.pixelPitch(), mask.devicePtr(), mask.pixelPitch(), out, cam, t_min, t_max, colour_value ); } else { hipLaunchKernelGGL(( gt_analysis_kernel<false, true>), dim3(gridSize), dim3(blockSize), 0, stream, colour.devicePtr(), colour.pixelPitch(), colour.width(), colour.height(), depth.devicePtr(), depth.pixelPitch(), gt.devicePtr(), gt.pixelPitch(), mask.devicePtr(), mask.pixelPitch(), out, cam, t_min, t_max, colour_value ); } cudaSafeCall(hipGetLastError()); #ifdef _DEBUG cudaSafeCall(hipDeviceSynchronize()); #endif } void ftl::cuda::gt_analysis( ftl::cuda::TextureObject<float> &depth, ftl::cuda::TextureObject<float> &gt, ftl::cuda::TextureObject<uchar> &mask, ftl::cuda::GTAnalysisData *out, const ftl::rgbd::Camera &cam, float t_min, float t_max, bool use_disparity, hipStream_t stream ) { static constexpr int THREADS_X = 128; static constexpr int THREADS_Y = 2; const dim3 gridSize((depth.width() + THREADS_X - 1)/THREADS_X, 16); const dim3 blockSize(THREADS_X, THREADS_Y); hipMemsetAsync(out, 0, sizeof(ftl::cuda::GTAnalysisData), stream); if (use_disparity) { hipLaunchKernelGGL(( gt_analysis_kernel<true, false>), dim3(gridSize), 
dim3(blockSize), 0, stream, nullptr, 0, depth.width(), depth.height(), depth.devicePtr(), depth.pixelPitch(), gt.devicePtr(), gt.pixelPitch(), mask.devicePtr(), mask.pixelPitch(), out, cam, t_min, t_max, {0,0,0,0} ); } else { hipLaunchKernelGGL(( gt_analysis_kernel<false, false>), dim3(gridSize), dim3(blockSize), 0, stream, nullptr, 0, depth.width(), depth.height(), depth.devicePtr(), depth.pixelPitch(), gt.devicePtr(), gt.pixelPitch(), mask.devicePtr(), mask.pixelPitch(), out, cam, t_min, t_max, {0,0,0,0} ); } cudaSafeCall( hipGetLastError() ); #ifdef _DEBUG cudaSafeCall(hipDeviceSynchronize()); #endif }
ec59fc292cec8cbc860a1e3eb428f787672c8658.cu
#include <ftl/operators/cuda/gt.hpp> #ifndef WARP_SIZE #define WARP_SIZE 32 #endif #define FULL_MASK 0xffffffff template <bool DISPARITY, bool VISUALISE> __global__ void gt_analysis_kernel( uchar4* __restrict__ colour, int cpitch, int width, int height, const float* __restrict__ depth, int dpitch, const float* __restrict__ gt, int gpitch, const uchar* __restrict__ mask, int mpitch, ftl::cuda::GTAnalysisData *out, ftl::rgbd::Camera cam, float t_min, float t_max, uchar4 colour_value ) { __shared__ int svalid; __shared__ int smissing; __shared__ int smissing_masked; __shared__ int smasked; __shared__ int sgood; __shared__ float serr; __shared__ float serr_sq; if (threadIdx.x == 0 && threadIdx.y == 0) { svalid = 0; smissing = 0; smissing_masked = 0; smasked = 0; sgood = 0; serr = 0.0f; serr_sq = 0.0f; } __syncthreads(); const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; int valid = 0; int missing = 0; int missing_masked = 0; int masked = 0; int good = 0; float err = 0.0f; float err_sq = 0.0f; const float numer = cam.baseline*cam.fx; if (x < width) { const float* __restrict__ gt_ptr = gt+x; const float* __restrict__ d_ptr = depth+x; const uchar* __restrict__ m_ptr = mask+x; for (STRIDE_Y(y, height)) { // TODO: Verify gt and depth pitch are same float gtval = gt_ptr[y*dpitch]; float dval = d_ptr[y*dpitch]; const int tmasked = (m_ptr[y*mpitch] == 0) ? 0 : 1; const int tinvalid = (dval <= cam.minDepth || dval >= cam.maxDepth) ? 1 : 0; const int tgtinvalid = (gtval > cam.minDepth && gtval < cam.maxDepth) ? 
0 : 1; if (tinvalid == 0 && tgtinvalid == 0) { // if there is valid value in both (gt and depth) valid += 1; if (DISPARITY) { dval = (numer / dval); gtval = (numer / gtval); } const float e = fabsf(dval-gtval); if ((t_min < e) && (e <= t_max)) { good += 1; err += e; err_sq += e*e; if (VISUALISE) { colour[x+y*cpitch] = colour_value; } } } else if (tinvalid == 0 && tmasked == 1 && tgtinvalid == 1) { // masked and not missing (but no gt value) if (VISUALISE) { colour[x+y*cpitch] = {192, 0, 192, 255}; } // magenta } else if (tinvalid == 1 && (tmasked == 1 || tgtinvalid == 1)) { // missing and (masked or missing gt) if (VISUALISE) { colour[x+y*cpitch] = {0, 0, 0, 255}; } // black missing_masked += 1; } else if (tinvalid == 1) { // missing value (not masked) if (VISUALISE) { colour[x+y*cpitch] = {224, 32, 32, 255}; } // blue missing += 1; } masked += (tmasked == 1 || tgtinvalid == 1) ? 1 : 0; } } // Warp aggregate #pragma unroll for (int i = WARP_SIZE/2; i > 0; i /= 2) { valid += __shfl_xor_sync(FULL_MASK, valid, i, WARP_SIZE); missing += __shfl_xor_sync(FULL_MASK, missing, i, WARP_SIZE); missing_masked += __shfl_xor_sync(FULL_MASK, missing_masked, i, WARP_SIZE); masked += __shfl_xor_sync(FULL_MASK, masked, i, WARP_SIZE); good += __shfl_xor_sync(FULL_MASK, good, i, WARP_SIZE); err += __shfl_xor_sync(FULL_MASK, err, i, WARP_SIZE); err_sq += __shfl_xor_sync(FULL_MASK, err_sq, i, WARP_SIZE); } // Block aggregate if (threadIdx.x % WARP_SIZE == 0) { atomicAdd(&svalid, valid); atomicAdd(&smissing, missing); atomicAdd(&smissing_masked, missing_masked); atomicAdd(&smasked, masked); atomicAdd(&sgood, good); atomicAdd(&serr, err); atomicAdd(&serr_sq, err_sq); } __syncthreads(); // Global aggregate if (threadIdx.x == 0 && threadIdx.y == 0) { atomicAdd(&out->valid, svalid); atomicAdd(&out->missing, smissing); atomicAdd(&out->missing_masked, smissing_masked); atomicAdd(&out->masked, smasked); atomicAdd(&out->good, sgood); atomicAdd(&out->err, serr); atomicAdd(&out->err_sq, serr_sq); 
} } void ftl::cuda::gt_analysis( ftl::cuda::TextureObject<uchar4> &colour, ftl::cuda::TextureObject<float> &depth, ftl::cuda::TextureObject<float> &gt, ftl::cuda::TextureObject<uchar> &mask, ftl::cuda::GTAnalysisData *out, const ftl::rgbd::Camera &cam, float t_min, float t_max, uchar4 colour_value, bool use_disparity, cudaStream_t stream ) { static constexpr int THREADS_X = 128; static constexpr int THREADS_Y = 2; const dim3 gridSize((depth.width() + THREADS_X - 1)/THREADS_X,16); const dim3 blockSize(THREADS_X, THREADS_Y); cudaMemsetAsync(out, 0, sizeof(ftl::cuda::GTAnalysisData), stream); if (use_disparity) { gt_analysis_kernel<true, true><<<gridSize, blockSize, 0, stream>>>( colour.devicePtr(), colour.pixelPitch(), colour.width(), colour.height(), depth.devicePtr(), depth.pixelPitch(), gt.devicePtr(), gt.pixelPitch(), mask.devicePtr(), mask.pixelPitch(), out, cam, t_min, t_max, colour_value ); } else { gt_analysis_kernel<false, true><<<gridSize, blockSize, 0, stream>>>( colour.devicePtr(), colour.pixelPitch(), colour.width(), colour.height(), depth.devicePtr(), depth.pixelPitch(), gt.devicePtr(), gt.pixelPitch(), mask.devicePtr(), mask.pixelPitch(), out, cam, t_min, t_max, colour_value ); } cudaSafeCall(cudaGetLastError()); #ifdef _DEBUG cudaSafeCall(cudaDeviceSynchronize()); #endif } void ftl::cuda::gt_analysis( ftl::cuda::TextureObject<float> &depth, ftl::cuda::TextureObject<float> &gt, ftl::cuda::TextureObject<uchar> &mask, ftl::cuda::GTAnalysisData *out, const ftl::rgbd::Camera &cam, float t_min, float t_max, bool use_disparity, cudaStream_t stream ) { static constexpr int THREADS_X = 128; static constexpr int THREADS_Y = 2; const dim3 gridSize((depth.width() + THREADS_X - 1)/THREADS_X, 16); const dim3 blockSize(THREADS_X, THREADS_Y); cudaMemsetAsync(out, 0, sizeof(ftl::cuda::GTAnalysisData), stream); if (use_disparity) { gt_analysis_kernel<true, false><<<gridSize, blockSize, 0, stream>>>( nullptr, 0, depth.width(), depth.height(), depth.devicePtr(), 
depth.pixelPitch(), gt.devicePtr(), gt.pixelPitch(), mask.devicePtr(), mask.pixelPitch(), out, cam, t_min, t_max, {0,0,0,0} ); } else { gt_analysis_kernel<false, false><<<gridSize, blockSize, 0, stream>>>( nullptr, 0, depth.width(), depth.height(), depth.devicePtr(), depth.pixelPitch(), gt.devicePtr(), gt.pixelPitch(), mask.devicePtr(), mask.pixelPitch(), out, cam, t_min, t_max, {0,0,0,0} ); } cudaSafeCall( cudaGetLastError() ); #ifdef _DEBUG cudaSafeCall(cudaDeviceSynchronize()); #endif }
8b037c0662071906b327cd47aec90f16a8a817c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //typedef float Real; //#define USE_SINGLE_PRECISION typedef double Real; #define USE_DOUBLE_PRECISION const Real kGamma = 5.0f/3.0f; #include "../library/math.h" #include "hlld.inl" __global__ void hlld_flux_kernel (Real *Bx, Real *dens_L, Real *dens_R, Real *velx_L, Real *velx_R, Real *vely_L, Real *vely_R, Real *velz_L, Real *velz_R, Real *pres_L, Real *pres_R, Real *By_L, Real *By_R, Real *Bz_L, Real *Bz_R, Real *Fdens, Real *Fmomx, Real *Fmomy, Real *Fmomz, Real *Fetot, Real *F_By, Real *F_Bz) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; hlld_flux(Bx[tid], dens_L[tid], dens_R[tid], velx_L[tid], velx_R[tid], vely_L[tid], vely_R[tid], velz_L[tid], velz_R[tid], pres_L[tid], pres_R[tid], By_L[tid], By_R[tid], Bz_L[tid], Bz_R[tid], Fdens[tid], Fmomx[tid], Fmomy[tid], Fmomz[tid], Fetot[tid], F_By[tid], F_Bz[tid]); } int main () {}
8b037c0662071906b327cd47aec90f16a8a817c9.cu
//typedef float Real; //#define USE_SINGLE_PRECISION typedef double Real; #define USE_DOUBLE_PRECISION const Real kGamma = 5.0f/3.0f; #include "../library/math.h" #include "hlld.inl" __global__ void hlld_flux_kernel (Real *Bx, Real *dens_L, Real *dens_R, Real *velx_L, Real *velx_R, Real *vely_L, Real *vely_R, Real *velz_L, Real *velz_R, Real *pres_L, Real *pres_R, Real *By_L, Real *By_R, Real *Bz_L, Real *Bz_R, Real *Fdens, Real *Fmomx, Real *Fmomy, Real *Fmomz, Real *Fetot, Real *F_By, Real *F_Bz) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; hlld_flux(Bx[tid], dens_L[tid], dens_R[tid], velx_L[tid], velx_R[tid], vely_L[tid], vely_R[tid], velz_L[tid], velz_R[tid], pres_L[tid], pres_R[tid], By_L[tid], By_R[tid], Bz_L[tid], Bz_R[tid], Fdens[tid], Fmomx[tid], Fmomy[tid], Fmomz[tid], Fetot[tid], F_By[tid], F_Bz[tid]); } int main () {}
205d3fd8cb969d7fb5a1971c65e0a7727f9c6520.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <THHUNN/THHUNN.h> #include <THHUNN/common.h> #include <THH/THHThrustAllocator.cuh> #include <thrust/unique.h> #include <TH/THHalf.h> #include <THH/THHNumerics.cuh> #include <THH/THHTensorSort.cuh> #include <THH/THHTensorMathReduce.cuh> #include <c10/macros/Macros.h> template <typename Dtype, typename Acctype> __global__ void cunn_LookupTable_accGradParametersKernelByFeature (int64_t *indices, Dtype *grad, Dtype *grad_weight, Dtype scale, ptrdiff_t n, int64_t stride, int padding_idx) { extern __shared__ char buf[]; Acctype* smem = (Acctype*)buf; Acctype* my_s = smem + C10_WARP_SIZE*threadIdx.y; int* indices_batch = (int*)(buf + sizeof(Acctype)*C10_WARP_SIZE*blockDim.y); const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y) { // Entire block cooperates to load a batch of 1024 indices to process int tid = threadIdx.x + threadIdx.y*blockDim.x; if(batch_start + tid < n) indices_batch[tid] = (int)(indices[batch_start + tid]); // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32 for(int chunk_start = batch_start; chunk_start < n; chunk_start += blockDim.y) { // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group // leaders are done with their accumulates before other warps start loading again. __syncthreads(); int n_this_chunk = (n - chunk_start) < blockDim.y ? 
(n - chunk_start) : blockDim.y; int src_row = chunk_start + threadIdx.y; int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight // All warps load their smem segments with incoming grad data if(src_row < n && f < s && dst_row != padding_idx) my_s[threadIdx.x] = ScalarConvert<Dtype, Acctype>::to(scale*grad[src_row*stride + f]); __syncthreads(); // To ensure determinism, we can't just have each warp add its grad data to its dst_row. // We need to check if any other warps pulled grad data targeting dst_row. // If so, we elect the first warp in each matching group as the leader. // Each leader warp serializes the accumulates targeting dst_row in shared memory, // then finishes by adding the accumulated buffer to dst_row in grad_weight. if(dst_row != padding_idx && src_row < n) // Per-warp exit condition { int match_found_this_thread = (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]); if(threadIdx.x >= n_this_chunk) match_found_this_thread = 0; #ifdef __HIP_PLATFORM_HCC__ unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffsll(matchmask) - 1; #else unsigned int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffs(matchmask) - 1; #endif if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader { matchmask ^= (1 << first_remaining_peer); while(matchmask) { #ifdef __HIP_PLATFORM_HCC__ first_remaining_peer = __ffsll(matchmask) - 1; #else first_remaining_peer = __ffs(matchmask) - 1; #endif my_s[threadIdx.x] += smem[threadIdx.x + C10_WARP_SIZE*first_remaining_peer]; matchmask ^= (1 << first_remaining_peer); } if(f < s) grad_weight[dst_row*stride + f] += ScalarConvert<Acctype, Dtype>::to(my_s[threadIdx.x]); } } } } } template <typename Dtype, typename Acctype> __global__ void cunn_LookupTable_accGradParametersKernel( int64_t *input, int64_t *indices, Dtype *gradOutput, Dtype *gradWeight, int64_t *count, Dtype 
defaultScale, ptrdiff_t numel, int64_t stride, int paddingValue) { int idx = blockIdx.x * 4 + threadIdx.y; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. // // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values proceessed by each thread (grain size) const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1]) && input[idx] != paddingValue) { do { const int startFeature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weightRow = ((int) input[idx]) * stride; const int gradOutputRow = ((int) indices[idx]) * stride; const Acctype scale = count ? ScalarConvert<Dtype, Acctype>::to(defaultScale) / count[idx] : ScalarConvert<Dtype, Acctype>::to(defaultScale); Acctype gradient[SZ]; Acctype weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int featureDim = startFeature + ii * C10_WARP_SIZE; if (featureDim < stride) { gradient[ii] = ScalarConvert<Dtype, Acctype>::to(gradOutput[gradOutputRow + featureDim]); weight[ii] = ScalarConvert<Dtype, Acctype>::to(gradWeight[weightRow + featureDim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int featureDim = startFeature + ii * C10_WARP_SIZE; if (featureDim < stride) { gradWeight[weightRow + featureDim] = ScalarConvert<Acctype, Dtype>::to(weight[ii]); } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } template <typename DType, typename AccType, int Norm> struct FastPow { __host__ __device__ static inline AccType pow(DType x, AccType norm) { AccType xA = ScalarConvert<DType, AccType>::to(x); return ::pow(std::abs(xA), norm); } }; template <typename DType, typename AccType> struct FastPow<DType, AccType, 1> { __host__ __device__ static inline 
AccType pow(DType x, AccType _) { AccType xA = ScalarConvert<DType, AccType>::to(x); return std::abs(xA); } }; template <typename DType, typename AccType> struct FastPow<DType, AccType, 2> { __host__ __device__ static inline AccType pow(DType x, AccType _) { AccType xA = ScalarConvert<DType, AccType>::to(x); return xA * xA; } }; /* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */ template <typename DType, typename AccType, typename IndexType, int Norm> __global__ void calculate_norms_and_renorm(DType *weights, THCIndex_t *indices, AccType normType, AccType maxNorm, IndexType dim) { // Some casting hacks since dynamic shared memory and templates don't work together: extern __shared__ unsigned char smem[]; AccType *sdata = reinterpret_cast<AccType *>(smem); IndexType tid = threadIdx.x; IndexType baseIndex = (indices[blockIdx.x]) * dim; AccType accZero = ScalarConvert<int, AccType>::to(0); AccType v = accZero; for (IndexType i = tid; i < dim; i += blockDim.x) { v += FastPow<DType, AccType, Norm>::pow(weights[baseIndex + i], normType); } v = reduceBlock<AccType, ReduceAdd<AccType>> (sdata, blockDim.x, v, ReduceAdd<AccType>(), accZero); if (tid == 0) { sdata[0] = ::pow(v, THCNumerics<AccType>::div(ScalarConvert<int, AccType>::to(1), normType) ); } __syncthreads(); // now we renormalize the blocks that need it if (sdata[0] > maxNorm) { DType factor = ScalarConvert<AccType, DType>::to(maxNorm / (sdata[0] + 1e-7)); for (IndexType i = tid; i < dim; i += blockDim.x) { weights[baseIndex + i] *= factor; } } } #include <THHUNN/generic/LookupTable.hip> #include <THH/THHGenerateFloatTypes.h>
205d3fd8cb969d7fb5a1971c65e0a7727f9c6520.cu
#include <THCUNN/THCUNN.h> #include <THCUNN/common.h> #include <THC/THCThrustAllocator.cuh> #include <thrust/unique.h> #include <TH/THHalf.h> #include <THC/THCNumerics.cuh> #include <THC/THCTensorSort.cuh> #include <THC/THCTensorMathReduce.cuh> #include <c10/macros/Macros.h> template <typename Dtype, typename Acctype> __global__ void cunn_LookupTable_accGradParametersKernelByFeature (int64_t *indices, Dtype *grad, Dtype *grad_weight, Dtype scale, ptrdiff_t n, int64_t stride, int padding_idx) { extern __shared__ char buf[]; Acctype* smem = (Acctype*)buf; Acctype* my_s = smem + C10_WARP_SIZE*threadIdx.y; int* indices_batch = (int*)(buf + sizeof(Acctype)*C10_WARP_SIZE*blockDim.y); const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y) { // Entire block cooperates to load a batch of 1024 indices to process int tid = threadIdx.x + threadIdx.y*blockDim.x; if(batch_start + tid < n) indices_batch[tid] = (int)(indices[batch_start + tid]); // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32 for(int chunk_start = batch_start; chunk_start < n; chunk_start += blockDim.y) { // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group // leaders are done with their accumulates before other warps start loading again. __syncthreads(); int n_this_chunk = (n - chunk_start) < blockDim.y ? 
(n - chunk_start) : blockDim.y; int src_row = chunk_start + threadIdx.y; int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight // All warps load their smem segments with incoming grad data if(src_row < n && f < s && dst_row != padding_idx) my_s[threadIdx.x] = ScalarConvert<Dtype, Acctype>::to(scale*grad[src_row*stride + f]); __syncthreads(); // To ensure determinism, we can't just have each warp add its grad data to its dst_row. // We need to check if any other warps pulled grad data targeting dst_row. // If so, we elect the first warp in each matching group as the leader. // Each leader warp serializes the accumulates targeting dst_row in shared memory, // then finishes by adding the accumulated buffer to dst_row in grad_weight. if(dst_row != padding_idx && src_row < n) // Per-warp exit condition { int match_found_this_thread = (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]); if(threadIdx.x >= n_this_chunk) match_found_this_thread = 0; #ifdef __HIP_PLATFORM_HCC__ unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffsll(matchmask) - 1; #else unsigned int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffs(matchmask) - 1; #endif if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader { matchmask ^= (1 << first_remaining_peer); while(matchmask) { #ifdef __HIP_PLATFORM_HCC__ first_remaining_peer = __ffsll(matchmask) - 1; #else first_remaining_peer = __ffs(matchmask) - 1; #endif my_s[threadIdx.x] += smem[threadIdx.x + C10_WARP_SIZE*first_remaining_peer]; matchmask ^= (1 << first_remaining_peer); } if(f < s) grad_weight[dst_row*stride + f] += ScalarConvert<Acctype, Dtype>::to(my_s[threadIdx.x]); } } } } } template <typename Dtype, typename Acctype> __global__ void cunn_LookupTable_accGradParametersKernel( int64_t *input, int64_t *indices, Dtype *gradOutput, Dtype *gradWeight, int64_t *count, Dtype 
defaultScale, ptrdiff_t numel, int64_t stride, int paddingValue) { int idx = blockIdx.x * 4 + threadIdx.y; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. // // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values proceessed by each thread (grain size) const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1]) && input[idx] != paddingValue) { do { const int startFeature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weightRow = ((int) input[idx]) * stride; const int gradOutputRow = ((int) indices[idx]) * stride; const Acctype scale = count ? ScalarConvert<Dtype, Acctype>::to(defaultScale) / count[idx] : ScalarConvert<Dtype, Acctype>::to(defaultScale); Acctype gradient[SZ]; Acctype weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int featureDim = startFeature + ii * C10_WARP_SIZE; if (featureDim < stride) { gradient[ii] = ScalarConvert<Dtype, Acctype>::to(gradOutput[gradOutputRow + featureDim]); weight[ii] = ScalarConvert<Dtype, Acctype>::to(gradWeight[weightRow + featureDim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int featureDim = startFeature + ii * C10_WARP_SIZE; if (featureDim < stride) { gradWeight[weightRow + featureDim] = ScalarConvert<Acctype, Dtype>::to(weight[ii]); } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } template <typename DType, typename AccType, int Norm> struct FastPow { __host__ __device__ static inline AccType pow(DType x, AccType norm) { AccType xA = ScalarConvert<DType, AccType>::to(x); return std::pow(std::abs(xA), norm); } }; template <typename DType, typename AccType> struct FastPow<DType, AccType, 1> { __host__ __device__ static 
inline AccType pow(DType x, AccType _) { AccType xA = ScalarConvert<DType, AccType>::to(x); return std::abs(xA); } }; template <typename DType, typename AccType> struct FastPow<DType, AccType, 2> { __host__ __device__ static inline AccType pow(DType x, AccType _) { AccType xA = ScalarConvert<DType, AccType>::to(x); return xA * xA; } }; /* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */ template <typename DType, typename AccType, typename IndexType, int Norm> __global__ void calculate_norms_and_renorm(DType *weights, THCIndex_t *indices, AccType normType, AccType maxNorm, IndexType dim) { // Some casting hacks since dynamic shared memory and templates don't work together: extern __shared__ unsigned char smem[]; AccType *sdata = reinterpret_cast<AccType *>(smem); IndexType tid = threadIdx.x; IndexType baseIndex = (indices[blockIdx.x]) * dim; AccType accZero = ScalarConvert<int, AccType>::to(0); AccType v = accZero; for (IndexType i = tid; i < dim; i += blockDim.x) { v += FastPow<DType, AccType, Norm>::pow(weights[baseIndex + i], normType); } v = reduceBlock<AccType, ReduceAdd<AccType>> (sdata, blockDim.x, v, ReduceAdd<AccType>(), accZero); if (tid == 0) { sdata[0] = std::pow(v, THCNumerics<AccType>::div(ScalarConvert<int, AccType>::to(1), normType) ); } __syncthreads(); // now we renormalize the blocks that need it if (sdata[0] > maxNorm) { DType factor = ScalarConvert<AccType, DType>::to(maxNorm / (sdata[0] + 1e-7)); for (IndexType i = tid; i < dim; i += blockDim.x) { weights[baseIndex + i] *= factor; } } } #include <THCUNN/generic/LookupTable.cu> #include <THC/THCGenerateFloatTypes.h>
9838920b87e84391ec9acd48d3f5c6fdd76e70f4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2015 by Contributors * \file weight_generate-inl.h * \brief * \author ZhengKai Jiang */ #include "./align_data-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #include "../../common/cuda_utils.h" #include "../mxnet_op.h" namespace mshadow { namespace cuda { inline __device__ int offset5d(int n, int k, int c, int h, int w, int N, int K, int C, int H, int W) { return n*K*C*H*W + k*C*H*W + c*H*W + h*W + w; } inline __device__ int offset(int n, int c, int h, int w, int N, int C, int H, int W) { return n*C*H*W + c*H*W + h*W + w; } template<typename DType> __global__ void AlignDataForwardKernel(const int count, int N, int K, int C, int H, int W, const DType* bottom_data, const DType* bottom_weight, DType* top_data ) { for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { const int w = index % W; const int h = (index / W) % H; const int c = (index / (H * W)) % C; const int n = (index / (C * H * W)); for (int i=0;i<K;i++) { *(top_data + index) += bottom_data[offset5d(n,i,c,h,w,N,K,C,H,W)]*bottom_weight[offset(n,i,h,w,N,K,H,W)]; } } // cuda_kernel_loop } template<typename DType> inline void AlignDataPointsForward(const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 5, DType> &data, const Tensor<gpu, 4, DType> &weight) { const DType *bottom_data = data.dptr_; const DType *bottom_weight = weight.dptr_; DType *top_data = out.dptr_; const int count = out.shape_.Size(); // the number of threads int N = data.size(0); int K = data.size(1); int C = data.size(2); int H = data.size(3); int W = data.size(4); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "Align 
Data Forward"); hipStream_t stream = Stream<gpu>::GetStream(out.stream_); hipLaunchKernelGGL(( AlignDataForwardKernel<DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, count, N, K, C, H, W, bottom_data, bottom_weight, top_data); } template<typename DType> __global__ void AlignDataBackwardKernel(const int count, int N, int K, int C, int H, int W, const DType* grad_out, const DType* bottom_data, const DType* bottom_weight, DType* grad_data, DType* grad_weight) { for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { const int w = index % W; const int h = (index / W) % H; const int c = (index / (H * W)) % C; const int n = (index / (C * H * W)); for (int i=0;i<K;i++) { atomicAdd(grad_data+offset5d(n,i,c,h,w,N,K,C,H,W),grad_out[index]*bottom_weight[offset(n,i,h,w,N,K,H,W)]); atomicAdd(grad_weight+offset(n,i,h,w,N,K,H,W),grad_out[index]*bottom_data[offset5d(n,i,c,h,w,N,K,C,H,W)]); } } } template<typename DType> inline void AlignDataBackward(const Tensor<gpu, 5, DType> &grad_data, const Tensor<gpu, 4, DType> &grad_weight, const Tensor<gpu, 5, DType> &data, const Tensor<gpu, 4, DType> &weight, const Tensor<gpu, 4, DType> &grad_out) { const DType *top_grad = grad_out.dptr_; const DType *bottom_data = data.dptr_; const DType *bottom_weight = weight.dptr_; DType *bottom_grad_data = grad_data.dptr_; DType *bottom_grad_weight = grad_weight.dptr_; const int count = grad_out.shape_.Size(); // the number of threads int N = data.size(0); int K = data.size(1); int C = data.size(2); int H = data.size(3); int W = data.size(4); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "Align Data Backward"); hipStream_t stream_data = Stream<gpu>::GetStream(grad_data.stream_); hipLaunchKernelGGL(( AlignDataBackwardKernel<DType>), 
dim3(dimGrid), dim3(dimBlock), 0, stream_data, count, N, K, C, H, W, top_grad, bottom_data, bottom_weight, bottom_grad_data, bottom_grad_weight); } } // namespace cuda template<typename DType> inline void AlignDataForward(const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 5, DType> &data, const Tensor<gpu, 4, DType> &weight) { cuda::AlignDataPointsForward(out, data, weight); } template<typename DType> inline void AlignDataBackward(const Tensor<gpu, 5, DType> &grad_data, const Tensor<gpu, 4, DType> &grad_weight, const Tensor<gpu, 5, DType> &data, const Tensor<gpu, 4, DType> &weight, const Tensor<gpu, 4, DType> &grad_out) { cuda::AlignDataBackward(grad_data, grad_weight, data, weight, grad_out); } } //namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(AlignDataParam param, int dtype) { Operator* op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new AlignDataOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
9838920b87e84391ec9acd48d3f5c6fdd76e70f4.cu
/*! * Copyright (c) 2015 by Contributors * \file weight_generate-inl.h * \brief * \author ZhengKai Jiang */ #include "./align_data-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #include "../../common/cuda_utils.h" #include "../mxnet_op.h" namespace mshadow { namespace cuda { inline __device__ int offset5d(int n, int k, int c, int h, int w, int N, int K, int C, int H, int W) { return n*K*C*H*W + k*C*H*W + c*H*W + h*W + w; } inline __device__ int offset(int n, int c, int h, int w, int N, int C, int H, int W) { return n*C*H*W + c*H*W + h*W + w; } template<typename DType> __global__ void AlignDataForwardKernel(const int count, int N, int K, int C, int H, int W, const DType* bottom_data, const DType* bottom_weight, DType* top_data ) { for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { const int w = index % W; const int h = (index / W) % H; const int c = (index / (H * W)) % C; const int n = (index / (C * H * W)); for (int i=0;i<K;i++) { *(top_data + index) += bottom_data[offset5d(n,i,c,h,w,N,K,C,H,W)]*bottom_weight[offset(n,i,h,w,N,K,H,W)]; } } // cuda_kernel_loop } template<typename DType> inline void AlignDataPointsForward(const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 5, DType> &data, const Tensor<gpu, 4, DType> &weight) { const DType *bottom_data = data.dptr_; const DType *bottom_weight = weight.dptr_; DType *top_data = out.dptr_; const int count = out.shape_.Size(); // the number of threads int N = data.size(0); int K = data.size(1); int C = data.size(2); int H = data.size(3); int W = data.size(4); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "Align Data Forward"); cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); 
AlignDataForwardKernel<DType><<<dimGrid, dimBlock, 0, stream>>>(count, N, K, C, H, W, bottom_data, bottom_weight, top_data); } template<typename DType> __global__ void AlignDataBackwardKernel(const int count, int N, int K, int C, int H, int W, const DType* grad_out, const DType* bottom_data, const DType* bottom_weight, DType* grad_data, DType* grad_weight) { for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { const int w = index % W; const int h = (index / W) % H; const int c = (index / (H * W)) % C; const int n = (index / (C * H * W)); for (int i=0;i<K;i++) { atomicAdd(grad_data+offset5d(n,i,c,h,w,N,K,C,H,W),grad_out[index]*bottom_weight[offset(n,i,h,w,N,K,H,W)]); atomicAdd(grad_weight+offset(n,i,h,w,N,K,H,W),grad_out[index]*bottom_data[offset5d(n,i,c,h,w,N,K,C,H,W)]); } } } template<typename DType> inline void AlignDataBackward(const Tensor<gpu, 5, DType> &grad_data, const Tensor<gpu, 4, DType> &grad_weight, const Tensor<gpu, 5, DType> &data, const Tensor<gpu, 4, DType> &weight, const Tensor<gpu, 4, DType> &grad_out) { const DType *top_grad = grad_out.dptr_; const DType *bottom_data = data.dptr_; const DType *bottom_weight = weight.dptr_; DType *bottom_grad_data = grad_data.dptr_; DType *bottom_grad_weight = grad_weight.dptr_; const int count = grad_out.shape_.Size(); // the number of threads int N = data.size(0); int K = data.size(1); int C = data.size(2); int H = data.size(3); int W = data.size(4); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridDim, (gridSize + kMaxGridDim - 1) / kMaxGridDim); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "Align Data Backward"); cudaStream_t stream_data = Stream<gpu>::GetStream(grad_data.stream_); AlignDataBackwardKernel<DType><<<dimGrid, dimBlock, 0, stream_data>>>(count, N, K, C, H, W, top_grad, bottom_data, bottom_weight, bottom_grad_data, 
bottom_grad_weight); } } // namespace cuda template<typename DType> inline void AlignDataForward(const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 5, DType> &data, const Tensor<gpu, 4, DType> &weight) { cuda::AlignDataPointsForward(out, data, weight); } template<typename DType> inline void AlignDataBackward(const Tensor<gpu, 5, DType> &grad_data, const Tensor<gpu, 4, DType> &grad_weight, const Tensor<gpu, 5, DType> &data, const Tensor<gpu, 4, DType> &weight, const Tensor<gpu, 4, DType> &grad_out) { cuda::AlignDataBackward(grad_data, grad_weight, data, weight, grad_out); } } //namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(AlignDataParam param, int dtype) { Operator* op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new AlignDataOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
419dddc6d7f1dd3f40cb0585ae425cf84a9301c8.hip
// !!! This is a file automatically generated by hipify!!! /* * dmv_main.cu -- DMV front-end program. * * Copyright (C) 2010-2012, Computing Systems Laboratory (CSLab) * Copyright (C) 2010-2012, Vasileios Karakasis */ #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include "alloc.h" #include "dmv.h" #include "error.h" #include "gpu_util.h" #include "timer.h" #include <rocblas.h> #ifndef VALUES_MAX # define VALUES_MAX MAKE_VALUE_CONSTANT(1.) #endif #ifndef EPS # define EPS MAKE_VALUE_CONSTANT(1.e-6) #endif #ifndef NR_ITER # define NR_ITER 100 #endif static void check_result(const value_t *test, const value_t *orig, size_t n) { printf("Checking ... "); size_t i_fail = vec_equals(test, orig, n, EPS); if (!i_fail) { printf("PASSED\n"); } else { printf("FAILED (index: %ld)\n", i_fail - 1); printf("%" VALUE_FORMAT " != " "%" VALUE_FORMAT "\n", test[i_fail-1], orig[i_fail-1]); } } static void report_results(xtimer_t *timer, size_t n) { double elapsed_time = timer_elapsed_time(timer); size_t flops = 2*n*n*NR_ITER; printf("Elapsed time: %lf s\n", elapsed_time); printf("Performance: %lf Gflop/s\n", flops*1.e-9 / elapsed_time); } static void print_usage() { printf("Usage: [GPU_KERNEL=<kernel_no>] [GPU_BLOCK_SIZE=<size>] " "%s <matrix size>\n", program_name); printf("GPU_KERNEL defaults to 0\n"); printf("GPU_BLOCK_SIZE defaults to 256\n"); printf("Available kernels [id:name]:\n"); size_t i; for (i = 0; i < GPU_KERNEL_END; ++i) { printf("\t%zd:%s\n", i, gpu_kernels[i].name); } } int main(int argc, char **argv) { set_program_name(argv[0]); if (argc < 2) { warning(0, "too few arguments"); print_usage(); exit(EXIT_FAILURE); } size_t n = atoi(argv[1]); if (!n) error(0, "invalid argument: %s", argv[1]); /* Read block size and kernel to launch from the environment */ const char *env_gpu_kernel = getenv("GPU_KERNEL"); const char *env_gpu_block_size = getenv("GPU_BLOCK_SIZE"); int kernel = (env_gpu_kernel) ? 
atoi(env_gpu_kernel) : GPU_NAIVE; int block_size = (env_gpu_block_size) ? atoi(env_gpu_block_size) : 256; size_t orig_n = n; // original matrix size int grid_size = n % block_size == 0 ? n / block_size : n / block_size + 1; // grid size /* * Adjust appropriately (increase * only) the matrix size here if that helps you with your * kernel code, e.g., to avoid divergent warps. */ n = grid_size * block_size; printf("Matrix size: %zd\n", orig_n); printf("Adjusted matrix size: %zd\n", n); /* * Allocate the structures. * * Initialization to zero is crucial if you adjusted the matrix * size. */ value_t **A = (value_t **) calloc_2d(n, n, sizeof(**A)); if (!A) error(1, "alloc_2d failed"); value_t *x = (value_t *) calloc(n, sizeof(*x)); if (!x) error(1, "malloc failed"); value_t *y_serial = (value_t *) calloc(n, sizeof(*y_serial)); if (!y_serial) error(1, "malloc failed"); value_t *y = (value_t *) calloc(n, sizeof(*y)); if (!y) error(1, "malloc failed"); /* Initialize */ srand48(0); mat_init_rand(A, orig_n, VALUES_MAX); vec_init_rand(x, orig_n, VALUES_MAX); vec_init(y_serial, orig_n, MAKE_VALUE_CONSTANT(0.0)); vec_init(y, orig_n, MAKE_VALUE_CONSTANT(0.0)); /* Setup timers */ xtimer_t timer; /* Compute serial */ #ifdef SERIAL_KERNEL printf(">>>> Begin of record <<<<\n"); printf("Serial version:\n"); timer_clear(&timer); timer_start(&timer); for (size_t i = 0; i < NR_ITER; ++i) dmv_serial(A, x, y_serial, orig_n); timer_stop(&timer); report_results(&timer, orig_n); printf(">>>> End of record <<<<\n"); #endif // SERIAL_KERNEL #ifdef OPENMP_KERNEL /* Compute OpenMP */ printf(">>>> Begin of record <<<<\n"); printf("OpenMP version:\n"); timer_clear(&timer); timer_start(&timer); for (size_t i = 0; i < NR_ITER; ++i) dmv_omp(A, x, y, orig_n); timer_stop(&timer); #ifndef _NOCHECK_ check_result(y, y_serial, orig_n); #endif report_results(&timer, orig_n); printf(">>>> End of record <<<<\n"); #endif // OPENMP_KERNEL #ifdef GPU_KERNEL /* * Set up the blocks, grid and shared memory depending 
on * the kernel. Make any transformations to the input * matrix here. */ if (kernel > 0) mat_transpose(A, n); dim3 gpu_block(block_size, 1); // set up the block dimensions dim3 gpu_grid(grid_size, grid_size); // set up the grid dimensions size_t shmem_size = 0; // set up the shared memory size if (kernel == 2) { shmem_size = block_size * sizeof(value_t); } /* Cublas initializations */ value_t alpha=1.0, beta=0; hipblasHandle_t handle; hipblasCreate(&handle); printf(">>>> Begin of record <<<<\n"); printf("Block size: %dx%d\n", gpu_block.x, gpu_block.y); printf("Grid size : %dx%d\n", gpu_grid.x, gpu_grid.y); printf("Shared memory size: %ld bytes\n", shmem_size); /* GPU allocations */ value_t *gpu_A = (value_t *) gpu_alloc(n*n*sizeof(*gpu_A)); if (!gpu_A) error(0, "gpu_alloc failed: %s", gpu_get_last_errmsg()); value_t *gpu_x = (value_t *) gpu_alloc(n*sizeof(*gpu_x)); if (!gpu_x) error(0, "gpu_alloc failed: %s", gpu_get_last_errmsg()); value_t *gpu_y = (value_t *) gpu_alloc(n*sizeof(*gpu_y)); if (!gpu_y) error(0, "gpu_alloc failed: %s", gpu_get_last_errmsg()); /* Copy data to GPU */ if (copy_to_gpu(A[0], gpu_A, n*n*sizeof(*gpu_A)) < 0) error(0, "copy_to_gpu failed: %s", gpu_get_last_errmsg()); if (copy_to_gpu(x, gpu_x, n*sizeof(*gpu_x)) < 0) error(0, "copy_to_gpu failed: %s", gpu_get_last_errmsg()); /* Reset y and copy it to GPU */ vec_init(y, n, MAKE_VALUE_CONSTANT(0.0)); if (copy_to_gpu(y, gpu_y, n*sizeof(*gpu_y)) < 0) error(0, "copy_to_gpu failed: %s", gpu_get_last_errmsg()); if (kernel >= GPU_KERNEL_END) printf("GPU kernel version: cuBLAS\n"); // error(0, "the requested kernel does not exist"); else printf("GPU kernel version: %s\n", gpu_kernels[kernel].name); /* Execute and time the kernel */ timer_clear(&timer); timer_start(&timer); for (size_t i = 0; i < NR_ITER; ++i) { if (kernel < GPU_KERNEL_END) { gpu_kernels[kernel]hipLaunchKernelGGL((.fn), dim3(gpu_grid),dim3(gpu_block),shmem_size, 0, gpu_A, gpu_x, gpu_y, n); } else { hipblasSgemv(handle, HIPBLAS_OP_N, n, 
n, &alpha, gpu_A, n, gpu_x, 1, &beta, gpu_y, 1); } #ifdef _DEBUG_ hipError_t err; if ( (err = hipGetLastError()) != hipSuccess) error(0, "gpu kernel failed to launch: %s", gpu_get_errmsg(err)); #endif hipDeviceSynchronize(); } timer_stop(&timer); /* Copy result back to host and check */ if (copy_from_gpu(y, gpu_y, n*sizeof(*y)) < 0) error(0, "copy_from_gpu failed: %s", gpu_get_last_errmsg()); /* Fix resuts */ if (kernel < 3) { for (int i=0; i<n; i++) { y[i] /= NR_ITER; } } #ifndef _NOCHECK_ check_result(y, y_serial, orig_n); #endif report_results(&timer, orig_n); printf(">>>> End of record <<<<\n"); #endif // GPU_KERNEL /* Free resources on host */ free_2d((void **) A); free(x); free(y); free(y_serial); #ifdef GPU_KERNEL /* Free resources on GPU */ gpu_free(gpu_A); gpu_free(gpu_x); gpu_free(gpu_y); #endif // GPU_KERNEL return EXIT_SUCCESS; }
419dddc6d7f1dd3f40cb0585ae425cf84a9301c8.cu
/* * dmv_main.cu -- DMV front-end program. * * Copyright (C) 2010-2012, Computing Systems Laboratory (CSLab) * Copyright (C) 2010-2012, Vasileios Karakasis */ #include <stdlib.h> #include <stdio.h> #include <cuda.h> #include "alloc.h" #include "dmv.h" #include "error.h" #include "gpu_util.h" #include "timer.h" #include <cublas_v2.h> #ifndef VALUES_MAX # define VALUES_MAX MAKE_VALUE_CONSTANT(1.) #endif #ifndef EPS # define EPS MAKE_VALUE_CONSTANT(1.e-6) #endif #ifndef NR_ITER # define NR_ITER 100 #endif static void check_result(const value_t *test, const value_t *orig, size_t n) { printf("Checking ... "); size_t i_fail = vec_equals(test, orig, n, EPS); if (!i_fail) { printf("PASSED\n"); } else { printf("FAILED (index: %ld)\n", i_fail - 1); printf("%" VALUE_FORMAT " != " "%" VALUE_FORMAT "\n", test[i_fail-1], orig[i_fail-1]); } } static void report_results(xtimer_t *timer, size_t n) { double elapsed_time = timer_elapsed_time(timer); size_t flops = 2*n*n*NR_ITER; printf("Elapsed time: %lf s\n", elapsed_time); printf("Performance: %lf Gflop/s\n", flops*1.e-9 / elapsed_time); } static void print_usage() { printf("Usage: [GPU_KERNEL=<kernel_no>] [GPU_BLOCK_SIZE=<size>] " "%s <matrix size>\n", program_name); printf("GPU_KERNEL defaults to 0\n"); printf("GPU_BLOCK_SIZE defaults to 256\n"); printf("Available kernels [id:name]:\n"); size_t i; for (i = 0; i < GPU_KERNEL_END; ++i) { printf("\t%zd:%s\n", i, gpu_kernels[i].name); } } int main(int argc, char **argv) { set_program_name(argv[0]); if (argc < 2) { warning(0, "too few arguments"); print_usage(); exit(EXIT_FAILURE); } size_t n = atoi(argv[1]); if (!n) error(0, "invalid argument: %s", argv[1]); /* Read block size and kernel to launch from the environment */ const char *env_gpu_kernel = getenv("GPU_KERNEL"); const char *env_gpu_block_size = getenv("GPU_BLOCK_SIZE"); int kernel = (env_gpu_kernel) ? atoi(env_gpu_kernel) : GPU_NAIVE; int block_size = (env_gpu_block_size) ? 
atoi(env_gpu_block_size) : 256; size_t orig_n = n; // original matrix size int grid_size = n % block_size == 0 ? n / block_size : n / block_size + 1; // grid size /* * Adjust appropriately (increase * only) the matrix size here if that helps you with your * kernel code, e.g., to avoid divergent warps. */ n = grid_size * block_size; printf("Matrix size: %zd\n", orig_n); printf("Adjusted matrix size: %zd\n", n); /* * Allocate the structures. * * Initialization to zero is crucial if you adjusted the matrix * size. */ value_t **A = (value_t **) calloc_2d(n, n, sizeof(**A)); if (!A) error(1, "alloc_2d failed"); value_t *x = (value_t *) calloc(n, sizeof(*x)); if (!x) error(1, "malloc failed"); value_t *y_serial = (value_t *) calloc(n, sizeof(*y_serial)); if (!y_serial) error(1, "malloc failed"); value_t *y = (value_t *) calloc(n, sizeof(*y)); if (!y) error(1, "malloc failed"); /* Initialize */ srand48(0); mat_init_rand(A, orig_n, VALUES_MAX); vec_init_rand(x, orig_n, VALUES_MAX); vec_init(y_serial, orig_n, MAKE_VALUE_CONSTANT(0.0)); vec_init(y, orig_n, MAKE_VALUE_CONSTANT(0.0)); /* Setup timers */ xtimer_t timer; /* Compute serial */ #ifdef SERIAL_KERNEL printf(">>>> Begin of record <<<<\n"); printf("Serial version:\n"); timer_clear(&timer); timer_start(&timer); for (size_t i = 0; i < NR_ITER; ++i) dmv_serial(A, x, y_serial, orig_n); timer_stop(&timer); report_results(&timer, orig_n); printf(">>>> End of record <<<<\n"); #endif // SERIAL_KERNEL #ifdef OPENMP_KERNEL /* Compute OpenMP */ printf(">>>> Begin of record <<<<\n"); printf("OpenMP version:\n"); timer_clear(&timer); timer_start(&timer); for (size_t i = 0; i < NR_ITER; ++i) dmv_omp(A, x, y, orig_n); timer_stop(&timer); #ifndef _NOCHECK_ check_result(y, y_serial, orig_n); #endif report_results(&timer, orig_n); printf(">>>> End of record <<<<\n"); #endif // OPENMP_KERNEL #ifdef GPU_KERNEL /* * Set up the blocks, grid and shared memory depending on * the kernel. Make any transformations to the input * matrix here. 
*/ if (kernel > 0) mat_transpose(A, n); dim3 gpu_block(block_size, 1); // set up the block dimensions dim3 gpu_grid(grid_size, grid_size); // set up the grid dimensions size_t shmem_size = 0; // set up the shared memory size if (kernel == 2) { shmem_size = block_size * sizeof(value_t); } /* Cublas initializations */ value_t alpha=1.0, beta=0; cublasHandle_t handle; cublasCreate(&handle); printf(">>>> Begin of record <<<<\n"); printf("Block size: %dx%d\n", gpu_block.x, gpu_block.y); printf("Grid size : %dx%d\n", gpu_grid.x, gpu_grid.y); printf("Shared memory size: %ld bytes\n", shmem_size); /* GPU allocations */ value_t *gpu_A = (value_t *) gpu_alloc(n*n*sizeof(*gpu_A)); if (!gpu_A) error(0, "gpu_alloc failed: %s", gpu_get_last_errmsg()); value_t *gpu_x = (value_t *) gpu_alloc(n*sizeof(*gpu_x)); if (!gpu_x) error(0, "gpu_alloc failed: %s", gpu_get_last_errmsg()); value_t *gpu_y = (value_t *) gpu_alloc(n*sizeof(*gpu_y)); if (!gpu_y) error(0, "gpu_alloc failed: %s", gpu_get_last_errmsg()); /* Copy data to GPU */ if (copy_to_gpu(A[0], gpu_A, n*n*sizeof(*gpu_A)) < 0) error(0, "copy_to_gpu failed: %s", gpu_get_last_errmsg()); if (copy_to_gpu(x, gpu_x, n*sizeof(*gpu_x)) < 0) error(0, "copy_to_gpu failed: %s", gpu_get_last_errmsg()); /* Reset y and copy it to GPU */ vec_init(y, n, MAKE_VALUE_CONSTANT(0.0)); if (copy_to_gpu(y, gpu_y, n*sizeof(*gpu_y)) < 0) error(0, "copy_to_gpu failed: %s", gpu_get_last_errmsg()); if (kernel >= GPU_KERNEL_END) printf("GPU kernel version: cuBLAS\n"); // error(0, "the requested kernel does not exist"); else printf("GPU kernel version: %s\n", gpu_kernels[kernel].name); /* Execute and time the kernel */ timer_clear(&timer); timer_start(&timer); for (size_t i = 0; i < NR_ITER; ++i) { if (kernel < GPU_KERNEL_END) { gpu_kernels[kernel].fn<<<gpu_grid,gpu_block,shmem_size>>> (gpu_A, gpu_x, gpu_y, n); } else { cublasSgemv(handle, CUBLAS_OP_N, n, n, &alpha, gpu_A, n, gpu_x, 1, &beta, gpu_y, 1); } #ifdef _DEBUG_ cudaError_t err; if ( (err = 
cudaGetLastError()) != cudaSuccess) error(0, "gpu kernel failed to launch: %s", gpu_get_errmsg(err)); #endif cudaThreadSynchronize(); } timer_stop(&timer); /* Copy result back to host and check */ if (copy_from_gpu(y, gpu_y, n*sizeof(*y)) < 0) error(0, "copy_from_gpu failed: %s", gpu_get_last_errmsg()); /* Fix resuts */ if (kernel < 3) { for (int i=0; i<n; i++) { y[i] /= NR_ITER; } } #ifndef _NOCHECK_ check_result(y, y_serial, orig_n); #endif report_results(&timer, orig_n); printf(">>>> End of record <<<<\n"); #endif // GPU_KERNEL /* Free resources on host */ free_2d((void **) A); free(x); free(y); free(y_serial); #ifdef GPU_KERNEL /* Free resources on GPU */ gpu_free(gpu_A); gpu_free(gpu_x); gpu_free(gpu_y); #endif // GPU_KERNEL return EXIT_SUCCESS; }
b1d32e33ed34e0f4d1555d25599ee6b747edbf26.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <ATen/CUDAGeneratorImpl.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/hip/DistributionTemplates.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand_kernel.h> #include <utility> #include <functional> #include <ATen/native/Distributions.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/LegacyTHFunctionsCUDA.h> #include <THH/THHGeneral.h> #include <THH/THHApply.cuh> #include <THH/THHDeviceUtils.cuh> #include <cstdint> #include <limits> #include <utility> #include <type_traits> /** * Note [Register spilling in hiprand call for CUDA < 10] * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * For CUDA < 10, hiprandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth) * when called to generate one random number at a time. This is because the line * unsigned ret = (&state->output.x)[state->STATE++]; * in * QUALIFIERS unsigned int hiprand(hiprandStatePhilox4_32_10_t *state) * in hiprand/hiprand_kernel.h dynamically indexes into state.output, preventing the compiler from ever * storing state.output in registers. * * CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels * we are using hiprand distributions that utilize hiprand4 call. hiprand4 call doesn't have the * register spilling problem. 
*/ namespace { template <typename scalar_t> void poisson_cuda_kernel( at::Tensor& ret, const at::Tensor& lambda, std::pair<uint64_t, uint64_t> seeds) { at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, lambda, [seeds] __device__( scalar_t & ret_val, const scalar_t& lambda) { hiprandStatePhilox4_32_10_t state; hiprand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); ret_val = static_cast<scalar_t>(hiprand_poisson(&state, lambda)); }); } struct curand_uniform_wrapper { hiprandStatePhilox4_32_10_t &state; __device__ curand_uniform_wrapper(hiprandStatePhilox4_32_10_t &state): state(state) {} __device__ float operator()() { return hiprand_uniform(&state); } }; template <typename scalar_t> void binomial_cuda_kernel( at::Tensor& ret, const at::Tensor& count, const at::Tensor& prob, std::pair<uint64_t, uint64_t> seeds) { using accscalar_t = at::acc_type<scalar_t, true>; at::TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(count) .add_input(prob) .build(); at::native::distribution_binary_kernel(iter, seeds, [seeds] GPU_LAMBDA (hiprandStatePhilox4_32_10_t& state, scalar_t count, scalar_t prob) { #if defined(__CUDA_ARCH__) || defined(__HIP_PLATFORM_HCC__) auto uniform_lambda = curand_uniform_wrapper(state); BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto sample = sample_binomial<scalar_t, accscalar_t, decltype(uniform_lambda)>(count, prob, standard_uniform); return static_cast<scalar_t>(sample); #else return count; // useless. 
#endif } ); } template <typename scalar_t> void gamma_cuda_kernel( at::Tensor& ret, const at::Tensor& alpha, std::pair<uint64_t, uint64_t> seeds) { using accscalar_t = at::acc_type<scalar_t, true>; at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, alpha, [seeds] __device__( scalar_t & ret_val, const scalar_t& alpha) { hiprandStatePhilox4_32_10_t state; hiprand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); auto uniform_lambda = [&state] __device__ () { return hiprand_uniform(&state); }; BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto normal_lambda = [&state] __device__ () { return hiprand_normal(&state); }; BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda); auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal); auto min_value = std::numeric_limits<scalar_t>::min(); ret_val = (min_value > sample) ? min_value : sample; }); } template<typename scalar_t> void dirichlet_scalar_cuda_kernel( at::Tensor& ret, const at::Tensor& gamma) { auto gamma_sum = gamma.sum(-1, true); at::TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(gamma) .add_input(gamma_sum) .build(); at::native::gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gamma, scalar_t gamma_sum) { auto ret_val = gamma / gamma_sum; auto min_value = std::numeric_limits<scalar_t>::min(); auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon(); ret_val = (min_value > ret_val) ? min_value : ret_val; ret_val = (max_value < ret_val) ? 
max_value : ret_val; return ret_val; }); } } // namespace namespace at { namespace native { Tensor _s_poisson_cuda(const Tensor& lambda, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(20); } Tensor ret = at::empty(lambda.sizes(), lambda.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "poisson_cuda", [&] { poisson_cuda_kernel<scalar_t>(ret, lambda, rng_engine_inputs); }); return ret; } Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(42); } Tensor ret = at::empty(count.sizes(), count.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "binomial_cuda", [&] { binomial_cuda_kernel<scalar_t>(ret, count, prob, rng_engine_inputs); }); return ret; } Tensor _s_gamma_cuda(const Tensor& alpha, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(10); } Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "gamma_cuda", [&] { gamma_cuda_kernel<scalar_t>(ret, alpha, rng_engine_inputs); }); return 
ret; } Tensor _s_dirichlet_cuda(const Tensor& alpha, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(10); } Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "dirichlet", [&] { Tensor gamma = at::empty(alpha.sizes(), alpha.options()); gamma_cuda_kernel<scalar_t>(gamma, alpha, rng_engine_inputs); dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma); }); return ret; } Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) { Tensor ret = at::empty(self.sizes(), self.options()); TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(self) .add_input(output) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "_standard_gamma_grad_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t self_val, scalar_t output_val) { return standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val); }); }); return ret; } Tensor _dirichlet_grad_cuda(const Tensor& x, const Tensor& alpha, const Tensor& total) { Tensor ret = at::empty(x.sizes(), x.options()); TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(x) .add_input(alpha) .add_input(total) .build(); AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "_dirichlet_grad_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t x_val, scalar_t alpha_val, scalar_t total_val) -> scalar_t { return dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val); }); }); return ret; } }} // namespace at::native
b1d32e33ed34e0f4d1555d25599ee6b747edbf26.cu
#include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <ATen/CUDAGeneratorImpl.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/cuda/DistributionTemplates.h> #include <curand.h> #include <curand_kernel.h> #include <curand_philox4x32_x.h> #include <utility> #include <functional> #include <ATen/native/Distributions.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/LegacyTHFunctionsCUDA.h> #include <THC/THCGeneral.h> #include <THC/THCApply.cuh> #include <THC/THCDeviceUtils.cuh> #include <cstdint> #include <limits> #include <utility> #include <type_traits> /** * Note [Register spilling in curand call for CUDA < 10] * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * For CUDA < 10, curandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth) * when called to generate one random number at a time. This is because the line * unsigned ret = (&state->output.x)[state->STATE++]; * in * QUALIFIERS unsigned int curand(curandStatePhilox4_32_10_t *state) * in curand_kernel.h dynamically indexes into state.output, preventing the compiler from ever * storing state.output in registers. * * CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels * we are using curand distributions that utilize curand4 call. curand4 call doesn't have the * register spilling problem. 
*/ namespace { template <typename scalar_t> void poisson_cuda_kernel( at::Tensor& ret, const at::Tensor& lambda, std::pair<uint64_t, uint64_t> seeds) { at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, lambda, [seeds] __device__( scalar_t & ret_val, const scalar_t& lambda) { curandStatePhilox4_32_10_t state; curand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); ret_val = static_cast<scalar_t>(curand_poisson(&state, lambda)); }); } struct curand_uniform_wrapper { curandStatePhilox4_32_10_t &state; __device__ curand_uniform_wrapper(curandStatePhilox4_32_10_t &state): state(state) {} __device__ float operator()() { return curand_uniform(&state); } }; template <typename scalar_t> void binomial_cuda_kernel( at::Tensor& ret, const at::Tensor& count, const at::Tensor& prob, std::pair<uint64_t, uint64_t> seeds) { using accscalar_t = at::acc_type<scalar_t, true>; at::TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(count) .add_input(prob) .build(); at::native::distribution_binary_kernel(iter, seeds, [seeds] GPU_LAMBDA (curandStatePhilox4_32_10_t& state, scalar_t count, scalar_t prob) { #if defined(__CUDA_ARCH__) || defined(__HIP_PLATFORM_HCC__) auto uniform_lambda = curand_uniform_wrapper(state); BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto sample = sample_binomial<scalar_t, accscalar_t, decltype(uniform_lambda)>(count, prob, standard_uniform); return static_cast<scalar_t>(sample); #else return count; // useless. 
#endif } ); } template <typename scalar_t> void gamma_cuda_kernel( at::Tensor& ret, const at::Tensor& alpha, std::pair<uint64_t, uint64_t> seeds) { using accscalar_t = at::acc_type<scalar_t, true>; at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, alpha, [seeds] __device__( scalar_t & ret_val, const scalar_t& alpha) { curandStatePhilox4_32_10_t state; curand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); auto uniform_lambda = [&state] __device__ () { return curand_uniform(&state); }; BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto normal_lambda = [&state] __device__ () { return curand_normal(&state); }; BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda); auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal); auto min_value = std::numeric_limits<scalar_t>::min(); ret_val = (min_value > sample) ? min_value : sample; }); } template<typename scalar_t> void dirichlet_scalar_cuda_kernel( at::Tensor& ret, const at::Tensor& gamma) { auto gamma_sum = gamma.sum(-1, true); at::TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(gamma) .add_input(gamma_sum) .build(); at::native::gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gamma, scalar_t gamma_sum) { auto ret_val = gamma / gamma_sum; auto min_value = std::numeric_limits<scalar_t>::min(); auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon(); ret_val = (min_value > ret_val) ? min_value : ret_val; ret_val = (max_value < ret_val) ? 
max_value : ret_val; return ret_val; }); } } // namespace namespace at { namespace native { Tensor _s_poisson_cuda(const Tensor& lambda, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(20); } Tensor ret = at::empty(lambda.sizes(), lambda.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "poisson_cuda", [&] { poisson_cuda_kernel<scalar_t>(ret, lambda, rng_engine_inputs); }); return ret; } Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(42); } Tensor ret = at::empty(count.sizes(), count.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "binomial_cuda", [&] { binomial_cuda_kernel<scalar_t>(ret, count, prob, rng_engine_inputs); }); return ret; } Tensor _s_gamma_cuda(const Tensor& alpha, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(10); } Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "gamma_cuda", [&] { gamma_cuda_kernel<scalar_t>(ret, alpha, rng_engine_inputs); }); return 
ret; } Tensor _s_dirichlet_cuda(const Tensor& alpha, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(10); } Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "dirichlet", [&] { Tensor gamma = at::empty(alpha.sizes(), alpha.options()); gamma_cuda_kernel<scalar_t>(gamma, alpha, rng_engine_inputs); dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma); }); return ret; } Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) { Tensor ret = at::empty(self.sizes(), self.options()); TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(self) .add_input(output) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "_standard_gamma_grad_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t self_val, scalar_t output_val) { return standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val); }); }); return ret; } Tensor _dirichlet_grad_cuda(const Tensor& x, const Tensor& alpha, const Tensor& total) { Tensor ret = at::empty(x.sizes(), x.options()); TensorIterator iter = at::TensorIteratorConfig() .add_output(ret) .add_input(x) .add_input(alpha) .add_input(total) .build(); AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "_dirichlet_grad_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t x_val, scalar_t alpha_val, scalar_t total_val) -> scalar_t { return dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val); }); }); return ret; } }} // namespace at::native
44dbe8546da01497b8c357c1a0d403298979afb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #define DEBUG #define float double #define INDEX(fst, snd, n) ((fst) * (n) + (snd)) #define SIZE (5000) #define TILL (100) #define N_TILL (SIZE / TILL) __global__ void multiple(float* matrix, float* vector, float* out) { /* * a thread get 100 element in a line (one line 50 thread) * thread is (50, 20) * 20 lines fill a block. * a matrix has 250 blocks */ int x = threadIdx.x; int y = threadIdx.y; int blk = blockIdx.x; float sum = 0; for (int i = x * 100; i < (x + 1) * 100; ++i) { sum += matrix[INDEX(y + blk * 20, i, SIZE)] * vector[i]; } atomicAdd(&out[y + blk * 20], (float)sum); } void validator(float* matrix, float* vector, float* out) { for (int i = 0; i < SIZE; ++i) { float sum = 0; for (int j = 0; j < SIZE; ++j) { sum += matrix[INDEX(i, j, SIZE)] * vector[j]; } out[i] = sum; } } int main() { float* hA = (float*) malloc(sizeof(float) * SIZE * SIZE); float* dA; hipMalloc((void**) &dA, sizeof(float) * SIZE * SIZE); float* hx = (float*) malloc(sizeof(float) * SIZE); float* dx; hipMalloc((void**) &dx, sizeof(float) * SIZE * SIZE); float* out; hipMalloc((void**) &out, sizeof(float) * SIZE); float* valout = (float*) malloc(sizeof(float) * SIZE); // init hA and hx for (int i = 0; i < SIZE; ++i) { for (int j = 0; j < SIZE; ++j) { hA[INDEX(i, j, SIZE)] = i - 0.1 * j + 1; } hx[i] = 0.2 * i - 0.1 * sqrt(i); } // init out hipMemset(out, 0, sizeof(float)* SIZE); memset(valout, 0, sizeof(float) * SIZE); // transfer to gpu hipMemcpy(dA, hA, sizeof(float) * SIZE * SIZE, hipMemcpyHostToDevice); hipMemcpy(dx, hx, sizeof(float) * SIZE, hipMemcpyHostToDevice); dim3 threads(50, 20); hipLaunchKernelGGL(( multiple), dim3(250), dim3(threads), 0, 0, dA, dx, out); validator(hA, hx, valout); free(hA); free(hx); hipFree(dA); hipFree(dx); float* hout = (float*) malloc(sizeof(float) * SIZE); hipMemcpy(hout, out, sizeof(float)* SIZE, 
hipMemcpyDeviceToHost); for (int i = 0; i < 10; ++i) { printf("%f, (%f) \n", hout[i], valout[i]); } free(valout); free(hout); hipFree(out); }
44dbe8546da01497b8c357c1a0d403298979afb9.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #define DEBUG #define float double #define INDEX(fst, snd, n) ((fst) * (n) + (snd)) #define SIZE (5000) #define TILL (100) #define N_TILL (SIZE / TILL) __global__ void multiple(float* matrix, float* vector, float* out) { /* * a thread get 100 element in a line (one line 50 thread) * thread is (50, 20) * 20 lines fill a block. * a matrix has 250 blocks */ int x = threadIdx.x; int y = threadIdx.y; int blk = blockIdx.x; float sum = 0; for (int i = x * 100; i < (x + 1) * 100; ++i) { sum += matrix[INDEX(y + blk * 20, i, SIZE)] * vector[i]; } atomicAdd(&out[y + blk * 20], (float)sum); } void validator(float* matrix, float* vector, float* out) { for (int i = 0; i < SIZE; ++i) { float sum = 0; for (int j = 0; j < SIZE; ++j) { sum += matrix[INDEX(i, j, SIZE)] * vector[j]; } out[i] = sum; } } int main() { float* hA = (float*) malloc(sizeof(float) * SIZE * SIZE); float* dA; cudaMalloc((void**) &dA, sizeof(float) * SIZE * SIZE); float* hx = (float*) malloc(sizeof(float) * SIZE); float* dx; cudaMalloc((void**) &dx, sizeof(float) * SIZE * SIZE); float* out; cudaMalloc((void**) &out, sizeof(float) * SIZE); float* valout = (float*) malloc(sizeof(float) * SIZE); // init hA and hx for (int i = 0; i < SIZE; ++i) { for (int j = 0; j < SIZE; ++j) { hA[INDEX(i, j, SIZE)] = i - 0.1 * j + 1; } hx[i] = 0.2 * i - 0.1 * sqrt(i); } // init out cudaMemset(out, 0, sizeof(float)* SIZE); memset(valout, 0, sizeof(float) * SIZE); // transfer to gpu cudaMemcpy(dA, hA, sizeof(float) * SIZE * SIZE, cudaMemcpyHostToDevice); cudaMemcpy(dx, hx, sizeof(float) * SIZE, cudaMemcpyHostToDevice); dim3 threads(50, 20); multiple<<<250, threads>>>(dA, dx, out); validator(hA, hx, valout); free(hA); free(hx); cudaFree(dA); cudaFree(dx); float* hout = (float*) malloc(sizeof(float) * SIZE); cudaMemcpy(hout, out, sizeof(float)* SIZE, cudaMemcpyDeviceToHost); for (int i = 0; i < 10; ++i) { printf("%f, (%f) \n", hout[i], valout[i]); } free(valout); free(hout); 
cudaFree(out); }
0a33e75321b44caf5bfe5301f918280115d44c29.hip
// !!! This is a file automatically generated by hipify!!! #include <gtest/gtest.h> #include <vector> #include "../../../../src/common/categorical.h" #include "../../../../src/tree/gpu_hist/histogram.cuh" #include "../../../../src/tree/gpu_hist/row_partitioner.cuh" #include "../../categorical_helpers.h" #include "../../helpers.h" namespace xgboost { namespace tree { void TestDeterministicHistogram(bool is_dense, int shm_size) { size_t constexpr kBins = 256, kCols = 120, kRows = 16384, kRounds = 16; float constexpr kLower = -1e-2, kUpper = 1e2; float sparsity = is_dense ? 0.0f : 0.5f; auto matrix = RandomDataGenerator(kRows, kCols, sparsity).GenerateDMatrix(); BatchParam batch_param{0, static_cast<int32_t>(kBins)}; for (auto const& batch : matrix->GetBatches<EllpackPage>(batch_param)) { auto* page = batch.Impl(); tree::RowPartitioner row_partitioner(0, kRows); auto ridx = row_partitioner.GetRows(0); int num_bins = kBins * kCols; dh::device_vector<GradientPairInt64> histogram(num_bins); auto d_histogram = dh::ToSpan(histogram); auto gpair = GenerateRandomGradients(kRows, kLower, kUpper); gpair.SetDevice(0); FeatureGroups feature_groups(page->Cuts(), page->is_dense, shm_size, sizeof(GradientPairInt64)); auto rounding = GradientQuantizer(gpair.DeviceSpan()); BuildGradientHistogram(page->GetDeviceAccessor(0), feature_groups.DeviceAccessor(0), gpair.DeviceSpan(), ridx, d_histogram, rounding); std::vector<GradientPairInt64> histogram_h(num_bins); dh::safe_cuda(hipMemcpy(histogram_h.data(), d_histogram.data(), num_bins * sizeof(GradientPairInt64), hipMemcpyDeviceToHost)); for (size_t i = 0; i < kRounds; ++i) { dh::device_vector<GradientPairInt64> new_histogram(num_bins); auto d_new_histogram = dh::ToSpan(new_histogram); auto rounding = GradientQuantizer(gpair.DeviceSpan()); BuildGradientHistogram(page->GetDeviceAccessor(0), feature_groups.DeviceAccessor(0), gpair.DeviceSpan(), ridx, d_new_histogram, rounding); std::vector<GradientPairInt64> new_histogram_h(num_bins); 
dh::safe_cuda(hipMemcpy(new_histogram_h.data(), d_new_histogram.data(), num_bins * sizeof(GradientPairInt64), hipMemcpyDeviceToHost)); for (size_t j = 0; j < new_histogram_h.size(); ++j) { ASSERT_EQ(new_histogram_h[j].GetQuantisedGrad(), histogram_h[j].GetQuantisedGrad()); ASSERT_EQ(new_histogram_h[j].GetQuantisedHess(), histogram_h[j].GetQuantisedHess()); } } { auto gpair = GenerateRandomGradients(kRows, kLower, kUpper); gpair.SetDevice(0); // Use a single feature group to compute the baseline. FeatureGroups single_group(page->Cuts()); dh::device_vector<GradientPairInt64> baseline(num_bins); BuildGradientHistogram(page->GetDeviceAccessor(0), single_group.DeviceAccessor(0), gpair.DeviceSpan(), ridx, dh::ToSpan(baseline), rounding); std::vector<GradientPairInt64> baseline_h(num_bins); dh::safe_cuda(hipMemcpy(baseline_h.data(), baseline.data().get(), num_bins * sizeof(GradientPairInt64), hipMemcpyDeviceToHost)); for (size_t i = 0; i < baseline.size(); ++i) { EXPECT_NEAR(baseline_h[i].GetQuantisedGrad(), histogram_h[i].GetQuantisedGrad(), baseline_h[i].GetQuantisedGrad() * 1e-3); } } } } TEST(Histogram, GPUDeterministic) { std::vector<bool> is_dense_array{false, true}; std::vector<int> shm_sizes{48 * 1024, 64 * 1024, 160 * 1024}; for (bool is_dense : is_dense_array) { for (int shm_size : shm_sizes) { TestDeterministicHistogram(is_dense, shm_size); } } } void ValidateCategoricalHistogram(size_t n_categories, common::Span<GradientPairInt64> onehot, common::Span<GradientPairInt64> cat) { auto cat_sum = std::accumulate(cat.cbegin(), cat.cend(), GradientPairInt64{}); for (size_t c = 0; c < n_categories; ++c) { auto zero = onehot[c * 2]; auto one = onehot[c * 2 + 1]; auto chosen = cat[c]; auto not_chosen = cat_sum - chosen; ASSERT_EQ(zero, not_chosen); ASSERT_EQ(one, chosen); } } // Test 1 vs rest categorical histogram is equivalent to one hot encoded data. 
void TestGPUHistogramCategorical(size_t num_categories) { size_t constexpr kRows = 340; size_t constexpr kBins = 256; auto x = GenerateRandomCategoricalSingleColumn(kRows, num_categories); auto cat_m = GetDMatrixFromData(x, kRows, 1); cat_m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical); BatchParam batch_param{0, static_cast<int32_t>(kBins)}; tree::RowPartitioner row_partitioner(0, kRows); auto ridx = row_partitioner.GetRows(0); dh::device_vector<GradientPairInt64> cat_hist(num_categories); auto gpair = GenerateRandomGradients(kRows, 0, 2); gpair.SetDevice(0); auto rounding = GradientQuantizer(gpair.DeviceSpan()); /** * Generate hist with cat data. */ for (auto const &batch : cat_m->GetBatches<EllpackPage>(batch_param)) { auto* page = batch.Impl(); FeatureGroups single_group(page->Cuts()); BuildGradientHistogram(page->GetDeviceAccessor(0), single_group.DeviceAccessor(0), gpair.DeviceSpan(), ridx, dh::ToSpan(cat_hist), rounding); } /** * Generate hist with one hot encoded data. 
*/ auto x_encoded = OneHotEncodeFeature(x, num_categories); auto encode_m = GetDMatrixFromData(x_encoded, kRows, num_categories); dh::device_vector<GradientPairInt64> encode_hist(2 * num_categories); for (auto const &batch : encode_m->GetBatches<EllpackPage>(batch_param)) { auto* page = batch.Impl(); FeatureGroups single_group(page->Cuts()); BuildGradientHistogram(page->GetDeviceAccessor(0), single_group.DeviceAccessor(0), gpair.DeviceSpan(), ridx, dh::ToSpan(encode_hist), rounding); } std::vector<GradientPairInt64> h_cat_hist(cat_hist.size()); thrust::copy(cat_hist.begin(), cat_hist.end(), h_cat_hist.begin()); std::vector<GradientPairInt64> h_encode_hist(encode_hist.size()); thrust::copy(encode_hist.begin(), encode_hist.end(), h_encode_hist.begin()); ValidateCategoricalHistogram(num_categories, common::Span<GradientPairInt64>{h_encode_hist}, common::Span<GradientPairInt64>{h_cat_hist}); } TEST(Histogram, GPUHistCategorical) { for (size_t num_categories = 2; num_categories < 8; ++num_categories) { TestGPUHistogramCategorical(num_categories); } } namespace { // Atomic add as type cast for test. XGBOOST_DEV_INLINE int64_t atomicAdd(int64_t *dst, int64_t src) { // NOLINT uint64_t* u_dst = reinterpret_cast<uint64_t*>(dst); uint64_t u_src = *reinterpret_cast<uint64_t*>(&src); uint64_t ret = ::atomicAdd(u_dst, u_src); return *reinterpret_cast<int64_t*>(&ret); } } void TestAtomicAdd() { size_t n_elements = 1024; dh::device_vector<int64_t> result_a(1, 0); auto d_result_a = result_a.data().get(); dh::device_vector<int64_t> result_b(1, 0); auto d_result_b = result_b.data().get(); /** * Test for simple inputs */ std::vector<int64_t> h_inputs(n_elements); for (size_t i = 0; i < h_inputs.size(); ++i) { h_inputs[i] = (i % 2 == 0) ? 
i : -i; } dh::device_vector<int64_t> inputs(h_inputs); auto d_inputs = inputs.data().get(); dh::LaunchN(n_elements, [=] __device__(size_t i) { AtomicAdd64As32(d_result_a, d_inputs[i]); atomicAdd(d_result_b, d_inputs[i]); }); ASSERT_EQ(result_a[0], result_b[0]); /** * Test for positive values that don't fit into 32 bit integer. */ thrust::fill(inputs.begin(), inputs.end(), (std::numeric_limits<uint32_t>::max() / 2)); thrust::fill(result_a.begin(), result_a.end(), 0); thrust::fill(result_b.begin(), result_b.end(), 0); dh::LaunchN(n_elements, [=] __device__(size_t i) { AtomicAdd64As32(d_result_a, d_inputs[i]); atomicAdd(d_result_b, d_inputs[i]); }); ASSERT_EQ(result_a[0], result_b[0]); ASSERT_GT(result_a[0], std::numeric_limits<uint32_t>::max()); CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]); /** * Test for negative values that don't fit into 32 bit integer. */ thrust::fill(inputs.begin(), inputs.end(), (std::numeric_limits<int32_t>::min() / 2)); thrust::fill(result_a.begin(), result_a.end(), 0); thrust::fill(result_b.begin(), result_b.end(), 0); dh::LaunchN(n_elements, [=] __device__(size_t i) { AtomicAdd64As32(d_result_a, d_inputs[i]); atomicAdd(d_result_b, d_inputs[i]); }); ASSERT_EQ(result_a[0], result_b[0]); ASSERT_LT(result_a[0], std::numeric_limits<int32_t>::min()); CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]); } TEST(Histogram, AtomicAddInt64) { TestAtomicAdd(); } } // namespace tree } // namespace xgboost
0a33e75321b44caf5bfe5301f918280115d44c29.cu
#include <gtest/gtest.h> #include <vector> #include "../../../../src/common/categorical.h" #include "../../../../src/tree/gpu_hist/histogram.cuh" #include "../../../../src/tree/gpu_hist/row_partitioner.cuh" #include "../../categorical_helpers.h" #include "../../helpers.h" namespace xgboost { namespace tree { void TestDeterministicHistogram(bool is_dense, int shm_size) { size_t constexpr kBins = 256, kCols = 120, kRows = 16384, kRounds = 16; float constexpr kLower = -1e-2, kUpper = 1e2; float sparsity = is_dense ? 0.0f : 0.5f; auto matrix = RandomDataGenerator(kRows, kCols, sparsity).GenerateDMatrix(); BatchParam batch_param{0, static_cast<int32_t>(kBins)}; for (auto const& batch : matrix->GetBatches<EllpackPage>(batch_param)) { auto* page = batch.Impl(); tree::RowPartitioner row_partitioner(0, kRows); auto ridx = row_partitioner.GetRows(0); int num_bins = kBins * kCols; dh::device_vector<GradientPairInt64> histogram(num_bins); auto d_histogram = dh::ToSpan(histogram); auto gpair = GenerateRandomGradients(kRows, kLower, kUpper); gpair.SetDevice(0); FeatureGroups feature_groups(page->Cuts(), page->is_dense, shm_size, sizeof(GradientPairInt64)); auto rounding = GradientQuantizer(gpair.DeviceSpan()); BuildGradientHistogram(page->GetDeviceAccessor(0), feature_groups.DeviceAccessor(0), gpair.DeviceSpan(), ridx, d_histogram, rounding); std::vector<GradientPairInt64> histogram_h(num_bins); dh::safe_cuda(cudaMemcpy(histogram_h.data(), d_histogram.data(), num_bins * sizeof(GradientPairInt64), cudaMemcpyDeviceToHost)); for (size_t i = 0; i < kRounds; ++i) { dh::device_vector<GradientPairInt64> new_histogram(num_bins); auto d_new_histogram = dh::ToSpan(new_histogram); auto rounding = GradientQuantizer(gpair.DeviceSpan()); BuildGradientHistogram(page->GetDeviceAccessor(0), feature_groups.DeviceAccessor(0), gpair.DeviceSpan(), ridx, d_new_histogram, rounding); std::vector<GradientPairInt64> new_histogram_h(num_bins); dh::safe_cuda(cudaMemcpy(new_histogram_h.data(), 
d_new_histogram.data(), num_bins * sizeof(GradientPairInt64), cudaMemcpyDeviceToHost)); for (size_t j = 0; j < new_histogram_h.size(); ++j) { ASSERT_EQ(new_histogram_h[j].GetQuantisedGrad(), histogram_h[j].GetQuantisedGrad()); ASSERT_EQ(new_histogram_h[j].GetQuantisedHess(), histogram_h[j].GetQuantisedHess()); } } { auto gpair = GenerateRandomGradients(kRows, kLower, kUpper); gpair.SetDevice(0); // Use a single feature group to compute the baseline. FeatureGroups single_group(page->Cuts()); dh::device_vector<GradientPairInt64> baseline(num_bins); BuildGradientHistogram(page->GetDeviceAccessor(0), single_group.DeviceAccessor(0), gpair.DeviceSpan(), ridx, dh::ToSpan(baseline), rounding); std::vector<GradientPairInt64> baseline_h(num_bins); dh::safe_cuda(cudaMemcpy(baseline_h.data(), baseline.data().get(), num_bins * sizeof(GradientPairInt64), cudaMemcpyDeviceToHost)); for (size_t i = 0; i < baseline.size(); ++i) { EXPECT_NEAR(baseline_h[i].GetQuantisedGrad(), histogram_h[i].GetQuantisedGrad(), baseline_h[i].GetQuantisedGrad() * 1e-3); } } } } TEST(Histogram, GPUDeterministic) { std::vector<bool> is_dense_array{false, true}; std::vector<int> shm_sizes{48 * 1024, 64 * 1024, 160 * 1024}; for (bool is_dense : is_dense_array) { for (int shm_size : shm_sizes) { TestDeterministicHistogram(is_dense, shm_size); } } } void ValidateCategoricalHistogram(size_t n_categories, common::Span<GradientPairInt64> onehot, common::Span<GradientPairInt64> cat) { auto cat_sum = std::accumulate(cat.cbegin(), cat.cend(), GradientPairInt64{}); for (size_t c = 0; c < n_categories; ++c) { auto zero = onehot[c * 2]; auto one = onehot[c * 2 + 1]; auto chosen = cat[c]; auto not_chosen = cat_sum - chosen; ASSERT_EQ(zero, not_chosen); ASSERT_EQ(one, chosen); } } // Test 1 vs rest categorical histogram is equivalent to one hot encoded data. 
void TestGPUHistogramCategorical(size_t num_categories) { size_t constexpr kRows = 340; size_t constexpr kBins = 256; auto x = GenerateRandomCategoricalSingleColumn(kRows, num_categories); auto cat_m = GetDMatrixFromData(x, kRows, 1); cat_m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical); BatchParam batch_param{0, static_cast<int32_t>(kBins)}; tree::RowPartitioner row_partitioner(0, kRows); auto ridx = row_partitioner.GetRows(0); dh::device_vector<GradientPairInt64> cat_hist(num_categories); auto gpair = GenerateRandomGradients(kRows, 0, 2); gpair.SetDevice(0); auto rounding = GradientQuantizer(gpair.DeviceSpan()); /** * Generate hist with cat data. */ for (auto const &batch : cat_m->GetBatches<EllpackPage>(batch_param)) { auto* page = batch.Impl(); FeatureGroups single_group(page->Cuts()); BuildGradientHistogram(page->GetDeviceAccessor(0), single_group.DeviceAccessor(0), gpair.DeviceSpan(), ridx, dh::ToSpan(cat_hist), rounding); } /** * Generate hist with one hot encoded data. 
*/ auto x_encoded = OneHotEncodeFeature(x, num_categories); auto encode_m = GetDMatrixFromData(x_encoded, kRows, num_categories); dh::device_vector<GradientPairInt64> encode_hist(2 * num_categories); for (auto const &batch : encode_m->GetBatches<EllpackPage>(batch_param)) { auto* page = batch.Impl(); FeatureGroups single_group(page->Cuts()); BuildGradientHistogram(page->GetDeviceAccessor(0), single_group.DeviceAccessor(0), gpair.DeviceSpan(), ridx, dh::ToSpan(encode_hist), rounding); } std::vector<GradientPairInt64> h_cat_hist(cat_hist.size()); thrust::copy(cat_hist.begin(), cat_hist.end(), h_cat_hist.begin()); std::vector<GradientPairInt64> h_encode_hist(encode_hist.size()); thrust::copy(encode_hist.begin(), encode_hist.end(), h_encode_hist.begin()); ValidateCategoricalHistogram(num_categories, common::Span<GradientPairInt64>{h_encode_hist}, common::Span<GradientPairInt64>{h_cat_hist}); } TEST(Histogram, GPUHistCategorical) { for (size_t num_categories = 2; num_categories < 8; ++num_categories) { TestGPUHistogramCategorical(num_categories); } } namespace { // Atomic add as type cast for test. XGBOOST_DEV_INLINE int64_t atomicAdd(int64_t *dst, int64_t src) { // NOLINT uint64_t* u_dst = reinterpret_cast<uint64_t*>(dst); uint64_t u_src = *reinterpret_cast<uint64_t*>(&src); uint64_t ret = ::atomicAdd(u_dst, u_src); return *reinterpret_cast<int64_t*>(&ret); } } void TestAtomicAdd() { size_t n_elements = 1024; dh::device_vector<int64_t> result_a(1, 0); auto d_result_a = result_a.data().get(); dh::device_vector<int64_t> result_b(1, 0); auto d_result_b = result_b.data().get(); /** * Test for simple inputs */ std::vector<int64_t> h_inputs(n_elements); for (size_t i = 0; i < h_inputs.size(); ++i) { h_inputs[i] = (i % 2 == 0) ? 
i : -i; } dh::device_vector<int64_t> inputs(h_inputs); auto d_inputs = inputs.data().get(); dh::LaunchN(n_elements, [=] __device__(size_t i) { AtomicAdd64As32(d_result_a, d_inputs[i]); atomicAdd(d_result_b, d_inputs[i]); }); ASSERT_EQ(result_a[0], result_b[0]); /** * Test for positive values that don't fit into 32 bit integer. */ thrust::fill(inputs.begin(), inputs.end(), (std::numeric_limits<uint32_t>::max() / 2)); thrust::fill(result_a.begin(), result_a.end(), 0); thrust::fill(result_b.begin(), result_b.end(), 0); dh::LaunchN(n_elements, [=] __device__(size_t i) { AtomicAdd64As32(d_result_a, d_inputs[i]); atomicAdd(d_result_b, d_inputs[i]); }); ASSERT_EQ(result_a[0], result_b[0]); ASSERT_GT(result_a[0], std::numeric_limits<uint32_t>::max()); CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]); /** * Test for negative values that don't fit into 32 bit integer. */ thrust::fill(inputs.begin(), inputs.end(), (std::numeric_limits<int32_t>::min() / 2)); thrust::fill(result_a.begin(), result_a.end(), 0); thrust::fill(result_b.begin(), result_b.end(), 0); dh::LaunchN(n_elements, [=] __device__(size_t i) { AtomicAdd64As32(d_result_a, d_inputs[i]); atomicAdd(d_result_b, d_inputs[i]); }); ASSERT_EQ(result_a[0], result_b[0]); ASSERT_LT(result_a[0], std::numeric_limits<int32_t>::min()); CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]); } TEST(Histogram, AtomicAddInt64) { TestAtomicAdd(); } } // namespace tree } // namespace xgboost
c34521787f3a471d00bbad540f8f1476495db05f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "headers.h" #define N 4 #define blockSize 1024 void DisplayMatrixRowMajor(const char *inp, int *arr, int noElements) { printf("%s\n", inp); for(int i = 0; i < noElements; i+=4) { printf("%d \t %d \t %d \t %d", arr[i], arr[i+1], arr[i+2], arr[i+3]); printf("\n\n"); } } void DisplayOutputMatrixRowMajor(const char *inp, int *arr) { printf("%s\n", inp); int i = 0; printf("%d \t %d \t %d \t %d", arr[i], arr[i+1], arr[i+2], arr[i+3]); printf("\n\n"); } int main(int argc, char const *argv[]) { // Error Code to check return values for CUDA calls hipError_t err = hipSuccess; int noElements = 4*N; // taking ceil int gridSize = (int)ceil((float)noElements/(blockSize)); size_t size = noElements*sizeof(int); printf("[Operations on Kernel on %d elements] \n", noElements); // Host Matrix int *h_arr = (int*)malloc(size); // Creating Random Matrix for(int i = 0; i < noElements; i++) { h_arr[i] = rand()%10; } // Display initial Matrix DisplayMatrixRowMajor("Initial Matrix", h_arr, noElements); // Device Matric int *d_arr = NULL; err = hipMalloc((void **)&d_arr, size); if(err != hipSuccess){ fprintf(stderr, "Failed to allocates size to device array. Error code %s \n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipMemcpy(d_arr, h_arr, size, hipMemcpyHostToDevice); // Output Matrix int *d_out = NULL; err = hipMalloc((void **)&d_out, size); if(err != hipSuccess){ fprintf(stderr, "Failed to allocates size to device Output array. Error code %s \n", hipGetErrorString(err)); exit(EXIT_FAILURE); } int *d_out2 = NULL; err = hipMalloc((void **)&d_out2, size); if(err != hipSuccess){ fprintf(stderr, "Failed to allocates size to device Output array. 
Error code %s \n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Dimensions dim3 blocksPerGrid(gridSize); dim3 threadsPerBlock(blockSize); size_t shmsz = blockSize*sizeof(int); hipLaunchKernelGGL(( ReduceRowMajor), dim3(blocksPerGrid), dim3(threadsPerBlock) , shmsz , 0, d_arr, d_out, noElements); int *h_out = (int*)malloc(shmz); hipMemcpy(h_out, d_out2, shmz, hipMemcpyDeviceToHost); // Display The Matrix DisplayOutputMatrixRowMajor("Row Major Sum Matrix", h_out); free(h_arr); free(h_out); hipFree(d_arr); hipFree(d_out); hipFree(d_out2); // Reset the Device and Exit err = hipDeviceReset(); if(err != hipSuccess){ fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
c34521787f3a471d00bbad540f8f1476495db05f.cu
#include "headers.h" #define N 4 #define blockSize 1024 void DisplayMatrixRowMajor(const char *inp, int *arr, int noElements) { printf("%s\n", inp); for(int i = 0; i < noElements; i+=4) { printf("%d \t %d \t %d \t %d", arr[i], arr[i+1], arr[i+2], arr[i+3]); printf("\n\n"); } } void DisplayOutputMatrixRowMajor(const char *inp, int *arr) { printf("%s\n", inp); int i = 0; printf("%d \t %d \t %d \t %d", arr[i], arr[i+1], arr[i+2], arr[i+3]); printf("\n\n"); } int main(int argc, char const *argv[]) { // Error Code to check return values for CUDA calls cudaError_t err = cudaSuccess; int noElements = 4*N; // taking ceil int gridSize = (int)ceil((float)noElements/(blockSize)); size_t size = noElements*sizeof(int); printf("[Operations on Kernel on %d elements] \n", noElements); // Host Matrix int *h_arr = (int*)malloc(size); // Creating Random Matrix for(int i = 0; i < noElements; i++) { h_arr[i] = rand()%10; } // Display initial Matrix DisplayMatrixRowMajor("Initial Matrix", h_arr, noElements); // Device Matric int *d_arr = NULL; err = cudaMalloc((void **)&d_arr, size); if(err != cudaSuccess){ fprintf(stderr, "Failed to allocates size to device array. Error code %s \n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaMemcpy(d_arr, h_arr, size, cudaMemcpyHostToDevice); // Output Matrix int *d_out = NULL; err = cudaMalloc((void **)&d_out, size); if(err != cudaSuccess){ fprintf(stderr, "Failed to allocates size to device Output array. Error code %s \n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } int *d_out2 = NULL; err = cudaMalloc((void **)&d_out2, size); if(err != cudaSuccess){ fprintf(stderr, "Failed to allocates size to device Output array. 
Error code %s \n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Dimensions dim3 blocksPerGrid(gridSize); dim3 threadsPerBlock(blockSize); size_t shmsz = blockSize*sizeof(int); ReduceRowMajor<<< blocksPerGrid, threadsPerBlock , shmsz >>>(d_arr, d_out, noElements); int *h_out = (int*)malloc(shmz); cudaMemcpy(h_out, d_out2, shmz, cudaMemcpyDeviceToHost); // Display The Matrix DisplayOutputMatrixRowMajor("Row Major Sum Matrix", h_out); free(h_arr); free(h_out); cudaFree(d_arr); cudaFree(d_out); cudaFree(d_out2); // Reset the Device and Exit err = cudaDeviceReset(); if(err != cudaSuccess){ fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
4d0968fa9b3c7c6a4f03da9fbf5b0d1da09df923.hip
// !!! This is a file automatically generated by hipify!!! #include "rocblas.h" void cu_matvec_test() { }
4d0968fa9b3c7c6a4f03da9fbf5b0d1da09df923.cu
#include "cublas.h" void cu_matvec_test() { }
2d1605eddcf26d2a74e09a858d48fcdbc290eeda.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
**/ #include <thrust/unique.h> #include <thrust/sort.h> #include <thrust/set_operations.h> #include <thrust/iterator/discard_iterator.h> #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/densegrid.inl" #include "cupoch/geometry/geometry_functor.h" #include "cupoch/geometry/geometry_utils.h" #include "cupoch/geometry/intersection_test.h" #include "cupoch/geometry/occupancygrid.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/voxelgrid.h" #include "cupoch/utility/eigen.h" #include "cupoch/utility/platform.h" namespace cupoch { namespace geometry { namespace { struct extract_range_voxels_functor { extract_range_voxels_functor(const Eigen::Vector3i& extents, int resolution, const Eigen::Vector3i& min_bound) : extents_(extents), resolution_(resolution), min_bound_(min_bound){}; const Eigen::Vector3i extents_; const int resolution_; const Eigen::Vector3i min_bound_; __device__ int operator()(size_t idx) const { int x = idx / (extents_[1] * extents_[2]); int yz = idx % (extents_[1] * extents_[2]); int y = yz / extents_[2]; int z = yz % extents_[2]; Eigen::Vector3i gidx = min_bound_ + Eigen::Vector3i(x, y, z); return IndexOf(gidx, resolution_); } }; __device__ int VoxelTraversal(Eigen::Vector3i* voxels, int n_buffer, const Eigen::Vector3i& half_resolution, const Eigen::Vector3f& start, const Eigen::Vector3f& end, float voxel_size) { int n_voxels = 0; Eigen::Vector3f ray = end - start; float length = ray.norm(); if (length == 0) { return n_voxels; } ray /= length; Eigen::Vector3i current_voxel(floorf(start[0] / voxel_size), floorf(start[1] / voxel_size), floorf(start[2] / voxel_size)); Eigen::Vector3i last_voxel(floorf(end[0] / voxel_size), floorf(end[1] / voxel_size), floorf(end[2] / voxel_size)); float stepX = (ray[0] > 0) ? 1 : ((ray[0] < 0) ? -1 : 0); float stepY = (ray[1] > 0) ? 1 : ((ray[1] < 0) ? -1 : 0); float stepZ = (ray[2] > 0) ? 1 : ((ray[2] < 0) ? 
-1 : 0); float voxel_boundary_x = (current_voxel[0] + 0.5 * stepX) * voxel_size; float voxel_boundary_y = (current_voxel[1] + 0.5 * stepY) * voxel_size; float voxel_boundary_z = (current_voxel[2] + 0.5 * stepZ) * voxel_size; float tMaxX = (stepX != 0) ? (voxel_boundary_x - start[0]) / ray[0] : std::numeric_limits<float>::infinity(); float tMaxY = (stepY != 0) ? (voxel_boundary_y - start[1]) / ray[1] : std::numeric_limits<float>::infinity(); float tMaxZ = (stepZ != 0) ? (voxel_boundary_z - start[2]) / ray[2] : std::numeric_limits<float>::infinity(); float tDeltaX = (stepX != 0) ? voxel_size / fabs(ray[0]) : std::numeric_limits<float>::infinity(); float tDeltaY = (stepY != 0) ? voxel_size / fabs(ray[1]) : std::numeric_limits<float>::infinity(); float tDeltaZ = (stepZ != 0) ? voxel_size / fabs(ray[2]) : std::numeric_limits<float>::infinity(); voxels[n_voxels] = current_voxel + half_resolution; ++n_voxels; while (n_voxels < n_buffer) { if (tMaxX < tMaxY) { if (tMaxX < tMaxZ) { current_voxel[0] += stepX; tMaxX += tDeltaX; } else { current_voxel[2] += stepZ; tMaxZ += tDeltaZ; } } else { if (tMaxY < tMaxZ) { current_voxel[1] += stepY; tMaxY += tDeltaY; } else { current_voxel[2] += stepZ; tMaxZ += tDeltaZ; } } if (last_voxel == current_voxel) { break; } else { float dist_from_origin = min(min(tMaxX, tMaxY), tMaxZ); if (dist_from_origin > length) { break; } else { voxels[n_voxels] = current_voxel + half_resolution; ++n_voxels; } } } return n_voxels; } struct compute_voxel_traversal_functor { compute_voxel_traversal_functor(Eigen::Vector3i* voxels, int n_step, const Eigen::Vector3f& viewpoint, const Eigen::Vector3i& half_resolution, float voxel_size, const Eigen::Vector3f& origin) : voxels_(voxels), n_step_(n_step), viewpoint_(viewpoint), half_resolution_(half_resolution), voxel_size_(voxel_size), origin_(origin){}; Eigen::Vector3i* voxels_; const int n_step_; const Eigen::Vector3f viewpoint_; const Eigen::Vector3i half_resolution_; const float voxel_size_; const 
Eigen::Vector3f origin_; __device__ void operator()( const thrust::tuple<size_t, Eigen::Vector3f>& x) { const int idx = thrust::get<0>(x); const Eigen::Vector3f end = thrust::get<1>(x); VoxelTraversal(voxels_ + idx * n_step_, n_step_, half_resolution_, viewpoint_, end - origin_, voxel_size_); } }; void ComputeFreeVoxels(const utility::device_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float voxel_size, int resolution, Eigen::Vector3f& origin, int n_div, utility::device_vector<Eigen::Vector3i>& free_voxels) { if (points.empty()) return; size_t n_points = points.size(); Eigen::Vector3i half_resolution = Eigen::Vector3i::Constant(resolution / 2); free_voxels.resize( n_div * 3 * n_points, Eigen::Vector3i::Constant(geometry::INVALID_VOXEL_INDEX)); compute_voxel_traversal_functor func( thrust::raw_pointer_cast(free_voxels.data()), n_div * 3, viewpoint - origin, half_resolution, voxel_size, origin); thrust::for_each(enumerate_begin(points), enumerate_end(points), func); auto end1 = thrust::remove_if( free_voxels.begin(), free_voxels.end(), [resolution] __device__(const Eigen::Vector3i& idx) -> bool { return idx[0] < 0 || idx[1] < 0 || idx[2] < 0 || idx[0] >= resolution || idx[1] >= resolution || idx[2] >= resolution; }); free_voxels.resize(thrust::distance(free_voxels.begin(), end1)); thrust::sort(utility::exec_policy(0), free_voxels.begin(), free_voxels.end()); auto end2 = thrust::unique(utility::exec_policy(0), free_voxels.begin(), free_voxels.end()); free_voxels.resize(thrust::distance(free_voxels.begin(), end2)); } struct create_occupancy_voxels_functor { create_occupancy_voxels_functor(const Eigen::Vector3f& origin, const Eigen::Vector3i& half_resolution, float voxel_size) : origin_(origin), half_resolution_(half_resolution), voxel_size_(voxel_size){}; const Eigen::Vector3f origin_; const Eigen::Vector3i half_resolution_; const float voxel_size_; __device__ Eigen::Vector3i operator()( const thrust::tuple<Eigen::Vector3f, bool>& x) const { const 
Eigen::Vector3f& point = thrust::get<0>(x); bool hit_flag = thrust::get<1>(x); Eigen::Vector3f ref_coord = (point - origin_) / voxel_size_; return (hit_flag) ? Eigen::device_vectorize<float, 3, ::floor>(ref_coord) .cast<int>() + half_resolution_ : Eigen::Vector3i(INVALID_VOXEL_INDEX, INVALID_VOXEL_INDEX, INVALID_VOXEL_INDEX); } }; void ComputeOccupiedVoxels( const utility::device_vector<Eigen::Vector3f>& points, const utility::device_vector<bool> hit_flags, float voxel_size, int resolution, Eigen::Vector3f& origin, utility::device_vector<Eigen::Vector3i>& occupied_voxels) { occupied_voxels.resize(points.size()); Eigen::Vector3i half_resolution = Eigen::Vector3i::Constant(resolution / 2); create_occupancy_voxels_functor func(origin, half_resolution, voxel_size); thrust::transform(make_tuple_begin(points, hit_flags), make_tuple_end(points, hit_flags), occupied_voxels.begin(), func); auto end1 = thrust::remove_if( occupied_voxels.begin(), occupied_voxels.end(), [resolution] __device__(const Eigen::Vector3i& idx) -> bool { return idx[0] < 0 || idx[1] < 0 || idx[2] < 0 || idx[0] >= resolution || idx[1] >= resolution || idx[2] >= resolution; }); occupied_voxels.resize(thrust::distance(occupied_voxels.begin(), end1)); thrust::sort(utility::exec_policy(0), occupied_voxels.begin(), occupied_voxels.end()); auto end2 = thrust::unique(utility::exec_policy(0), occupied_voxels.begin(), occupied_voxels.end()); occupied_voxels.resize(thrust::distance(occupied_voxels.begin(), end2)); } struct add_occupancy_functor { add_occupancy_functor(OccupancyVoxel* voxels, int resolution, float clamping_thres_min, float clamping_thres_max, float prob_miss_log, float prob_hit_log, bool occupied) : voxels_(voxels), resolution_(resolution), clamping_thres_min_(clamping_thres_min), clamping_thres_max_(clamping_thres_max), prob_miss_log_(prob_miss_log), prob_hit_log_(prob_hit_log), occupied_(occupied){}; OccupancyVoxel* voxels_; const int resolution_; const float clamping_thres_min_; const float 
clamping_thres_max_; const float prob_miss_log_; const float prob_hit_log_; const bool occupied_; __device__ void operator()(const Eigen::Vector3i& voxel) { size_t idx = IndexOf(voxel, resolution_); float p = voxels_[idx].prob_log_; p = (isnan(p)) ? 0 : p; p += (occupied_) ? prob_hit_log_ : prob_miss_log_; voxels_[idx].prob_log_ = min(max(p, clamping_thres_min_), clamping_thres_max_); voxels_[idx].grid_index_ = voxel.cast<unsigned short>(); } }; } // namespace template class DenseGrid<OccupancyVoxel>; OccupancyGrid::OccupancyGrid() : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, 0.05, 512, Eigen::Vector3f::Zero()), min_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)), max_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)) {} OccupancyGrid::OccupancyGrid(float voxel_size, size_t resolution, const Eigen::Vector3f& origin) : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, voxel_size, resolution, origin), min_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)), max_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)) {} OccupancyGrid::~OccupancyGrid() {} OccupancyGrid::OccupancyGrid(const OccupancyGrid& other) : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, other), min_bound_(other.min_bound_), max_bound_(other.max_bound_), clamping_thres_min_(other.clamping_thres_min_), clamping_thres_max_(other.clamping_thres_max_), prob_hit_log_(other.prob_hit_log_), prob_miss_log_(other.prob_miss_log_), occ_prob_thres_log_(other.occ_prob_thres_log_), visualize_free_area_(other.visualize_free_area_) {} OccupancyGrid& OccupancyGrid::Clear() { DenseGrid::Clear(); min_bound_ = Eigen::Vector3ui16::Constant(resolution_ / 2); max_bound_ = Eigen::Vector3ui16::Constant(resolution_ / 2); return *this; } Eigen::Vector3f OccupancyGrid::GetMinBound() const { return (min_bound_.cast<int>() - Eigen::Vector3i::Constant(resolution_ / 2)) .cast<float>() * voxel_size_ + origin_; } Eigen::Vector3f OccupancyGrid::GetMaxBound() const { 
return (max_bound_.cast<int>() - Eigen::Vector3i::Constant(resolution_ / 2 - 1)) .cast<float>() * voxel_size_ + origin_; } bool OccupancyGrid::IsOccupied(const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return false; OccupancyVoxel voxel = voxels_[idx]; return !std::isnan(voxel.prob_log_) && voxel.prob_log_ > occ_prob_thres_log_; } bool OccupancyGrid::IsUnknown(const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return true; OccupancyVoxel voxel = voxels_[idx]; return std::isnan(voxel.prob_log_); } thrust::tuple<bool, OccupancyVoxel> OccupancyGrid::GetVoxel( const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return thrust::make_tuple(false, OccupancyVoxel()); OccupancyVoxel voxel = voxels_[idx]; return thrust::make_tuple(!std::isnan(voxel.prob_log_), voxel); } template <typename Func> std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractBoundVoxels(Func check_func) const { Eigen::Vector3ui16 diff = max_bound_ - min_bound_ + Eigen::Vector3ui16::Ones(); auto out = std::make_shared<utility::device_vector<OccupancyVoxel>>(); out->resize(diff[0] * diff[1] * diff[2]); extract_range_voxels_functor func(diff.cast<int>(), resolution_, min_bound_.cast<int>()); auto end = thrust::copy_if( thrust::make_permutation_iterator( voxels_.begin(), thrust::make_transform_iterator( thrust::make_counting_iterator<size_t>(0), func)), thrust::make_permutation_iterator( voxels_.begin(), thrust::make_transform_iterator( thrust::make_counting_iterator(out->size()), func)), out->begin(), check_func); out->resize(thrust::distance(out->begin(), end)); return out; } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractKnownVoxels() const { auto check_fn = [th = occ_prob_thres_log_] __device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return !isnan(v.prob_log_); }; return ExtractBoundVoxels(check_fn); } 
std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractFreeVoxels() const { auto check_fn = [th = occ_prob_thres_log_] __device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return !isnan(v.prob_log_) && v.prob_log_ <= th; }; return ExtractBoundVoxels(check_fn); } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractOccupiedVoxels() const { auto check_fn = [th = occ_prob_thres_log_] __device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return !isnan(v.prob_log_) && v.prob_log_ > th; }; return ExtractBoundVoxels(check_fn); } OccupancyGrid& OccupancyGrid::Reconstruct(float voxel_size, int resolution) { DenseGrid::Reconstruct(voxel_size, resolution); return *this; } OccupancyGrid& OccupancyGrid::SetFreeArea(const Eigen::Vector3f& min_bound, const Eigen::Vector3f& max_bound) { const Eigen::Vector3i half_res = Eigen::Vector3i::Constant(resolution_ / 2); Eigen::Vector3i imin_bound = ((min_bound - origin_) / voxel_size_) .array() .floor() .matrix() .cast<int>() + half_res; Eigen::Vector3i imax_bound = ((max_bound - origin_) / voxel_size_) .array() .floor() .matrix() .cast<int>() + half_res; min_bound_ = imin_bound.array() .max(Eigen::Array3i(0, 0, 0)) .matrix() .cast<unsigned short>(); max_bound_ = imax_bound.array() .min(Eigen::Array3i(resolution_ - 1, resolution_ - 1, resolution_ - 1)) .matrix() .cast<unsigned short>(); Eigen::Vector3ui16 diff = max_bound_ - min_bound_ + Eigen::Vector3ui16::Ones(); extract_range_voxels_functor func(diff.cast<int>(), resolution_, min_bound_.cast<int>()); thrust::for_each( thrust::make_permutation_iterator( voxels_.begin(), thrust::make_transform_iterator( thrust::make_counting_iterator<size_t>(0), func)), thrust::make_permutation_iterator( voxels_.begin(), thrust::make_transform_iterator( thrust::make_counting_iterator<size_t>( diff[0] * diff[1] * diff[2]), func)), [pml = prob_miss_log_] 
__device__(geometry::OccupancyVoxel & v) { v.prob_log_ = (isnan(v.prob_log_)) ? 0 : v.prob_log_; v.prob_log_ += pml; }); return *this; } OccupancyGrid& OccupancyGrid::Insert( const utility::device_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float max_range) { if (points.empty()) return *this; utility::device_vector<Eigen::Vector3f> ranged_points(points.size()); utility::device_vector<float> ranged_dists(points.size()); utility::device_vector<bool> hit_flags(points.size()); thrust::transform( points.begin(), points.end(), make_tuple_begin(ranged_points, ranged_dists, hit_flags), [viewpoint, max_range] __device__(const Eigen::Vector3f& pt) { const Eigen::Vector3f pt_vp = pt - viewpoint; const float dist = pt_vp.norm(); const bool is_hit = max_range < 0 || dist <= max_range; const Eigen::Vector3f ranged_pt = (is_hit) ? pt : ((dist == 0) ? viewpoint : viewpoint + pt_vp / dist * max_range); return thrust::make_tuple( ranged_pt, (ranged_pt - viewpoint).array().abs().maxCoeff(), is_hit); }); float max_dist = *(thrust::max_element(ranged_dists.begin(), ranged_dists.end())); int n_div = int(::ceil(max_dist / voxel_size_)); utility::device_vector<Eigen::Vector3i> free_voxels; utility::device_vector<Eigen::Vector3i> occupied_voxels; if (n_div > 0) { // comupute free voxels ComputeFreeVoxels(ranged_points, viewpoint, voxel_size_, resolution_, origin_, n_div + 1, free_voxels); } // compute occupied voxels ComputeOccupiedVoxels(ranged_points, hit_flags, voxel_size_, resolution_, origin_, occupied_voxels); if (n_div > 0) { utility::device_vector<Eigen::Vector3i> free_voxels_res( free_voxels.size()); auto end = thrust::set_difference( free_voxels.begin(), free_voxels.end(), occupied_voxels.begin(), occupied_voxels.end(), free_voxels_res.begin()); free_voxels_res.resize(thrust::distance(free_voxels_res.begin(), end)); AddVoxels(free_voxels_res, false); } AddVoxels(occupied_voxels, true); return *this; } OccupancyGrid& OccupancyGrid::Insert( const 
utility::pinned_host_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float max_range) { utility::device_vector<Eigen::Vector3f> dev_points(points.size()); cudaSafeCall(hipMemcpy( thrust::raw_pointer_cast(dev_points.data()), points.data(), points.size() * sizeof(Eigen::Vector3f), hipMemcpyHostToDevice)); return Insert(dev_points, viewpoint, max_range); } OccupancyGrid& OccupancyGrid::Insert(const geometry::PointCloud& pointcloud, const Eigen::Vector3f& viewpoint, float max_range) { Insert(pointcloud.points_, viewpoint, max_range); return *this; } OccupancyGrid& OccupancyGrid::AddVoxel(const Eigen::Vector3i& voxel, bool occupied) { int idx = IndexOf(voxel, resolution_); size_t max_idx = resolution_ * resolution_ * resolution_; if (idx < 0 || idx >= max_idx) { utility::LogError( "[OccupancyGrid] a provided voxeld is not occupancy grid " "range."); return *this; } else { OccupancyVoxel org_ov = voxels_[idx]; if (std::isnan(org_ov.prob_log_)) org_ov.prob_log_ = 0.0; org_ov.prob_log_ += (occupied) ? 
prob_hit_log_ : prob_miss_log_; org_ov.prob_log_ = ::min(::max(org_ov.prob_log_, clamping_thres_min_), clamping_thres_max_); org_ov.grid_index_ = voxel.cast<unsigned short>(); voxels_[idx] = org_ov; min_bound_ = min_bound_.array().min(org_ov.grid_index_.array()); max_bound_ = max_bound_.array().max(org_ov.grid_index_.array()); } return *this; } OccupancyGrid& OccupancyGrid::AddVoxels( const utility::device_vector<Eigen::Vector3i>& voxels, bool occupied) { if (voxels.empty()) return *this; Eigen::Vector3i minv = utility::ComputeMinBound<3, int>(voxels); Eigen::Vector3i maxv = utility::ComputeMaxBound<3, int>(voxels); Eigen::Vector3ui16 minvu = minv.cast<unsigned short>(); Eigen::Vector3ui16 maxvu = maxv.cast<unsigned short>(); min_bound_ = min_bound_.array().min(minvu.array()); min_bound_ = min_bound_.array().min(maxvu.array()); max_bound_ = max_bound_.array().max(minvu.array()); max_bound_ = max_bound_.array().max(maxvu.array()); add_occupancy_functor func(thrust::raw_pointer_cast(voxels_.data()), resolution_, clamping_thres_min_, clamping_thres_max_, prob_miss_log_, prob_hit_log_, occupied); thrust::for_each(voxels.begin(), voxels.end(), func); return *this; } } // namespace geometry } // namespace cupoch
2d1605eddcf26d2a74e09a858d48fcdbc290eeda.cu
/** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
**/ #include <thrust/unique.h> #include <thrust/sort.h> #include <thrust/set_operations.h> #include <thrust/iterator/discard_iterator.h> #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/densegrid.inl" #include "cupoch/geometry/geometry_functor.h" #include "cupoch/geometry/geometry_utils.h" #include "cupoch/geometry/intersection_test.h" #include "cupoch/geometry/occupancygrid.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/voxelgrid.h" #include "cupoch/utility/eigen.h" #include "cupoch/utility/platform.h" namespace cupoch { namespace geometry { namespace { struct extract_range_voxels_functor { extract_range_voxels_functor(const Eigen::Vector3i& extents, int resolution, const Eigen::Vector3i& min_bound) : extents_(extents), resolution_(resolution), min_bound_(min_bound){}; const Eigen::Vector3i extents_; const int resolution_; const Eigen::Vector3i min_bound_; __device__ int operator()(size_t idx) const { int x = idx / (extents_[1] * extents_[2]); int yz = idx % (extents_[1] * extents_[2]); int y = yz / extents_[2]; int z = yz % extents_[2]; Eigen::Vector3i gidx = min_bound_ + Eigen::Vector3i(x, y, z); return IndexOf(gidx, resolution_); } }; __device__ int VoxelTraversal(Eigen::Vector3i* voxels, int n_buffer, const Eigen::Vector3i& half_resolution, const Eigen::Vector3f& start, const Eigen::Vector3f& end, float voxel_size) { int n_voxels = 0; Eigen::Vector3f ray = end - start; float length = ray.norm(); if (length == 0) { return n_voxels; } ray /= length; Eigen::Vector3i current_voxel(floorf(start[0] / voxel_size), floorf(start[1] / voxel_size), floorf(start[2] / voxel_size)); Eigen::Vector3i last_voxel(floorf(end[0] / voxel_size), floorf(end[1] / voxel_size), floorf(end[2] / voxel_size)); float stepX = (ray[0] > 0) ? 1 : ((ray[0] < 0) ? -1 : 0); float stepY = (ray[1] > 0) ? 1 : ((ray[1] < 0) ? -1 : 0); float stepZ = (ray[2] > 0) ? 1 : ((ray[2] < 0) ? 
-1 : 0); float voxel_boundary_x = (current_voxel[0] + 0.5 * stepX) * voxel_size; float voxel_boundary_y = (current_voxel[1] + 0.5 * stepY) * voxel_size; float voxel_boundary_z = (current_voxel[2] + 0.5 * stepZ) * voxel_size; float tMaxX = (stepX != 0) ? (voxel_boundary_x - start[0]) / ray[0] : std::numeric_limits<float>::infinity(); float tMaxY = (stepY != 0) ? (voxel_boundary_y - start[1]) / ray[1] : std::numeric_limits<float>::infinity(); float tMaxZ = (stepZ != 0) ? (voxel_boundary_z - start[2]) / ray[2] : std::numeric_limits<float>::infinity(); float tDeltaX = (stepX != 0) ? voxel_size / fabs(ray[0]) : std::numeric_limits<float>::infinity(); float tDeltaY = (stepY != 0) ? voxel_size / fabs(ray[1]) : std::numeric_limits<float>::infinity(); float tDeltaZ = (stepZ != 0) ? voxel_size / fabs(ray[2]) : std::numeric_limits<float>::infinity(); voxels[n_voxels] = current_voxel + half_resolution; ++n_voxels; while (n_voxels < n_buffer) { if (tMaxX < tMaxY) { if (tMaxX < tMaxZ) { current_voxel[0] += stepX; tMaxX += tDeltaX; } else { current_voxel[2] += stepZ; tMaxZ += tDeltaZ; } } else { if (tMaxY < tMaxZ) { current_voxel[1] += stepY; tMaxY += tDeltaY; } else { current_voxel[2] += stepZ; tMaxZ += tDeltaZ; } } if (last_voxel == current_voxel) { break; } else { float dist_from_origin = min(min(tMaxX, tMaxY), tMaxZ); if (dist_from_origin > length) { break; } else { voxels[n_voxels] = current_voxel + half_resolution; ++n_voxels; } } } return n_voxels; } struct compute_voxel_traversal_functor { compute_voxel_traversal_functor(Eigen::Vector3i* voxels, int n_step, const Eigen::Vector3f& viewpoint, const Eigen::Vector3i& half_resolution, float voxel_size, const Eigen::Vector3f& origin) : voxels_(voxels), n_step_(n_step), viewpoint_(viewpoint), half_resolution_(half_resolution), voxel_size_(voxel_size), origin_(origin){}; Eigen::Vector3i* voxels_; const int n_step_; const Eigen::Vector3f viewpoint_; const Eigen::Vector3i half_resolution_; const float voxel_size_; const 
Eigen::Vector3f origin_; __device__ void operator()( const thrust::tuple<size_t, Eigen::Vector3f>& x) { const int idx = thrust::get<0>(x); const Eigen::Vector3f end = thrust::get<1>(x); VoxelTraversal(voxels_ + idx * n_step_, n_step_, half_resolution_, viewpoint_, end - origin_, voxel_size_); } }; void ComputeFreeVoxels(const utility::device_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float voxel_size, int resolution, Eigen::Vector3f& origin, int n_div, utility::device_vector<Eigen::Vector3i>& free_voxels) { if (points.empty()) return; size_t n_points = points.size(); Eigen::Vector3i half_resolution = Eigen::Vector3i::Constant(resolution / 2); free_voxels.resize( n_div * 3 * n_points, Eigen::Vector3i::Constant(geometry::INVALID_VOXEL_INDEX)); compute_voxel_traversal_functor func( thrust::raw_pointer_cast(free_voxels.data()), n_div * 3, viewpoint - origin, half_resolution, voxel_size, origin); thrust::for_each(enumerate_begin(points), enumerate_end(points), func); auto end1 = thrust::remove_if( free_voxels.begin(), free_voxels.end(), [resolution] __device__(const Eigen::Vector3i& idx) -> bool { return idx[0] < 0 || idx[1] < 0 || idx[2] < 0 || idx[0] >= resolution || idx[1] >= resolution || idx[2] >= resolution; }); free_voxels.resize(thrust::distance(free_voxels.begin(), end1)); thrust::sort(utility::exec_policy(0), free_voxels.begin(), free_voxels.end()); auto end2 = thrust::unique(utility::exec_policy(0), free_voxels.begin(), free_voxels.end()); free_voxels.resize(thrust::distance(free_voxels.begin(), end2)); } struct create_occupancy_voxels_functor { create_occupancy_voxels_functor(const Eigen::Vector3f& origin, const Eigen::Vector3i& half_resolution, float voxel_size) : origin_(origin), half_resolution_(half_resolution), voxel_size_(voxel_size){}; const Eigen::Vector3f origin_; const Eigen::Vector3i half_resolution_; const float voxel_size_; __device__ Eigen::Vector3i operator()( const thrust::tuple<Eigen::Vector3f, bool>& x) const { const 
Eigen::Vector3f& point = thrust::get<0>(x); bool hit_flag = thrust::get<1>(x); Eigen::Vector3f ref_coord = (point - origin_) / voxel_size_; return (hit_flag) ? Eigen::device_vectorize<float, 3, ::floor>(ref_coord) .cast<int>() + half_resolution_ : Eigen::Vector3i(INVALID_VOXEL_INDEX, INVALID_VOXEL_INDEX, INVALID_VOXEL_INDEX); } }; void ComputeOccupiedVoxels( const utility::device_vector<Eigen::Vector3f>& points, const utility::device_vector<bool> hit_flags, float voxel_size, int resolution, Eigen::Vector3f& origin, utility::device_vector<Eigen::Vector3i>& occupied_voxels) { occupied_voxels.resize(points.size()); Eigen::Vector3i half_resolution = Eigen::Vector3i::Constant(resolution / 2); create_occupancy_voxels_functor func(origin, half_resolution, voxel_size); thrust::transform(make_tuple_begin(points, hit_flags), make_tuple_end(points, hit_flags), occupied_voxels.begin(), func); auto end1 = thrust::remove_if( occupied_voxels.begin(), occupied_voxels.end(), [resolution] __device__(const Eigen::Vector3i& idx) -> bool { return idx[0] < 0 || idx[1] < 0 || idx[2] < 0 || idx[0] >= resolution || idx[1] >= resolution || idx[2] >= resolution; }); occupied_voxels.resize(thrust::distance(occupied_voxels.begin(), end1)); thrust::sort(utility::exec_policy(0), occupied_voxels.begin(), occupied_voxels.end()); auto end2 = thrust::unique(utility::exec_policy(0), occupied_voxels.begin(), occupied_voxels.end()); occupied_voxels.resize(thrust::distance(occupied_voxels.begin(), end2)); } struct add_occupancy_functor { add_occupancy_functor(OccupancyVoxel* voxels, int resolution, float clamping_thres_min, float clamping_thres_max, float prob_miss_log, float prob_hit_log, bool occupied) : voxels_(voxels), resolution_(resolution), clamping_thres_min_(clamping_thres_min), clamping_thres_max_(clamping_thres_max), prob_miss_log_(prob_miss_log), prob_hit_log_(prob_hit_log), occupied_(occupied){}; OccupancyVoxel* voxels_; const int resolution_; const float clamping_thres_min_; const float 
clamping_thres_max_; const float prob_miss_log_; const float prob_hit_log_; const bool occupied_; __device__ void operator()(const Eigen::Vector3i& voxel) { size_t idx = IndexOf(voxel, resolution_); float p = voxels_[idx].prob_log_; p = (isnan(p)) ? 0 : p; p += (occupied_) ? prob_hit_log_ : prob_miss_log_; voxels_[idx].prob_log_ = min(max(p, clamping_thres_min_), clamping_thres_max_); voxels_[idx].grid_index_ = voxel.cast<unsigned short>(); } }; } // namespace template class DenseGrid<OccupancyVoxel>; OccupancyGrid::OccupancyGrid() : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, 0.05, 512, Eigen::Vector3f::Zero()), min_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)), max_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)) {} OccupancyGrid::OccupancyGrid(float voxel_size, size_t resolution, const Eigen::Vector3f& origin) : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, voxel_size, resolution, origin), min_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)), max_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)) {} OccupancyGrid::~OccupancyGrid() {} OccupancyGrid::OccupancyGrid(const OccupancyGrid& other) : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, other), min_bound_(other.min_bound_), max_bound_(other.max_bound_), clamping_thres_min_(other.clamping_thres_min_), clamping_thres_max_(other.clamping_thres_max_), prob_hit_log_(other.prob_hit_log_), prob_miss_log_(other.prob_miss_log_), occ_prob_thres_log_(other.occ_prob_thres_log_), visualize_free_area_(other.visualize_free_area_) {} OccupancyGrid& OccupancyGrid::Clear() { DenseGrid::Clear(); min_bound_ = Eigen::Vector3ui16::Constant(resolution_ / 2); max_bound_ = Eigen::Vector3ui16::Constant(resolution_ / 2); return *this; } Eigen::Vector3f OccupancyGrid::GetMinBound() const { return (min_bound_.cast<int>() - Eigen::Vector3i::Constant(resolution_ / 2)) .cast<float>() * voxel_size_ + origin_; } Eigen::Vector3f OccupancyGrid::GetMaxBound() const { 
return (max_bound_.cast<int>() - Eigen::Vector3i::Constant(resolution_ / 2 - 1)) .cast<float>() * voxel_size_ + origin_; } bool OccupancyGrid::IsOccupied(const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return false; OccupancyVoxel voxel = voxels_[idx]; return !std::isnan(voxel.prob_log_) && voxel.prob_log_ > occ_prob_thres_log_; } bool OccupancyGrid::IsUnknown(const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return true; OccupancyVoxel voxel = voxels_[idx]; return std::isnan(voxel.prob_log_); } thrust::tuple<bool, OccupancyVoxel> OccupancyGrid::GetVoxel( const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return thrust::make_tuple(false, OccupancyVoxel()); OccupancyVoxel voxel = voxels_[idx]; return thrust::make_tuple(!std::isnan(voxel.prob_log_), voxel); } template <typename Func> std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractBoundVoxels(Func check_func) const { Eigen::Vector3ui16 diff = max_bound_ - min_bound_ + Eigen::Vector3ui16::Ones(); auto out = std::make_shared<utility::device_vector<OccupancyVoxel>>(); out->resize(diff[0] * diff[1] * diff[2]); extract_range_voxels_functor func(diff.cast<int>(), resolution_, min_bound_.cast<int>()); auto end = thrust::copy_if( thrust::make_permutation_iterator( voxels_.begin(), thrust::make_transform_iterator( thrust::make_counting_iterator<size_t>(0), func)), thrust::make_permutation_iterator( voxels_.begin(), thrust::make_transform_iterator( thrust::make_counting_iterator(out->size()), func)), out->begin(), check_func); out->resize(thrust::distance(out->begin(), end)); return out; } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractKnownVoxels() const { auto check_fn = [th = occ_prob_thres_log_] __device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return !isnan(v.prob_log_); }; return ExtractBoundVoxels(check_fn); } 
std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractFreeVoxels() const { auto check_fn = [th = occ_prob_thres_log_] __device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return !isnan(v.prob_log_) && v.prob_log_ <= th; }; return ExtractBoundVoxels(check_fn); } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractOccupiedVoxels() const { auto check_fn = [th = occ_prob_thres_log_] __device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return !isnan(v.prob_log_) && v.prob_log_ > th; }; return ExtractBoundVoxels(check_fn); } OccupancyGrid& OccupancyGrid::Reconstruct(float voxel_size, int resolution) { DenseGrid::Reconstruct(voxel_size, resolution); return *this; } OccupancyGrid& OccupancyGrid::SetFreeArea(const Eigen::Vector3f& min_bound, const Eigen::Vector3f& max_bound) { const Eigen::Vector3i half_res = Eigen::Vector3i::Constant(resolution_ / 2); Eigen::Vector3i imin_bound = ((min_bound - origin_) / voxel_size_) .array() .floor() .matrix() .cast<int>() + half_res; Eigen::Vector3i imax_bound = ((max_bound - origin_) / voxel_size_) .array() .floor() .matrix() .cast<int>() + half_res; min_bound_ = imin_bound.array() .max(Eigen::Array3i(0, 0, 0)) .matrix() .cast<unsigned short>(); max_bound_ = imax_bound.array() .min(Eigen::Array3i(resolution_ - 1, resolution_ - 1, resolution_ - 1)) .matrix() .cast<unsigned short>(); Eigen::Vector3ui16 diff = max_bound_ - min_bound_ + Eigen::Vector3ui16::Ones(); extract_range_voxels_functor func(diff.cast<int>(), resolution_, min_bound_.cast<int>()); thrust::for_each( thrust::make_permutation_iterator( voxels_.begin(), thrust::make_transform_iterator( thrust::make_counting_iterator<size_t>(0), func)), thrust::make_permutation_iterator( voxels_.begin(), thrust::make_transform_iterator( thrust::make_counting_iterator<size_t>( diff[0] * diff[1] * diff[2]), func)), [pml = prob_miss_log_] 
__device__(geometry::OccupancyVoxel & v) { v.prob_log_ = (isnan(v.prob_log_)) ? 0 : v.prob_log_; v.prob_log_ += pml; }); return *this; } OccupancyGrid& OccupancyGrid::Insert( const utility::device_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float max_range) { if (points.empty()) return *this; utility::device_vector<Eigen::Vector3f> ranged_points(points.size()); utility::device_vector<float> ranged_dists(points.size()); utility::device_vector<bool> hit_flags(points.size()); thrust::transform( points.begin(), points.end(), make_tuple_begin(ranged_points, ranged_dists, hit_flags), [viewpoint, max_range] __device__(const Eigen::Vector3f& pt) { const Eigen::Vector3f pt_vp = pt - viewpoint; const float dist = pt_vp.norm(); const bool is_hit = max_range < 0 || dist <= max_range; const Eigen::Vector3f ranged_pt = (is_hit) ? pt : ((dist == 0) ? viewpoint : viewpoint + pt_vp / dist * max_range); return thrust::make_tuple( ranged_pt, (ranged_pt - viewpoint).array().abs().maxCoeff(), is_hit); }); float max_dist = *(thrust::max_element(ranged_dists.begin(), ranged_dists.end())); int n_div = int(std::ceil(max_dist / voxel_size_)); utility::device_vector<Eigen::Vector3i> free_voxels; utility::device_vector<Eigen::Vector3i> occupied_voxels; if (n_div > 0) { // comupute free voxels ComputeFreeVoxels(ranged_points, viewpoint, voxel_size_, resolution_, origin_, n_div + 1, free_voxels); } // compute occupied voxels ComputeOccupiedVoxels(ranged_points, hit_flags, voxel_size_, resolution_, origin_, occupied_voxels); if (n_div > 0) { utility::device_vector<Eigen::Vector3i> free_voxels_res( free_voxels.size()); auto end = thrust::set_difference( free_voxels.begin(), free_voxels.end(), occupied_voxels.begin(), occupied_voxels.end(), free_voxels_res.begin()); free_voxels_res.resize(thrust::distance(free_voxels_res.begin(), end)); AddVoxels(free_voxels_res, false); } AddVoxels(occupied_voxels, true); return *this; } OccupancyGrid& OccupancyGrid::Insert( const 
utility::pinned_host_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float max_range) { utility::device_vector<Eigen::Vector3f> dev_points(points.size()); cudaSafeCall(cudaMemcpy( thrust::raw_pointer_cast(dev_points.data()), points.data(), points.size() * sizeof(Eigen::Vector3f), cudaMemcpyHostToDevice)); return Insert(dev_points, viewpoint, max_range); } OccupancyGrid& OccupancyGrid::Insert(const geometry::PointCloud& pointcloud, const Eigen::Vector3f& viewpoint, float max_range) { Insert(pointcloud.points_, viewpoint, max_range); return *this; } OccupancyGrid& OccupancyGrid::AddVoxel(const Eigen::Vector3i& voxel, bool occupied) { int idx = IndexOf(voxel, resolution_); size_t max_idx = resolution_ * resolution_ * resolution_; if (idx < 0 || idx >= max_idx) { utility::LogError( "[OccupancyGrid] a provided voxeld is not occupancy grid " "range."); return *this; } else { OccupancyVoxel org_ov = voxels_[idx]; if (std::isnan(org_ov.prob_log_)) org_ov.prob_log_ = 0.0; org_ov.prob_log_ += (occupied) ? 
prob_hit_log_ : prob_miss_log_; org_ov.prob_log_ = std::min(std::max(org_ov.prob_log_, clamping_thres_min_), clamping_thres_max_); org_ov.grid_index_ = voxel.cast<unsigned short>(); voxels_[idx] = org_ov; min_bound_ = min_bound_.array().min(org_ov.grid_index_.array()); max_bound_ = max_bound_.array().max(org_ov.grid_index_.array()); } return *this; } OccupancyGrid& OccupancyGrid::AddVoxels( const utility::device_vector<Eigen::Vector3i>& voxels, bool occupied) { if (voxels.empty()) return *this; Eigen::Vector3i minv = utility::ComputeMinBound<3, int>(voxels); Eigen::Vector3i maxv = utility::ComputeMaxBound<3, int>(voxels); Eigen::Vector3ui16 minvu = minv.cast<unsigned short>(); Eigen::Vector3ui16 maxvu = maxv.cast<unsigned short>(); min_bound_ = min_bound_.array().min(minvu.array()); min_bound_ = min_bound_.array().min(maxvu.array()); max_bound_ = max_bound_.array().max(minvu.array()); max_bound_ = max_bound_.array().max(maxvu.array()); add_occupancy_functor func(thrust::raw_pointer_cast(voxels_.data()), resolution_, clamping_thres_min_, clamping_thres_max_, prob_miss_log_, prob_hit_log_, occupied); thrust::for_each(voxels.begin(), voxels.end(), func); return *this; } } // namespace geometry } // namespace cupoch
f62bda0f2c9a08f50b6f9f8d398041b334ec6a08.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This program computes matrix multiplication using shared memory tiling // By: Nick from CoffeeBeforeArch #include <algorithm> #include <cassert> #include <cstdlib> #include <functional> #include <iostream> #include <vector> #define BILLION 1000000000L ; using std::cout; using std::endl; using std::generate; using std::vector; // Pull out matrix and shared memory tile size const int N = 1 << 10; const int SHMEM_SIZE = 1 << 10; __global__ void matrixMul(const int *a, const int *b, int *c) { // Compute each thread's global row and column index int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; // Statically allocated shared memory __shared__ int s_a[SHMEM_SIZE]; __shared__ int s_b[SHMEM_SIZE]; // Accumulate in temporary variable int tmp = 0; // Sweep tile across matrix for (int i = 0; i < N; i += blockDim.x) { // Load in elements for this tile s_a[threadIdx.y * blockDim.x + threadIdx.x] = a[row * N + i + threadIdx.x]; s_b[threadIdx.y * blockDim.x + threadIdx.x] = b[i * N + threadIdx.y * N + col]; // Wait for both tiles to be loaded in before doing computation __syncthreads(); // Do matrix multiplication on the small matrix for (int j = 0; j < blockDim.x; j++) { tmp += s_a[threadIdx.y * blockDim.x + j] * s_b[j * blockDim.x + threadIdx.x]; } // Wait for all threads to finish using current tiles before loading in new // ones __syncthreads(); } // Write back results c[row * N + col] = tmp; } // Check result on the CPU void verify_result(vector<int> &a, vector<int> &b, vector<int> &c) { // For every row... for (int i = 0; i < N; i++) { // For every column... 
for (int j = 0; j < N; j++) { // For every element in the row-column pair int tmp = 0; for (int k = 0; k < N; k++) { // Accumulate the partial results tmp += a[i * N + k] * b[k * N + j]; } // Check against the CPU result assert(tmp == c[i * N + j]); } } } int main() { // Size (in bytes) of matrix size_t bytes = N * N * sizeof(int); struct timespec start, stop; double accum; // Host vectors vector<int> h_a(N * N); vector<int> h_b(N * N); vector<int> h_c(N * N); // Initialize matrices generate(h_a.begin(), h_a.end(), []() { return rand() % 100; }); generate(h_b.begin(), h_b.end(), []() { return rand() % 100; }); // Allocate device memory int *d_a, *d_b, *d_c; hipMalloc(&d_a, bytes); hipMalloc(&d_b, bytes); hipMalloc(&d_c, bytes); // Copy data to the device hipMemcpy(d_a, h_a.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b.data(), bytes, hipMemcpyHostToDevice); // Threads per CTA dimension int THREADS = 32; // Blocks per grid dimension (assumes THREADS divides N evenly) int BLOCKS = N / THREADS; // Use dim3 structs for block and grid dimensions dim3 threads(THREADS, THREADS); dim3 blocks(BLOCKS, BLOCKS); clock_gettime(CLOCK_REALTIME, &start); // Launch kernel hipLaunchKernelGGL(( matrixMul), dim3(blocks), dim3(threads), 0, 0, d_a, d_b, d_c); // Copy back to the host hipMemcpy(h_c.data(), d_c, bytes, hipMemcpyDeviceToHost); clock_gettime(CLOCK_REALTIME, &stop); accum = ( stop.tv_sec - start.tv_sec )+ // elapsed time ( stop.tv_nsec - start.tv_nsec )/( double ) BILLION ; cout << "CUDA wall time=" << accum << endl; // Check result verify_result(h_a, h_b, h_c); cout << "COMPLETED SUCCESSFULLY\n"; // Free memory on device hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
f62bda0f2c9a08f50b6f9f8d398041b334ec6a08.cu
// This program computes matrix multiplication using shared memory tiling // By: Nick from CoffeeBeforeArch #include <algorithm> #include <cassert> #include <cstdlib> #include <functional> #include <iostream> #include <vector> #define BILLION 1000000000L ; using std::cout; using std::endl; using std::generate; using std::vector; // Pull out matrix and shared memory tile size const int N = 1 << 10; const int SHMEM_SIZE = 1 << 10; __global__ void matrixMul(const int *a, const int *b, int *c) { // Compute each thread's global row and column index int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; // Statically allocated shared memory __shared__ int s_a[SHMEM_SIZE]; __shared__ int s_b[SHMEM_SIZE]; // Accumulate in temporary variable int tmp = 0; // Sweep tile across matrix for (int i = 0; i < N; i += blockDim.x) { // Load in elements for this tile s_a[threadIdx.y * blockDim.x + threadIdx.x] = a[row * N + i + threadIdx.x]; s_b[threadIdx.y * blockDim.x + threadIdx.x] = b[i * N + threadIdx.y * N + col]; // Wait for both tiles to be loaded in before doing computation __syncthreads(); // Do matrix multiplication on the small matrix for (int j = 0; j < blockDim.x; j++) { tmp += s_a[threadIdx.y * blockDim.x + j] * s_b[j * blockDim.x + threadIdx.x]; } // Wait for all threads to finish using current tiles before loading in new // ones __syncthreads(); } // Write back results c[row * N + col] = tmp; } // Check result on the CPU void verify_result(vector<int> &a, vector<int> &b, vector<int> &c) { // For every row... for (int i = 0; i < N; i++) { // For every column... 
for (int j = 0; j < N; j++) { // For every element in the row-column pair int tmp = 0; for (int k = 0; k < N; k++) { // Accumulate the partial results tmp += a[i * N + k] * b[k * N + j]; } // Check against the CPU result assert(tmp == c[i * N + j]); } } } int main() { // Size (in bytes) of matrix size_t bytes = N * N * sizeof(int); struct timespec start, stop; double accum; // Host vectors vector<int> h_a(N * N); vector<int> h_b(N * N); vector<int> h_c(N * N); // Initialize matrices generate(h_a.begin(), h_a.end(), []() { return rand() % 100; }); generate(h_b.begin(), h_b.end(), []() { return rand() % 100; }); // Allocate device memory int *d_a, *d_b, *d_c; cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes); // Copy data to the device cudaMemcpy(d_a, h_a.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b.data(), bytes, cudaMemcpyHostToDevice); // Threads per CTA dimension int THREADS = 32; // Blocks per grid dimension (assumes THREADS divides N evenly) int BLOCKS = N / THREADS; // Use dim3 structs for block and grid dimensions dim3 threads(THREADS, THREADS); dim3 blocks(BLOCKS, BLOCKS); clock_gettime(CLOCK_REALTIME, &start); // Launch kernel matrixMul<<<blocks, threads>>>(d_a, d_b, d_c); // Copy back to the host cudaMemcpy(h_c.data(), d_c, bytes, cudaMemcpyDeviceToHost); clock_gettime(CLOCK_REALTIME, &stop); accum = ( stop.tv_sec - start.tv_sec )+ // elapsed time ( stop.tv_nsec - start.tv_nsec )/( double ) BILLION ; cout << "CUDA wall time=" << accum << endl; // Check result verify_result(h_a, h_b, h_c); cout << "COMPLETED SUCCESSFULLY\n"; // Free memory on device cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
6b74253ee2d86a19641b0baffd4c71551e48d115.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Fractale.h" #include "FractaleMath.h" #include "Julia.h" #include "IndiceTools_GPU.h" #include "DomaineMath_GPU.h" #include <iostream> #include <assert.h> extern __global__ void JuliaKernel(uchar4* ptrDevPixels, uint w, uint h,const DomaineMath &domaineMath, uint n, float t); Julia::Julia(const Grid& grid, uint w, uint h, const DomaineMath& domaineMath): Fractale(grid,w,h,domaineMath){} Julia::~Julia(){} void Julia::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath) { Device::lastCudaError("Fractale Julia rgba uchar4 (before)"); // facultatif, for debug only, remove for release hipLaunchKernelGGL(( JuliaKernel), dim3(dg),dim3(db), 0, 0, ptrDevPixels,w,h,domaineMath,n,t); Device::lastCudaError("Fractale Julia rgba uchar4 (after)"); // facultatif, for debug only, remove for release } __device__ void Julia::workPixel(uchar4* ptrColorIJ, int i, int j, const DomaineMath& domaineMath, JuliaMath* ptrJuliaMath) { // (i,j) domaine ecran dans N2 // (x,y) domaine math dans R2 double x; double y; domaineMath.toXY(i, j, &x, &y); // fill (x,y) from (i,j) // float t=variateurAnimation.get(); //ptrJuliaMath->colorXY(ptrColorIJ, x, y); // in [01] }
6b74253ee2d86a19641b0baffd4c71551e48d115.cu
#include "Fractale.h" #include "FractaleMath.h" #include "Julia.h" #include "IndiceTools_GPU.h" #include "DomaineMath_GPU.h" #include <iostream> #include <assert.h> extern __global__ void JuliaKernel(uchar4* ptrDevPixels, uint w, uint h,const DomaineMath &domaineMath, uint n, float t); Julia::Julia(const Grid& grid, uint w, uint h, const DomaineMath& domaineMath): Fractale(grid,w,h,domaineMath){} Julia::~Julia(){} void Julia::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath) { Device::lastCudaError("Fractale Julia rgba uchar4 (before)"); // facultatif, for debug only, remove for release JuliaKernel<<<dg,db>>>(ptrDevPixels,w,h,domaineMath,n,t); Device::lastCudaError("Fractale Julia rgba uchar4 (after)"); // facultatif, for debug only, remove for release } __device__ void Julia::workPixel(uchar4* ptrColorIJ, int i, int j, const DomaineMath& domaineMath, JuliaMath* ptrJuliaMath) { // (i,j) domaine ecran dans N2 // (x,y) domaine math dans R2 double x; double y; domaineMath.toXY(i, j, &x, &y); // fill (x,y) from (i,j) // float t=variateurAnimation.get(); //ptrJuliaMath->colorXY(ptrColorIJ, x, y); // in [01] }
b6156c4677b74060899a6299840c453e62859e99.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef __cplusplus extern "C" { #endif #include "cd_cuda_kernel.h" __global__ void cd_cuda_forward_kernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ const int batch=512; __shared__ float buf[batch*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int k2=0;k2<m;k2+=batch){ int end_k=min(m,k2+batch)-k2; for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){ buf[j]=xyz2[(i*m+k2)*3+j]; } __syncthreads(); for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz[(i*n+j)*3+0]; float y1=xyz[(i*n+j)*3+1]; float z1=xyz[(i*n+j)*3+2]; int best_i=0; float best=0; int end_ka=end_k-(end_k&3); if (end_ka==batch){ for (int k=0;k<batch;k+=4){ { float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } { float x2=buf[k*3+3]-x1; float y2=buf[k*3+4]-y1; float z2=buf[k*3+5]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+1; } } { float x2=buf[k*3+6]-x1; float y2=buf[k*3+7]-y1; float z2=buf[k*3+8]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+2; } } { float x2=buf[k*3+9]-x1; float y2=buf[k*3+10]-y1; float z2=buf[k*3+11]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+3; } } } }else{ for (int k=0;k<end_ka;k+=4){ { float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } { float x2=buf[k*3+3]-x1; float y2=buf[k*3+4]-y1; float z2=buf[k*3+5]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+1; } } { float x2=buf[k*3+6]-x1; float y2=buf[k*3+7]-y1; float z2=buf[k*3+8]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+2; } } { float x2=buf[k*3+9]-x1; float y2=buf[k*3+10]-y1; float z2=buf[k*3+11]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+3; } } } } for (int 
k=end_ka;k<end_k;k++){ float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } if (k2==0 || result[(i*n+j)]>best){ result[(i*n+j)]=best; result_i[(i*n+j)]=best_i; } } __syncthreads(); } } } int cd_forward_Launcher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){ hipLaunchKernelGGL(( cd_cuda_forward_kernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,n,xyz,m,xyz2,result,result_i); hipLaunchKernelGGL(( cd_cuda_forward_kernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,m,xyz2,n,xyz,result2,result2_i); return 1; } __global__ void cd_cuda_backward_kernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz1[(i*n+j)*3+0]; float y1=xyz1[(i*n+j)*3+1]; float z1=xyz1[(i*n+j)*3+2]; int j2=idx1[i*n+j]; float x2=xyz2[(i*m+j2)*3+0]; float y2=xyz2[(i*m+j2)*3+1]; float z2=xyz2[(i*m+j2)*3+2]; float g=grad_dist1[i*n+j]*2; atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2)); atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2))); } } } int cd_backward_Launcher(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2){ hipLaunchKernelGGL(( cd_cuda_backward_kernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2); hipLaunchKernelGGL(( cd_cuda_backward_kernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1); return 1; } #ifdef __cplusplus } 
#endif
b6156c4677b74060899a6299840c453e62859e99.cu
#ifdef __cplusplus extern "C" { #endif #include "cd_cuda_kernel.h" __global__ void cd_cuda_forward_kernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ const int batch=512; __shared__ float buf[batch*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int k2=0;k2<m;k2+=batch){ int end_k=min(m,k2+batch)-k2; for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){ buf[j]=xyz2[(i*m+k2)*3+j]; } __syncthreads(); for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz[(i*n+j)*3+0]; float y1=xyz[(i*n+j)*3+1]; float z1=xyz[(i*n+j)*3+2]; int best_i=0; float best=0; int end_ka=end_k-(end_k&3); if (end_ka==batch){ for (int k=0;k<batch;k+=4){ { float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } { float x2=buf[k*3+3]-x1; float y2=buf[k*3+4]-y1; float z2=buf[k*3+5]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+1; } } { float x2=buf[k*3+6]-x1; float y2=buf[k*3+7]-y1; float z2=buf[k*3+8]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+2; } } { float x2=buf[k*3+9]-x1; float y2=buf[k*3+10]-y1; float z2=buf[k*3+11]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+3; } } } }else{ for (int k=0;k<end_ka;k+=4){ { float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } { float x2=buf[k*3+3]-x1; float y2=buf[k*3+4]-y1; float z2=buf[k*3+5]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+1; } } { float x2=buf[k*3+6]-x1; float y2=buf[k*3+7]-y1; float z2=buf[k*3+8]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+2; } } { float x2=buf[k*3+9]-x1; float y2=buf[k*3+10]-y1; float z2=buf[k*3+11]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+3; } } } } for (int k=end_ka;k<end_k;k++){ float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float 
d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } if (k2==0 || result[(i*n+j)]>best){ result[(i*n+j)]=best; result_i[(i*n+j)]=best_i; } } __syncthreads(); } } } int cd_forward_Launcher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){ cd_cuda_forward_kernel<<<dim3(32,16,1),512>>>(b,n,xyz,m,xyz2,result,result_i); cd_cuda_forward_kernel<<<dim3(32,16,1),512>>>(b,m,xyz2,n,xyz,result2,result2_i); return 1; } __global__ void cd_cuda_backward_kernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz1[(i*n+j)*3+0]; float y1=xyz1[(i*n+j)*3+1]; float z1=xyz1[(i*n+j)*3+2]; int j2=idx1[i*n+j]; float x2=xyz2[(i*m+j2)*3+0]; float y2=xyz2[(i*m+j2)*3+1]; float z2=xyz2[(i*m+j2)*3+2]; float g=grad_dist1[i*n+j]*2; atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2)); atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2))); } } } int cd_backward_Launcher(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2){ cd_cuda_backward_kernel<<<dim3(1,16,1),256>>>(b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2); cd_cuda_backward_kernel<<<dim3(1,16,1),256>>>(b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1); return 1; } #ifdef __cplusplus } #endif
41da4935b2a738781da3764a77fe2a358f6ba9e7.hip
// !!! This is a file automatically generated by hipify!!! /* Sparse Blocks Network Copyright (c) 2017, Uber Technologies, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef GOOGLE_CUDA #define EIGEN_USE_GPU #define EIGEN_USE_THREADS #include "sparse_gather.h" #include "sparse_blocks.cu.h" // #include "tensorflow/core/util/cuda_kernel_helper.h" #include "cuda_helpers.h" #include "hip/hip_runtime.h" #include "op_utils.h" using namespace tensorflow; using std::cout; using std::endl; #define COMPUTE_R1(RR) ((RR) < 7 ? ((RR) == 1 ? 1 : 2) : 4) typedef Eigen::GpuDevice GPUDevice; namespace { struct LaunchParams { dim3 block, grid; int shmemSize; int bSzH1; int fittingC1; enum { MAX_SHMEM = 24*1024 }; LaunchParams(int C, int bSzH, int bSzW, int numActive) { fittingC1 = ::min(32, C); bSzH1 = COMPUTE_R1(bSzH); while ((shmemSize = (fittingC1+1)*bSzH1*bSzW*sizeof(float)) > MAX_SHMEM) fittingC1--; assert(fittingC1 >= 1); assert(bSzH1*bSzW*(fittingC1+1)*sizeof(float) <= MAX_SHMEM); block = dim3(512, 1, 1); grid = dim3(numActive, DIVUP(C, fittingC1), DIVUP(bSzH, bSzH1)); } }; } // Define the GPU implementation that launches the CUDA kernel. 
template <typename T> struct SparseGatherFunctor<GPUDevice, T> { void operator()( const GPUDevice& d, const T* x, int N, int H, int W, int C, T* y, int bOffsH0, int bOffsW0, int bSzH, int bSzW, int bStrH, int bStrW, int numActive, const short* activeBlockIndices, bool transpose ) { LaunchParams lp(C, bSzH, bSzW, numActive); bool hasInst = false; #if 1 #define CALL(RR, CC1, trans) \ if (bSzH == RR && bSzW == RR && lp.fittingC1 == CC1) { \ hasInst = true; \ hipLaunchKernelGGL(( blockGatherTiled0<512, RR, COMPUTE_R1(RR), RR, CC1, trans>), dim3(lp.grid), dim3(lp.block), lp.shmemSize, d.stream(), \ x, (const short*)activeBlockIndices, \ y, N, H, W, C, bOffsH0, bOffsW0, bStrH, bStrW); \ } else #define SIZE_TEMPLATES(transt, CCC) \ CALL( 1, CCC, transt) \ CALL( 2, CCC, transt) \ CALL( 3, CCC, transt) \ CALL( 4, CCC, transt) \ CALL( 5, CCC, transt) \ CALL( 6, CCC, transt) \ CALL( 7, CCC, transt) \ CALL( 8, CCC, transt) \ CALL( 9, CCC, transt) \ CALL(10, CCC, transt) \ CALL(11, CCC, transt) \ CALL(12, CCC, transt) \ CALL(13, CCC, transt) \ CALL(14, CCC, transt) \ CALL(15, CCC, transt) \ CALL(16, CCC, transt) \ CALL(17, CCC, transt) \ CALL(18, CCC, transt) \ CALL(19, CCC, transt) \ CALL(20, CCC, transt) \ CALL(21, CCC, transt) \ CALL(22, CCC, transt) \ CALL(23, CCC, transt) \ CALL(24, CCC, transt) \ CALL(25, CCC, transt) \ CALL(26, CCC, transt) \ CALL(27, CCC, transt) \ CALL(28, CCC, transt) \ CALL(29, CCC, transt) \ CALL(30, CCC, transt) \ CALL(31, CCC, transt) \ CALL(32, CCC, transt) \ CALL(33, CCC, transt) \ CALL(34, CCC, transt) \ CALL(41, CCC, transt) \ CALL(48, CCC, transt) \ CALL(63, CCC, transt) \ CALL(64, CCC, transt) \ CALL(65, CCC, transt) \ CALL(81, CCC, transt) \ { hasInst = false; } if (transpose) { if (lp.fittingC1 >= 32) { SIZE_TEMPLATES(true, 32) } else if (lp.fittingC1 == 16) { SIZE_TEMPLATES(true, 16) } else if (lp.fittingC1 == 24) { SIZE_TEMPLATES(true, 24) } } else { if (lp.fittingC1 >= 32) { SIZE_TEMPLATES(false, 32) } else if (lp.fittingC1 == 16) { 
SIZE_TEMPLATES(false, 16) } else if (lp.fittingC1 == 24) { SIZE_TEMPLATES(false, 24) } } #endif if (!hasInst) { //printf("gather, C, bSzH, bSzW=%d, %d, %d, fittingC1=%d\n", C, bSzH, bSzW, lp.fittingC1); hipLaunchKernelGGL(( blockGatherTiled1<512>), dim3(lp.grid), dim3(lp.block), lp.shmemSize, d.stream(), x, (const short*)activeBlockIndices, y, N, H, W, C, bOffsH0, bOffsW0, bStrH, bStrW, bSzH, lp.bSzH1, bSzW, lp.fittingC1, transpose); } #undef SIZE_TEMPLATES #undef CALL gpuErrorCheck( hipPeekAtLastError() ); } }; // Define the GPU implementation that launches the CUDA kernel. template <typename T> struct SparseScatterFunctor<GPUDevice, T> { void operator()( const GPUDevice& d, const T* x, int N, int H, int W, int C, T* y, int bOffsH0, int bOffsW0, int bSzH, int bSzW, int bStrH, int bStrW, int numActive, const short* activeBlockIndices, bool add, bool transpose, bool atomic ) { LaunchParams lp(C, bSzH, bSzW, numActive); bool hasInst = false; #if 1 #define CALL(RR, CC1, addt, transt) \ if (bSzH == RR && bSzW == RR && lp.fittingC1 == CC1 && atomic == false) { \ hasInst = true; \ hipLaunchKernelGGL(( blockScatterTiled0<512, RR, COMPUTE_R1(RR), RR, CC1, addt, transt, false>) \ , dim3(lp.grid), dim3(lp.block), lp.shmemSize, d.stream(), \ x, (const short*)activeBlockIndices, \ y, N, H, W, C, bOffsH0, bOffsW0, bStrH, bStrW); \ } else #define SIZE_TEMPLATES(addt, transpt, CCC) \ CALL( 1, CCC, addt, transpt) \ CALL( 2, CCC, addt, transpt) \ CALL( 3, CCC, addt, transpt) \ CALL( 4, CCC, addt, transpt) \ CALL( 5, CCC, addt, transpt) \ CALL( 6, CCC, addt, transpt) \ CALL( 7, CCC, addt, transpt) \ CALL( 8, CCC, addt, transpt) \ CALL( 9, CCC, addt, transpt) \ CALL(10, CCC, addt, transpt) \ CALL(11, CCC, addt, transpt) \ CALL(12, CCC, addt, transpt) \ CALL(13, CCC, addt, transpt) \ CALL(14, CCC, addt, transpt) \ CALL(15, CCC, addt, transpt) \ CALL(16, CCC, addt, transpt) \ CALL(17, CCC, addt, transpt) \ CALL(18, CCC, addt, transpt) \ CALL(19, CCC, addt, transpt) \ CALL(20, CCC, 
addt, transpt) \ CALL(21, CCC, addt, transpt) \ CALL(22, CCC, addt, transpt) \ CALL(23, CCC, addt, transpt) \ CALL(24, CCC, addt, transpt) \ CALL(25, CCC, addt, transpt) \ CALL(26, CCC, addt, transpt) \ CALL(27, CCC, addt, transpt) \ CALL(28, CCC, addt, transpt) \ CALL(29, CCC, addt, transpt) \ CALL(30, CCC, addt, transpt) \ CALL(31, CCC, addt, transpt) \ CALL(32, CCC, addt, transpt) \ CALL(33, CCC, addt, transpt) \ CALL(34, CCC, addt, transpt) \ CALL(41, CCC, addt, transpt) \ CALL(48, CCC, addt, transpt) \ CALL(63, CCC, addt, transpt) \ CALL(64, CCC, addt, transpt) \ CALL(65, CCC, addt, transpt) \ CALL(81, CCC, addt, transpt) \ hasInst = false; if (transpose && !add) { if (lp.fittingC1 >= 32) { SIZE_TEMPLATES(false, true, 32) } else if (lp.fittingC1 == 16) { SIZE_TEMPLATES(false, true, 16) } else if (lp.fittingC1 == 24) { SIZE_TEMPLATES(false, true, 24) } } else if (transpose && add) { if (lp.fittingC1 >= 32) { SIZE_TEMPLATES(true, true, 32) } else if (lp.fittingC1 == 16) { SIZE_TEMPLATES(true, true, 16) } else if (lp.fittingC1 == 24) { SIZE_TEMPLATES(true, true, 24) } } else if (!transpose && !add) { if (lp.fittingC1 >= 32) { SIZE_TEMPLATES(false, false, 32) } else if (lp.fittingC1 == 16) { SIZE_TEMPLATES(false, false, 16) } else if (lp.fittingC1 == 24) { SIZE_TEMPLATES(false, false, 24) } } else { if (lp.fittingC1 >= 32) { SIZE_TEMPLATES(true, false, 32) } else if (lp.fittingC1 == 16) { SIZE_TEMPLATES(true, false, 16) } else if (lp.fittingC1 == 24) { SIZE_TEMPLATES(true, false, 24) } } #endif if (!hasInst) { //printf("scatter, C, bSzH, bSzW=%d, %d, %d, fittingC1=%d\n", C, bSzH, bSzW, lp.fittingC1); hipLaunchKernelGGL(( blockScatterTiled1<512>), dim3(lp.grid), dim3(lp.block), lp.shmemSize, d.stream(), x, (const short*)activeBlockIndices, y, N, H, W, C, bOffsH0, bOffsW0, bStrH, bStrW, bSzH, lp.bSzH1, bSzW, lp.fittingC1, add, transpose, atomic); } #undef SIZE_TEMPLATES #undef CALL gpuErrorCheck( hipPeekAtLastError() ); } }; template<typename T> struct 
CopyTensorFunctor<GPUDevice, T> { void operator()(const GPUDevice& gpu, T* dst, const T* src, int count) { hipMemcpyAsync(dst, src, sizeof(T)*count, hipMemcpyDeviceToDevice, gpu.stream()); gpuErrorCheck( hipPeekAtLastError() ); hipStreamSynchronize(gpu.stream()); gpuErrorCheck( hipPeekAtLastError() ); } const hipStream_t* getStream(const GPUDevice& gpu) { return &gpu.stream(); } }; template struct CopyTensorFunctor<GPUDevice, float>; template struct SparseGatherFunctor<GPUDevice, float>; template struct SparseScatterFunctor<GPUDevice, float>; #endif // GOOGLE_CUDA
41da4935b2a738781da3764a77fe2a358f6ba9e7.cu
/* Sparse Blocks Network Copyright (c) 2017, Uber Technologies, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef GOOGLE_CUDA #define EIGEN_USE_GPU #define EIGEN_USE_THREADS #include "sparse_gather.h" #include "sparse_blocks.cu.h" // #include "tensorflow/core/util/cuda_kernel_helper.h" #include "cuda_helpers.h" #include "cuda_runtime.h" #include "op_utils.h" using namespace tensorflow; using std::cout; using std::endl; #define COMPUTE_R1(RR) ((RR) < 7 ? ((RR) == 1 ? 1 : 2) : 4) typedef Eigen::GpuDevice GPUDevice; namespace { struct LaunchParams { dim3 block, grid; int shmemSize; int bSzH1; int fittingC1; enum { MAX_SHMEM = 24*1024 }; LaunchParams(int C, int bSzH, int bSzW, int numActive) { fittingC1 = std::min(32, C); bSzH1 = COMPUTE_R1(bSzH); while ((shmemSize = (fittingC1+1)*bSzH1*bSzW*sizeof(float)) > MAX_SHMEM) fittingC1--; assert(fittingC1 >= 1); assert(bSzH1*bSzW*(fittingC1+1)*sizeof(float) <= MAX_SHMEM); block = dim3(512, 1, 1); grid = dim3(numActive, DIVUP(C, fittingC1), DIVUP(bSzH, bSzH1)); } }; } // Define the GPU implementation that launches the CUDA kernel. 
template <typename T> struct SparseGatherFunctor<GPUDevice, T> { void operator()( const GPUDevice& d, const T* x, int N, int H, int W, int C, T* y, int bOffsH0, int bOffsW0, int bSzH, int bSzW, int bStrH, int bStrW, int numActive, const short* activeBlockIndices, bool transpose ) { LaunchParams lp(C, bSzH, bSzW, numActive); bool hasInst = false; #if 1 #define CALL(RR, CC1, trans) \ if (bSzH == RR && bSzW == RR && lp.fittingC1 == CC1) { \ hasInst = true; \ blockGatherTiled0<512, RR, COMPUTE_R1(RR), RR, CC1, trans><<<lp.grid, lp.block, lp.shmemSize, d.stream()>>>( \ x, (const short*)activeBlockIndices, \ y, N, H, W, C, bOffsH0, bOffsW0, bStrH, bStrW); \ } else #define SIZE_TEMPLATES(transt, CCC) \ CALL( 1, CCC, transt) \ CALL( 2, CCC, transt) \ CALL( 3, CCC, transt) \ CALL( 4, CCC, transt) \ CALL( 5, CCC, transt) \ CALL( 6, CCC, transt) \ CALL( 7, CCC, transt) \ CALL( 8, CCC, transt) \ CALL( 9, CCC, transt) \ CALL(10, CCC, transt) \ CALL(11, CCC, transt) \ CALL(12, CCC, transt) \ CALL(13, CCC, transt) \ CALL(14, CCC, transt) \ CALL(15, CCC, transt) \ CALL(16, CCC, transt) \ CALL(17, CCC, transt) \ CALL(18, CCC, transt) \ CALL(19, CCC, transt) \ CALL(20, CCC, transt) \ CALL(21, CCC, transt) \ CALL(22, CCC, transt) \ CALL(23, CCC, transt) \ CALL(24, CCC, transt) \ CALL(25, CCC, transt) \ CALL(26, CCC, transt) \ CALL(27, CCC, transt) \ CALL(28, CCC, transt) \ CALL(29, CCC, transt) \ CALL(30, CCC, transt) \ CALL(31, CCC, transt) \ CALL(32, CCC, transt) \ CALL(33, CCC, transt) \ CALL(34, CCC, transt) \ CALL(41, CCC, transt) \ CALL(48, CCC, transt) \ CALL(63, CCC, transt) \ CALL(64, CCC, transt) \ CALL(65, CCC, transt) \ CALL(81, CCC, transt) \ { hasInst = false; } if (transpose) { if (lp.fittingC1 >= 32) { SIZE_TEMPLATES(true, 32) } else if (lp.fittingC1 == 16) { SIZE_TEMPLATES(true, 16) } else if (lp.fittingC1 == 24) { SIZE_TEMPLATES(true, 24) } } else { if (lp.fittingC1 >= 32) { SIZE_TEMPLATES(false, 32) } else if (lp.fittingC1 == 16) { SIZE_TEMPLATES(false, 16) } else 
if (lp.fittingC1 == 24) { SIZE_TEMPLATES(false, 24) } } #endif if (!hasInst) { //printf("gather, C, bSzH, bSzW=%d, %d, %d, fittingC1=%d\n", C, bSzH, bSzW, lp.fittingC1); blockGatherTiled1<512><<<lp.grid, lp.block, lp.shmemSize, d.stream()>>>( x, (const short*)activeBlockIndices, y, N, H, W, C, bOffsH0, bOffsW0, bStrH, bStrW, bSzH, lp.bSzH1, bSzW, lp.fittingC1, transpose); } #undef SIZE_TEMPLATES #undef CALL gpuErrorCheck( cudaPeekAtLastError() ); } }; // Define the GPU implementation that launches the CUDA kernel. template <typename T> struct SparseScatterFunctor<GPUDevice, T> { void operator()( const GPUDevice& d, const T* x, int N, int H, int W, int C, T* y, int bOffsH0, int bOffsW0, int bSzH, int bSzW, int bStrH, int bStrW, int numActive, const short* activeBlockIndices, bool add, bool transpose, bool atomic ) { LaunchParams lp(C, bSzH, bSzW, numActive); bool hasInst = false; #if 1 #define CALL(RR, CC1, addt, transt) \ if (bSzH == RR && bSzW == RR && lp.fittingC1 == CC1 && atomic == false) { \ hasInst = true; \ blockScatterTiled0<512, RR, COMPUTE_R1(RR), RR, CC1, addt, transt, false> \ <<<lp.grid, lp.block, lp.shmemSize, d.stream()>>>( \ x, (const short*)activeBlockIndices, \ y, N, H, W, C, bOffsH0, bOffsW0, bStrH, bStrW); \ } else #define SIZE_TEMPLATES(addt, transpt, CCC) \ CALL( 1, CCC, addt, transpt) \ CALL( 2, CCC, addt, transpt) \ CALL( 3, CCC, addt, transpt) \ CALL( 4, CCC, addt, transpt) \ CALL( 5, CCC, addt, transpt) \ CALL( 6, CCC, addt, transpt) \ CALL( 7, CCC, addt, transpt) \ CALL( 8, CCC, addt, transpt) \ CALL( 9, CCC, addt, transpt) \ CALL(10, CCC, addt, transpt) \ CALL(11, CCC, addt, transpt) \ CALL(12, CCC, addt, transpt) \ CALL(13, CCC, addt, transpt) \ CALL(14, CCC, addt, transpt) \ CALL(15, CCC, addt, transpt) \ CALL(16, CCC, addt, transpt) \ CALL(17, CCC, addt, transpt) \ CALL(18, CCC, addt, transpt) \ CALL(19, CCC, addt, transpt) \ CALL(20, CCC, addt, transpt) \ CALL(21, CCC, addt, transpt) \ CALL(22, CCC, addt, transpt) \ CALL(23, CCC, 
addt, transpt) \ CALL(24, CCC, addt, transpt) \ CALL(25, CCC, addt, transpt) \ CALL(26, CCC, addt, transpt) \ CALL(27, CCC, addt, transpt) \ CALL(28, CCC, addt, transpt) \ CALL(29, CCC, addt, transpt) \ CALL(30, CCC, addt, transpt) \ CALL(31, CCC, addt, transpt) \ CALL(32, CCC, addt, transpt) \ CALL(33, CCC, addt, transpt) \ CALL(34, CCC, addt, transpt) \ CALL(41, CCC, addt, transpt) \ CALL(48, CCC, addt, transpt) \ CALL(63, CCC, addt, transpt) \ CALL(64, CCC, addt, transpt) \ CALL(65, CCC, addt, transpt) \ CALL(81, CCC, addt, transpt) \ hasInst = false; if (transpose && !add) { if (lp.fittingC1 >= 32) { SIZE_TEMPLATES(false, true, 32) } else if (lp.fittingC1 == 16) { SIZE_TEMPLATES(false, true, 16) } else if (lp.fittingC1 == 24) { SIZE_TEMPLATES(false, true, 24) } } else if (transpose && add) { if (lp.fittingC1 >= 32) { SIZE_TEMPLATES(true, true, 32) } else if (lp.fittingC1 == 16) { SIZE_TEMPLATES(true, true, 16) } else if (lp.fittingC1 == 24) { SIZE_TEMPLATES(true, true, 24) } } else if (!transpose && !add) { if (lp.fittingC1 >= 32) { SIZE_TEMPLATES(false, false, 32) } else if (lp.fittingC1 == 16) { SIZE_TEMPLATES(false, false, 16) } else if (lp.fittingC1 == 24) { SIZE_TEMPLATES(false, false, 24) } } else { if (lp.fittingC1 >= 32) { SIZE_TEMPLATES(true, false, 32) } else if (lp.fittingC1 == 16) { SIZE_TEMPLATES(true, false, 16) } else if (lp.fittingC1 == 24) { SIZE_TEMPLATES(true, false, 24) } } #endif if (!hasInst) { //printf("scatter, C, bSzH, bSzW=%d, %d, %d, fittingC1=%d\n", C, bSzH, bSzW, lp.fittingC1); blockScatterTiled1<512><<<lp.grid, lp.block, lp.shmemSize, d.stream()>>>( x, (const short*)activeBlockIndices, y, N, H, W, C, bOffsH0, bOffsW0, bStrH, bStrW, bSzH, lp.bSzH1, bSzW, lp.fittingC1, add, transpose, atomic); } #undef SIZE_TEMPLATES #undef CALL gpuErrorCheck( cudaPeekAtLastError() ); } }; template<typename T> struct CopyTensorFunctor<GPUDevice, T> { void operator()(const GPUDevice& gpu, T* dst, const T* src, int count) { cudaMemcpyAsync(dst, src, 
sizeof(T)*count, cudaMemcpyDeviceToDevice, gpu.stream()); gpuErrorCheck( cudaPeekAtLastError() ); cudaStreamSynchronize(gpu.stream()); gpuErrorCheck( cudaPeekAtLastError() ); } const cudaStream_t* getStream(const GPUDevice& gpu) { return &gpu.stream(); } }; template struct CopyTensorFunctor<GPUDevice, float>; template struct SparseGatherFunctor<GPUDevice, float>; template struct SparseScatterFunctor<GPUDevice, float>; #endif // GOOGLE_CUDA
a81ed9dc0876f06070653bd9e1000cd32581c76c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread. * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise * targets: (numColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * Number of filters must be divisible by 16. * Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. 
 */
#include "cudamat_conv.cuh"
#include "cudamat_conv_util.cuh"

/*
 * Backward pass of a convolution: reconstructs image activation gradients from
 * hidden activations and filters, specialized for few (1-3) color channels.
 *
 * Block size: 16x16.
 * blockIdx.x determines case in batches of 16*imgsPerThread.
 * blockIdx.y determines a 4x4 pixel region in the target image.
 * threadIdx.x determines case; threadIdx.y determines pixel within the region.
 *
 * Layouts established by the indexing below:
 *   hidActs: (numFilters, numModules, numImages)
 *   filters: (numColors, filterPixels, numFilters) if conv,
 *            (numModules, numColors, filterPixels, numFilters) otherwise
 *   targets: (numColors, imgPixels, numImages)
 */
template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_color(const float* hidActs, const float* filters, float* targets,
                               const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                               const int filterSize, const int imgSizeY, const int imgSizeX,
                               const int paddingStart, const int moduleStride,
                               const float scaleTargets, const float scaleOutputs) {
    // Shared staging buffers: filters get a +1 pad column on the inner
    // dimension (the classic bank-conflict-avoidance pad).
    __shared__ float shFilters[numColors*16][16 + 1];
    __shared__ float shHidActs[16][16*imgsPerThread];

    const int blockCaseIdx = blockIdx.x * 16*imgsPerThread;
    const int numRegionsX = DIVUP(imgSizeX, 4);
    const int blockRegionIdx = blockIdx.y;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    // Each threadIdx.y owns one pixel of the block's 4x4 region.
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSizeX + pxX;
    const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
    const int numModules = numModulesY * numModulesX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeX * imgSizeY;
    // Re-linearize the 16x16 block into 8 rows of 32 threads for coalesced
    // hidAct loads.
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    const int loadY = tidx / 32, loadX = tidx % 32;

    hidActs += blockCaseIdx + loadY * numImages * numModules + loadX;
    filters += threadIdx.x;
    targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[numColors][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < numColors; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules whose filter support can overlap this 4x4 region.
    const int startY = blockRegionTop - paddingStart < filterSize ? 0
                        : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0
                        : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);

    float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x];
    float* shHidActLoad = &shHidActs[loadY][loadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;

            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;

            for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        // Out-of-range cases contribute zero.
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;
                        }
                    }
                }

                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.
                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }
                }

                __syncthreads();
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                // Second barrier: everyone must be done reading shmem before
                // the next iteration overwrites it.
                __syncthreads();
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}

/*
 * Block size: 16x16.
 * blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
 *      In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
 *                  blockIdx.x.y = 1..numImgColors/colorsPerThread
 * blockIdx.y determines 4x4 image region in target image.
 *
 * threadIdx.x determines case.
 * threadIdx.y determines pixel.
 *
 * hidActs:     (numFilters, numModulesY, numModulesX, numImages)
 * filters:     (numFilterColors, filterPixels, numFilters)                             if conv
 *              (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters)   otherwise
 * targets:     (numImageColors, imgSizeY, imgSizeX, numImages)
 *
 * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
 *
 * numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
 * 16 * imgsPerThread must be divisible by 32.
 * numImageColors/numGroups must be divisible by colorsPerThread.
 *
 * This version loads 32 cases at a time, so it gets full coalescing on that load.
 * It only loads 16 weights at a time, so those aren't fully coalesced.
 * This version conserves shared memory by loading 16 filters at a time rather than 32.
 *
 * To be used when there are 4-16 color channels.
 */
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets,
                                     const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                     const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart,
                                     const int moduleStride, const int numImgColors, const int numGroups,
                                     const float scaleTargets, const float scaleOutputs) {
    // Shared staging: +1 pad column on shFilters' inner dimension.
    __shared__ float shFilters[colorsPerThread*16][16 + 1];
    __shared__ float shHidActs[16][16*imgsPerThread];

    const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;

    const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    const int numRegionsX = DIVUP(imgSizeX, 4);
    const int blockRegionIdx = blockIdx.y;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    // Each threadIdx.y owns one pixel of the block's 4x4 region.
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSizeX + pxX;
    const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
    const uint numModules = numModulesY * numModulesX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    // Re-linearize the 16x16 block into 8 rows of 32 threads for coalesced
    // hidAct loads.
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    const int loadY = tidx / 32, loadX = tidx % 32;

    hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX;
    filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
    targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Range of modules whose filter support can overlap this 4x4 region.
    const int startY = blockRegionTop - paddingStart < filterSize ? 0
                        : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0
                        : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
    float* shHidActLoad = &shHidActs[loadY][loadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;

            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;

            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        // Out-of-range cases contribute zero.
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;
                        }
                    }
                }

                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.
                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }
                }

                __syncthreads();
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                // Second barrier: everyone must be done reading shmem before
                // the next iteration overwrites it.
                __syncthreads();
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}

/*
 * Block size: B_YxB_X.
 * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
 *      In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
 *                  blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
 * blockIdx.y determines image pixel in target image.
 *
 * threadIdx.x determines case.
 * threadIdx.y determines color.
 *
 * hidActs:     (numFilters, numModulesY, numModulesX, numImages)
 * filters:     (numFilterColors, filterPixels, numFilters)                             if conv
 *              (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters)   otherwise
 * targets:     (numImageColors, imgSizeY, imgSizeX, numImages)
 *
 * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
 *
 * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
 * numFiltersPerGroup must be divisible by filterCache.
 *
 * B_X * imgsPerThread must be divisible by 32.
 * numFilterColors must be divisible by B_Y*colorsPerThread.
 * B_X*B_Y must be divisible by 32.
 * filterCache must be divisible by B_X*B_Y/32
 * B_X*B_Y must be divisible by filterCache
 * This version loads 32 cases at a time, so it gets full coalescing on that load.
 * It only loads filterCache weights at a time, so those aren't fully coalesced (depending on size of filterCache).
 *
 * To be used when there are >= 16 color channels.
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCache, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets,
                                        const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                        const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart,
                                        const int moduleStride, const int numImgColors, const int numGroups,
                                        const float scaleTargets, const float scaleOutputs) {
    // Shared staging: +1 pad column on shFilters' inner dimension.
    __shared__ float shFilters[colorsPerThread*B_Y][filterCache + 1];
    __shared__ float shHidActs[filterCache][B_X*imgsPerThread];

    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;

    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // One target pixel per block (blockIdx.y is a linear pixel index).
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // Separate re-linearizations of the block: 32-wide rows for hidAct loads,
    // filterCache-wide rows for filter loads.
    const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
    const int filtersLoadY = tidx / filterCache, filtersLoadX = tidx % filterCache;
    const int numModules = numModulesY * numModulesX;

    hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Range of modules whose filter support covers this pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            for (int f = 0; f < numFiltersPerGroup; f += filterCache) { // multiply with filterCache filters at a time
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * B_X; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        // Out-of-range cases contribute zero.
                        #pragma unroll
                        for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = 0;
                        }
                    }
                }

                const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
                                          : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCache) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCache) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * (filterCache + 1)] = fLoad[i * filterPixels * numFilters];
                    }
                }

                __syncthreads();
                // Do some actual computation
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    #pragma unroll
                    for (int w = 0; w < filterCache; w++) {
                        #pragma unroll
                        for (int c = 0; c < colorsPerThread; c++) {
                            prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
                        }
                    }
                }
                // Second barrier: everyone must be done reading shmem before
                // the next iteration overwrites it.
                __syncthreads();
            }
        }
    }

    if (scale) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}

/*
 * Block size: B_YxB_X.
 * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
 *      In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
 *                  blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
 * blockIdx.y determines image pixel in target image.
 *
 * threadIdx.x determines case.
 * threadIdx.y determines color.
 *
 * hidActs:     (numFilters, numModulesY, numModulesX, numImages)
 * filters:     (numFilterColors, filterPixels, numFilters)                             if conv
 *              (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters)   otherwise
 * targets:     (numImageColors, imgSizeY, imgSizeX, numImages)
 *
 * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
 *
 * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
 * numFiltersPerGroup must be divisible by filterCacheF.
 *
 * numFilterColors must be divisible by B_Y*colorsPerThread.
 * B_X*B_Y must be divisible by filterCacheF
 * filterCacheF must be divisible by filterCacheH
 *
 * This version loads 32 cases at a time, so it gets full coalescing on that load.
 * It only loads filterCacheF weights at a time, so those aren't fully coalesced (depending on size of filterCacheF).
 *
 * To be used when there are >= 16 color channels.
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor_kepler(const float* hidActs, const float* filters, float* targets,
                                       const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                       const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                       const int numImgColors, const int numGroups,
                                       const float scaleTargets, const float scaleOutputs) {
    // Note: shFilters has no pad column here (unlike the non-kepler variant).
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];

    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;

    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // One target pixel per block (blockIdx.y is a linear pixel index).
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    const int hidActLoadY = threadIdx.y, hidActLoadX = threadIdx.x;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;

    const int numModules = numModulesY * numModulesX;

    hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Range of modules whose filter support covers this pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
    //const bool noFLoop = filterCacheF == filterCacheH;

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
                                          : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = fLoad[i * filterPixels * numFilters];
                    }
                }
                //#pragma unroll
                // Inner loop: consume the cached filterCacheF filters in
                // filterCacheH-sized slices of hidden activations.
                for (int fh = f; fh < f + filterCacheF; fh += filterCacheH) {
                    //conv_img_acts_manycolor_dummy_fhLoop<B_Y, B_X, imgsPerThread, colorsPerThread, filterCacheF, filterCacheH, checkCaseBounds>(hidActs, shHidActLoad, shHidActs, shFilters, moduleIdx, numImages, hidActLoadY, hidActLoadX, blockCaseIdx, numModules, f, fh, prod);

                    const float* hLoad = &hidActs[(moduleIdx + fh * numModules) * numImages];
                    #pragma unroll
                    for (int j = 0; j < filterCacheH; j += B_Y) {
                        if (filterCacheH % B_Y == 0 || hidActLoadY + j < filterCacheH) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread*B_X; i += B_X) {
                                if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
                                    shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                                } else {
                                    shHidActLoad[j * B_X * imgsPerThread + i] = 0;
                                }
                            }
                        }
                    }
                    __syncthreads();

                    // Do some actual computation
                    // Using these variables causes register usage to go from 161 --> 123.
                    // But nonetheless, the high-register version is faster.
                    //const float* shF = &shFilters[threadIdx.y][fh-f];
                    //const float* const shF2 = &shFilters[threadIdx.y][fh];
                    //const float* shH = &shHidActs[0][threadIdx.x];
                    #pragma unroll
                    for (int w = 0; w < filterCacheH; w++) {
                        #pragma unroll
                        for (int c = 0; c < colorsPerThread; c++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[c * B_Y + threadIdx.y][fh-f + w] * shHidActs[w][threadIdx.x + i * B_X];
                            }
                        }
                    }
                    // Second barrier: everyone must be done reading shmem
                    // before the next slice overwrites it.
                    __syncthreads();
                }
            }
        }
    }
    if (scale) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}

/*
 * New Titan-optimized stuff.
 */

/*
 * Computes the output parameters moduleIdx (linearized index of module
 * (my, mx)) and pxIdxInFilter (position of the block's target pixel within
 * that module's filter support).
 */
__device__ __forceinline__ void conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(
        const int my, const int mx, const int numModulesX, const int paddingStart, const int moduleStride,
        const int blockPixelIdxY, const int blockPixelIdxX, const int filterSize,
        int &moduleIdx, int &pxIdxInFilter) {
    const int moduleTop = paddingStart + my * moduleStride;
    const int pxInFilterY = blockPixelIdxY - moduleTop;

    moduleIdx = my * numModulesX + mx; // out

    const int moduleLeft = paddingStart + mx * moduleStride;
    const int pxInFilterX = blockPixelIdxX - moduleLeft;

    pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; // out
}

// Accumulate shared-memory filter column (w)+(offset) into prod for every
// (image, color) pair; image loop outermost.
#define IA_PRELOAD_LOOP(w,offset) _Pragma("unroll") \
for (int i = 0; i < imgsPerThread; i++) { \
    _Pragma("unroll") \
    for (int c = 0; c < colorsPerThread; c++) { \
        prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
    } \
} \

/*
 * Same loop as above but inverted.
 */
#define IA_PRELOAD_LOOP2(w,offset) _Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
    _Pragma("unroll") \
    for (int i = 0; i < imgsPerThread; i++) { \
        prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
    } \
} \

#define IA_PRELOAD_LOOP3(i,offset) _Pragma("unroll") \
for (int w = 0; w < filterCacheH; w++) { \
    _Pragma("unroll") \
    for (int c = 0; c < colorsPerThread; c++) { \
        prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
    } \
} \

// Preload one filter weight into registers: direct pointer vs texture fetch.
#define IA_PRELOAD_W(z) wPreload[z] = fLoad[(z) * B_X*B_Y/filterCacheF * filterPixels * numFilters];
#define IA_PRELOAD_W_TX(z) wPreload[z] = tex1Dfetch<float>(filters, filtersLoadOffset + (z) * B_X*B_Y/filterCacheF * filterPixels * numFilters);
// Preload one hidden activation into registers (case-bounds-guarded).
#define IA_PRELOAD_H(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
    hPreload[y][x] = hLoad[(y) * B_Y * numModules * numImages + (x) * B_X]; \
}
#define IA_PRELOAD_H_TX(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
    hPreload[y][x] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + (y) * B_Y * numModules * numImages + (x) * B_X); \
}

/*
 * Texture-object variant with software pipelining: filter weights and hidden
 * activations for the NEXT tile are preloaded into registers (wPreload /
 * hPreload) while the current tile is consumed from shared memory.
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
__launch_bounds__(256, 2)   // 256 threads per block, 2 blocks per multiprocessor
                            // These launch bounds ensure 25% occupancy (128 registers used)
                            // as opposed to 13% (130 registers) achieved by defaults.
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex(hipTextureObject_t hidActs, hipTextureObject_t filters, float* targets,
                                       const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                       const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                       const int numImgColors, const int numGroups,
                                       const float scaleTargets, const float scaleOutputs) {
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];

    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int myCaseIdx = blockCaseIdx + threadIdx.x;

    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // One target pixel per block (blockIdx.y is a linear pixel index).
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;

    const int numModules = numModulesY * numModulesX;

    // Texture fetches use element offsets rather than adjusted pointers.
    const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;

    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            prod[c][i] = 0;
        }
    }

    // Range of modules whose filter support covers this pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
    //const bool noFLoop = filterCacheF == filterCacheH;

    /*
     * Initial preload
     */
    float hPreload[filterCacheH/B_Y][imgsPerThread]; // [2][4]
    float wPreload[filterCacheF*colorsPerThread/B_X]; // [8]

    int moduleIdx, pxIdxInFilter;
    conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride,
                                                                        blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
    // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
    //                           : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
    int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + 0
                                                  : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
    #pragma unroll
    for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
        if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
            wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
        }
    }

    // const float* hLoad = &hidActs[(moduleIdx + 0 * numModules) * numImages];
    int hidActsLoadOffset = hidActsOffset + (moduleIdx + 0 * numModules) * numImages;
    #pragma unroll
    for (int j = 0; j < filterCacheH; j += B_Y) {
        if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
                }
            }
        }
    }

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            // Compute the coordinates of the NEXT module so its data can be
            // prefetched on the final filter batch of this module.
            int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
            const bool lastModule = my == endY - 1 && mx == endX - 1;
            if (!lastModule) {
                mxNext = mx + 1 == endX ? startX : mx + 1;
                myNext = my + (mx + 1 == endX);
            }
            conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride,
                                                                                blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);

            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                // Publish the preloaded weights to shared memory.
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
                    }
                }

                filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
                                                          : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
                if (f == numFiltersPerGroup - filterCacheF) {
                    // Last filter batch: prefetch the next module's weights instead.
                    filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
                                                              : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
                }

                // Publish the first half of the preloaded hidden acts.
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            // NOTE: bank conflicts here!
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }

                __syncthreads();

                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheH) * numModules) * numImages;

                // Compute on the first filterCacheH columns while preloading the
                // next weights (z<4) and hidden acts (4<=z<12) from textures.
                #pragma unroll
                for (int z = 0; z < 4; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                    IA_PRELOAD_W_TX(z);
                }

                #pragma unroll
                for (int z = 4; z < 12; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                    IA_PRELOAD_H_TX((z-4)/4,z%4);
                }

                #pragma unroll
                for (int z = 12; z < 16; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                }

                __syncthreads();

                // Publish the second half of the preloaded hidden acts.
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }

                __syncthreads();

                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
                if (f == numFiltersPerGroup - filterCacheF) {
                    // Last filter batch: prefetch the next module's hidden acts.
                    hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
                }

                // Compute on the remaining columns (offset filterCacheH) while
                // preloading the following tile.
                #pragma unroll
                for (int z = 0; z < 4; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                    IA_PRELOAD_W_TX(z+4);
                }

                #pragma unroll
                for (int z = 4; z < 12; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                    IA_PRELOAD_H_TX((z-4)/4, z%4);
                }

                #pragma unroll
                for (int z = 12; z < 16; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                }

                __syncthreads();
            }
        }
    }

    if (scale) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
/*
 * Backward "image acts" kernel: accumulates the gradient w.r.t. the input images
 * from hidden activations and filters. This is the specialization tuned for
 * blockDim = (B_X=32, B_Y=4), colorsPerThread=12, filterCacheF=16, filterCacheH=16,
 * with both filter and hidAct tiles preloaded into registers through texture
 * objects (hence "preloadfh" in the name).
 *
 * Template parameters:
 *   B_Y, B_X         - thread block dimensions (B_X*B_Y threads per block)
 *   imgsPerThread    - images (cases) processed per thread along x
 *   colorsPerThread  - image colors processed per thread along y
 *   filterCacheF     - filters staged into shared memory per inner iteration
 *   filterCacheH     - hidAct rows staged into shared memory per inner iteration
 *   scale            - if true, blend scaleTargets*targets + scaleOutputs*result
 *   checkCaseBounds  - if true, numImages need not divide B_X*imgsPerThread
 *   conv             - true: filters shared across modules; false: per-module (local) filters
 *
 * Data layouts (per the routine-family comment in this file):
 *   hidActs: (numFilters, numModules, numImages)        [texture]
 *   filters: (numFilterColors, filterPixels, numFilters) if conv,
 *            (numModules, numFilterColors, filterPixels, numFilters) otherwise  [texture]
 *   targets: (numImgColors, imgPixels, numImages)
 *
 * Grid mapping: blockIdx.x enumerates (image block, color block) pairs;
 * blockIdx.y is the output image pixel. Relies on the IA_PRELOAD_LOOP2 /
 * IA_PRELOAD_W_TX / IA_PRELOAD_H_TX macros and the ..._setCoords helper
 * defined earlier in this file.
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
//__launch_bounds__(128, 3)   // 128 threads per block, 3 blocks per multiprocessor
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16(hipTextureObject_t hidActs, hipTextureObject_t filters, float* targets,
                                          const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                          const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                          const int numImgColors, const int numGroups,
                                          const float scaleTargets, const float scaleOutputs) {
    // Shared-memory staging tiles: one filterCacheF-wide slab of filter weights
    // and one filterCacheH-high slab of hidden activations per inner iteration.
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];

    // Which image block and which color block this thread block handles.
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int myCaseIdx = blockCaseIdx + threadIdx.x;

    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // The single output image pixel this block computes.
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;
    const int numModules = numModulesY * numModulesX;

    // Base linear offsets into the hidActs/filters textures for this thread
    // (replaces the pointer bumps of the non-texture variant, kept below for reference).
    const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
//    hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
//    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;

    // Per-thread accumulators: one partial sum per (color, image) pair.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            prod[c][i] = 0;
        }
    }

    // Range of modules (filter applications) whose receptive field covers this pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    // Each thread's private write cursors into the shared tiles.
    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
    //const bool noFLoop = filterCacheF == filterCacheH;

    /*
     * Initial preload: fetch the first filter/hidAct slabs into registers so the
     * main loop can overlap texture fetches with the shared-memory compute phase.
     */
    float hPreload[filterCacheH/B_Y][imgsPerThread]; // [4][4]
    float wPreload[filterCacheF*colorsPerThread/B_X]; // [6]

    int moduleIdx, pxIdxInFilter;
    conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
                                                                        blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
//    const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
//                              : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
    int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters
                                                  : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
    #pragma unroll
    for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
        if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
            wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
        }
    }

//    const float* hLoad = &hidActs[moduleIdx * numImages];
    int hidActsLoadOffset = hidActsOffset + moduleIdx * numImages;
    #pragma unroll
    for (int j = 0; j < filterCacheH; j += B_Y) {
        if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
                }
            }
        }
    }

    // Main loop over all modules that touch this pixel.
    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            // Precompute coordinates of the NEXT module so its data can be
            // prefetched while the current one is being consumed.
            int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
            const bool lastModule = my == endY - 1 && mx == endX - 1;
            if (!lastModule) {
                mxNext = mx + 1 == endX ? startX : mx + 1;
                myNext = my + (mx + 1 == endX);
            }
            conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride,
                                                                                blockPixelIdxY, blockPixelIdxX, filterSize,
                                                                                moduleIdxNext, pxIdxInFilterNext);
            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                // Dump the register-preloaded filter slab into shared memory.
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
                    }
                }

                // Advance the filter fetch offset to the next slab; on the last
                // f-iteration, point at the next module's first slab instead.
                filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
                                                          : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
                if (f == numFiltersPerGroup - filterCacheF) {
                    filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
                                                              : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
                }

                // Dump the register-preloaded hidAct slab into shared memory.
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            // NOTE: bank conflicts here!
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }

                // Same rotation for the hidActs fetch offset.
                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
                if (f == numFiltersPerGroup - filterCacheF) {
                    hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
                }

                __syncthreads();

                // It seems that there is no point explicitly interleaving loads
                // and computations because the scheduler does that anyway.
                IA_PRELOAD_LOOP2(0,0);
                IA_PRELOAD_LOOP2(1,0);
                IA_PRELOAD_LOOP2(2,0);
                IA_PRELOAD_LOOP2(3,0);
                IA_PRELOAD_LOOP2(4,0);
                IA_PRELOAD_LOOP2(5,0);
                IA_PRELOAD_LOOP2(6,0);
                IA_PRELOAD_LOOP2(7,0);
                IA_PRELOAD_LOOP2(8,0);
                IA_PRELOAD_LOOP2(9,0);
                IA_PRELOAD_LOOP2(10,0);
                IA_PRELOAD_LOOP2(11,0);
                IA_PRELOAD_LOOP2(12,0);
                IA_PRELOAD_LOOP2(13,0);
                IA_PRELOAD_LOOP2(14,0);
                IA_PRELOAD_LOOP2(15,0);
                IA_PRELOAD_W_TX(0);
                IA_PRELOAD_W_TX(1);
                IA_PRELOAD_W_TX(2);
                IA_PRELOAD_W_TX(3);
                IA_PRELOAD_W_TX(4);
                IA_PRELOAD_W_TX(5);
                IA_PRELOAD_H_TX(0,0);
                IA_PRELOAD_H_TX(0,1);
                IA_PRELOAD_H_TX(0,2);
                IA_PRELOAD_H_TX(0,3);
                IA_PRELOAD_H_TX(1,0);
                IA_PRELOAD_H_TX(1,1);
                IA_PRELOAD_H_TX(1,2);
                IA_PRELOAD_H_TX(1,3);
                IA_PRELOAD_H_TX(2,0);
                IA_PRELOAD_H_TX(2,1);
                IA_PRELOAD_H_TX(2,2);
                IA_PRELOAD_H_TX(2,3);
                IA_PRELOAD_H_TX(3,0);
                IA_PRELOAD_H_TX(3,1);
                IA_PRELOAD_H_TX(3,2);
                IA_PRELOAD_H_TX(3,3);

                __syncthreads();
            }
        }
    }
    // Write (or blend) the accumulated gradients into targets.
    if (scale) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}

/*
 * hidActs:         (numFilters, numModules, numImages)
 * filters:         (numFilterColors, filterPixels, numFilters)               if conv
 *                  (numModules, numFilterColors, filterPixels, numFilters)   otherwise
 * targets:         (overSample, numImgColors, imgPixels, numImages)
 *
 * Note: all of these convolution routines are optimized for the case when
 * the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
 * to make them work fast.
*/ void _imgActs(cudamat* hidActs, cudamat* filters, cudamat* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) { int numFilterColors = numImgColors / numGroups; int numImages = hidActs->size[0]; int numFilters = filters->size[0]; int numModules = hidActs->size[1] / numFilters; int filterModuleMult = conv ? 1 : numModules; int filterPixels = filters->size[1] / (filterModuleMult * numFilterColors); int filterSize = sqrt(filterPixels); int imgPixels = imgSizeY * imgSizeX; int numModulesX = numModules / numModulesY; assert(numImgColors % numGroups == 0); assert(numFilters % (32*numGroups) == 0); // TODO: insisting on 32 filters due to bug in calling code below. fix that. assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); assert(numGroups == 1 || numFilterColors % 4 == 0); assert(filterPixels == filterSize * filterSize); assert(hidActs->size[1] == numModules * numFilters); assert(filters->size[1] == filterModuleMult * numFilterColors * filterPixels); assert(numModules == numModulesY * numModulesX); //assert(hidActs.isContiguous()); //assert(filters.isContiguous()); assert(!hidActs->is_trans); assert(!filters->is_trans); assert(!targets->is_trans); // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0); assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX); assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY); assert(moduleStride <= filterSize); //assert(targets.isContiguous()); // no stride support here! dim3 blocks; dim3 threads; int colorsPerThread, imgsPerThread; if (numFilterColors % 8 == 0) { threads = dim3(32, numFilterColors % 64 == 0 ? 8 : 4); colorsPerThread = numFilterColors % 64 == 0 ? 8 : numFilterColors % 48 == 0 ? 12 : numFilterColors % 32 == 0 ? 8 : numFilterColors % 16 == 0 ? 
4 : 2; imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; assert(numFilterColors % (threads.y * colorsPerThread) == 0); blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels); // NOTE: the case when channels % 32 == 0 but channels % 48 != 0 and channels % 64 != 0 has not been optimized!! } else if (numFilterColors > 3) { // NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!! imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2; threads = dim3(16, 16); colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2; blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } else { // NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!! imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2; threads = dim3(16, 16); blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0; //if (scaleTargets == 0) { // do not scale or use targets matrix // targets.resize(numImgColors*imgPixels, numImages); //} else { assert(targets->size[1] == numImgColors * imgPixels); assert(targets->size[0] == numImages); //} const bool scale = scaleTargets != 0; hipStream_t stream = 0; //NVMatrix::getDefaultStream(); // hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); // hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, // getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, // imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); //return; // printf("conv: %d\n", conv); 
// printf("scale: %d\n", scale); // printf("checkCaseBounds: %d\n", checkCaseBounds); // printf("numFilterColors: %d\n", numFilterColors); // printf("numImages: %d\n", numImages); // hipStream_t stream = 0; // NVMatrix::getDefaultStream(); if (conv == true) { if (scale == false) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { // TODO: this code assumes we hvae 32 filters because it uses filter cache of 32! if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, 
filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { 
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, 
filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true >), dim3(blocks), 
dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
img_acts_mediumcolor < 4, 4, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, 
filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 3, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 3, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 3, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 3, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } 
else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), 
dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 1, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 1, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 1, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 1, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, 
scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, 
filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, 
hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (scale == true) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 128 == 0) { 
hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, 
numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { 
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, 
imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, 
hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, true >), dim3(blocks), dim3(threads), 0, 
stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, 
paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 3, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 3, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 3, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 3, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, hipFuncCachePreferShared); 
hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, 
filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 1, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 1, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 1, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 1, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { 
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, 
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, true, true >), dim3(blocks), dim3(threads), 0, stream, 
hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } else if (conv == false) { if (scale == false) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, 
false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, 
imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >), dim3(blocks), 
dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, 
false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if 
(numFilterColors % 8 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, 
numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, 
filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { 
hipFuncSetCacheConfig(img_acts_color < 8, 3, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 3, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, 
hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 1, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } 
else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 1, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages 
% 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, 
targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, 
targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (scale == true) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, 
false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, 
imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false >), dim3(blocks), 
dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false >, 
hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 
scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, 
numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, 
scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 3, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 3, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 3, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
img_acts_color < 4, 3, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, 
imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 1, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 1, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 1, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 1, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, hipFuncCachePreferShared); 
hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { 
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, 
numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, 
scaleTargets, scaleOutput); } } } } } } getLastCudaError("imgActs: kernel execution failed"); } #ifdef __cplusplus extern "C" { #endif void convDown(cudamat* images, cudamat* filters, cudamat* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets){ _imgActs(images, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 1, true); } void localDown(cudamat* images, cudamat* filters, cudamat* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets){ _imgActs(images, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 1, false); } #ifdef __cplusplus } #endif
a81ed9dc0876f06070653bd9e1000cd32581c76c.cu
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread. * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise * targets: (numColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * Number of filters must be divisible by 16. * Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. 
*/
#include "cudamat_conv.cuh"
#include "cudamat_conv_util.cuh"

// Image-acts backward kernel for few (<= 3) color channels; grid/block layout
// and divisibility preconditions are documented in the header comment above.
template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_color(const float* hidActs, const float* filters, float* targets,
                               const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                               const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                               const float scaleTargets, const float scaleOutputs) {
    // Filter tile: numColors*16 rows of 16 weights, +1 column of padding
    // (the classic pad that keeps column accesses off a single shared bank).
    __shared__ float shFilters[numColors*16][16 + 1];
    // Hidden-activation tile: 16 filters x (16*imgsPerThread) cases.
    __shared__ float shHidActs[16][16*imgsPerThread];

    // First image (case) handled by this block.
    const int blockCaseIdx = blockIdx.x * 16*imgsPerThread;
    // blockIdx.y indexes a 4x4 pixel region of the target image.
    const int numRegionsX = DIVUP(imgSizeX, 4);
    const int blockRegionIdx = blockIdx.y;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    // threadIdx.y selects the pixel within the 4x4 region.
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSizeX + pxX;
    // Edge regions may extend past the image; such threads skip compute/write.
    const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
    const int numModules = numModulesY * numModulesX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeX * imgSizeY;
    // Re-linearize the 16x16 block so loads happen in groups of 32 lanes
    // (full coalescing on the hidActs read).
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    const int loadY = tidx / 32, loadX = tidx % 32;

    // Offset the global pointers to this block's/thread's slice.
    hidActs += blockCaseIdx + loadY * numImages * numModules + loadX;
    filters += threadIdx.x;
    targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[numColors][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < numColors; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }
    // Range of modules (filter applications) whose receptive field overlaps
    // this block's 4x4 region.
    const int startY = blockRegionTop - paddingStart < filterSize ? 0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);

    float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x]; // (sic) per-thread slot in the filter tile
    float* shHidActLoad = &shHidActs[loadY][loadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;

            // Does this module's filter footprint actually cover this pixel?
            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;

            for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                // All threads cooperate on the hidActs load (even "uninterested"
                // ones) so the __syncthreads() below is block-uniform.
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;
                        }
                    }
                }

                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.
                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    // conv: filters are shared by all modules; otherwise each
                    // module has its own bank of weights.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }
                }

                __syncthreads();
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                // Barrier before the tiles are overwritten in the next iteration.
                __syncthreads();
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            // Accumulate into existing targets: t = scaleTargets*t + scaleOutputs*prod.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            // Overwrite targets: t = scaleOutputs*prod.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}

/*
 * Block size: 16x16.
 * blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
 *   In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
 *               blockIdx.x.y = 1..numImgColors/colorsPerThread
 * blockIdx.y determines 4x4 image region in target image.
 *
 * threadIdx.x determines case.
 * threadIdx.y determines pixel.
*
 * hidActs:     (numFilters, numModulesY, numModulesX, numImages)
 * filters:     (numFilterColors, filterPixels, numFilters)                             if conv
 *              (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters)   otherwise
 * targets:     (numImageColors, imgSizeY, imgSizeX, numImages)
 *
 * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
 *
 * numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
 * 16 * imgsPerThread must be divisible by 32.
 * numImageColors/numGroups must be divisible by colorsPerThread.
 *
 * This version loads 32 cases at a time, so it gets full coalescing on that load.
 * It only loads 16 weights at a time, so those aren't fully coalesced.
 * This version conserves shared memory by loading 16 filters at a time rather than 32.
 *
 * To be used when there are 4-16 color channels.
 */
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets,
                                     const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                     const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                     const int numImgColors, const int numGroups,
                                     const float scaleTargets, const float scaleOutputs) {
    // Filter tile with a +1 pad column (keeps column reads off one shared bank).
    __shared__ float shFilters[colorsPerThread*16][16 + 1];
    // Hidden-activation tile: 16 filters x (16*imgsPerThread) cases.
    __shared__ float shHidActs[16][16*imgsPerThread];

    // Decompose blockIdx.x into (image batch, color batch); see header comment.
    const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
    // Grouped-convolution bookkeeping: which group, and color within group.
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // blockIdx.y indexes a 4x4 pixel region; threadIdx.y picks the pixel in it.
    const int numRegionsX = DIVUP(imgSizeX, 4);
    const int blockRegionIdx = blockIdx.y;
    const int blockRegionIdxX = blockRegionIdx % numRegionsX;
    const int blockRegionIdxY = blockRegionIdx / numRegionsX;
    const int blockRegionLeft = blockRegionIdxX * 4;
    const int blockRegionTop = blockRegionIdxY * 4;
    const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
    const int pxY = blockRegionTop + pxYInRegion;
    const int pxX = blockRegionLeft + pxXInRegion;
    const int pxIdx = pxY * imgSizeX + pxX;
    const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
    const uint numModules = numModulesY * numModulesX;
    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    // Re-linearize the 16x16 block into 32-lane groups for coalesced loads.
    const int tidx = threadIdx.y * 16 + threadIdx.x;
    const int loadY = tidx / 32, loadX = tidx % 32;

    // Offset globals to this block's group/color/case slice.
    hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX;
    filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
    targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Modules whose receptive field overlaps this 4x4 region.
    const int startY = blockRegionTop - paddingStart < filterSize ? 0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
    const int startX = blockRegionLeft - paddingStart < filterSize ? 0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
    float* shHidActLoad = &shHidActs[loadY][loadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInModuleY = pxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInModuleX = pxX - moduleLeft;

            const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
            const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;

            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
                // Now the threads split up into half-warps, and each half-warp decides if it's interested.
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                // Cooperative, block-uniform hidActs load (so the barriers
                // below are reached by every thread).
                #pragma unroll
                for (int i = 0; i < imgsPerThread * 16; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        #pragma unroll
                        for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
                            shHidActLoad[j * 16 * imgsPerThread + i] = 0;
                        }
                    }
                }

                if (isPxInImg && isPxInModule) {
                    // This half-warp is interested, so it's going to load the weights from this module to its pixel.
                    // Not fully coalesced read :(
                    // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
                    const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
                                              : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
                    }
                }

                __syncthreads();
                // Do some actual computation
                if (isPxInImg && isPxInModule) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        #pragma unroll
                        for (int w = 0; w < 16; w++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
                            }
                        }
                    }
                }
                // Barrier before the tiles are refilled in the next iteration.
                __syncthreads();
            }
        }
    }
    // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
    if (isPxInImg) {
        if (scale) {
            // Accumulate: t = scaleTargets*t + scaleOutputs*prod.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
                    }
                }
            }
        } else {
            // Overwrite: t = scaleOutputs*prod.
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
                    #pragma unroll
                    for (int c = 0; c < colorsPerThread; c++) {
                        targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
                    }
                }
            }
        }
    }
}

/*
 * Block size: B_YxB_X.
 * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
 *   In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
 *               blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
 * blockIdx.y determines image pixel in target image.
 *
 * threadIdx.x determines case.
 * threadIdx.y determines color.
*
 * hidActs:     (numFilters, numModulesY, numModulesX, numImages)
 * filters:     (numFilterColors, filterPixels, numFilters)                             if conv
 *              (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters)   otherwise
 * targets:     (numImageColors, imgSizeY, imgSizeX, numImages)
 *
 * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
 *
 * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
 * numFiltersPerGroup must be divisible by filterCache.
 *
 * B_X * imgsPerThread must be divisible by 32.
 * numFilterColors must be divisible by B_Y*colorsPerThread.
 * B_X*B_Y must be divisible by 32.
 * filterCache must be divisible by B_X*B_Y/32
 * B_X*B_Y must be divisible by filterCache
 * This version loads 32 cases at a time, so it gets full coalescing on that load.
 * It only loads filterCache weights at a time, so those aren't fully coalesced (depending on size of filterCache).
 *
 * To be used when there are >= 16 color channels.
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCache, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets,
                                        const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                        const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
                                        const int numImgColors, const int numGroups,
                                        const float scaleTargets, const float scaleOutputs) {
    // Filter tile padded by one column (keeps strided reads off a single bank).
    __shared__ float shFilters[colorsPerThread*B_Y][filterCache + 1];
    // Hidden-activation tile: filterCache filters x (B_X*imgsPerThread) cases.
    __shared__ float shHidActs[filterCache][B_X*imgsPerThread];

    // Decompose blockIdx.x into (image batch, color batch); see header comment.
    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    // Grouped-convolution bookkeeping.
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // Unlike the small-color kernels, one block handles a single target pixel.
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // Two separate re-linearizations of the block: 32-lane groups for the
    // hidActs load, filterCache-wide groups for the filter load.
    const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
    const int filtersLoadY = tidx / filterCache, filtersLoadX = tidx % filterCache;
    const int numModules = numModulesY * numModulesX;

    // Offset globals to this block's group/color/case slice.
    hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread accumulators: one partial sum per (color, image).
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Modules whose receptive field covers this single pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            for (int f = 0; f < numFiltersPerGroup; f += filterCache) { // multiply with filterCache filters at a time
                const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
                #pragma unroll
                for (int i = 0; i < imgsPerThread * B_X; i += 32) {
                    if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
                        #pragma unroll
                        for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                        }
                    } else {
                        #pragma unroll
                        for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, 8 * 32 elements at a time.
                            shHidActLoad[j * B_X * imgsPerThread + i] = 0;
                        }
                    }
                }

                // conv: filters shared across modules; otherwise per-module weights.
                const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
                                          : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i += B_X*B_Y/filterCache) {
                    // First clause is compile-time constant; the runtime guard
                    // only matters when the rows don't divide evenly.
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCache) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * (filterCache + 1)] = fLoad[i * filterPixels * numFilters];
                    }
                }

                __syncthreads();
                // Do some actual computation
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    #pragma unroll
                    for (int w = 0; w < filterCache; w++) {
                        #pragma unroll
                        for (int c = 0; c < colorsPerThread; c++) {
                            prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
                        }
                    }
                }
                // Barrier before the tiles are refilled in the next iteration.
                __syncthreads();
            }
        }
    }
    if (scale) {
        // Accumulate: t = scaleTargets*t + scaleOutputs*prod.
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        // Overwrite: t = scaleOutputs*prod.
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}

/*
 * Block size: B_YxB_X.
 * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
 *   In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
 *               blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
 * blockIdx.y determines image pixel in target image.
 *
 * threadIdx.x determines case.
 * threadIdx.y determines color.
*
 * hidActs: (numFilters, numModulesY, numModulesX, numImages)
 * filters: (numFilterColors, filterPixels, numFilters)                                  if conv
 *          (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters)        otherwise
 * targets: (numImageColors, imgSizeY, imgSizeX, numImages)
 *
 * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
 *
 * Preconditions (not checked at runtime):
 *   numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
 *   numFiltersPerGroup must be divisible by filterCacheF.
 *   numFilterColors must be divisible by B_Y*colorsPerThread.
 *   B_X*B_Y must be divisible by filterCacheF.
 *   filterCacheF must be divisible by filterCacheH.
 *
 * This version loads 32 cases at a time, so it gets full coalescing on that load.
 * It only loads filterCacheF weights at a time, so those aren't fully coalesced
 * (depending on size of filterCacheF).
 *
 * To be used when there are >= 16 color channels.
 *
 * Kepler variant of conv_img_acts_manycolor: filters are staged filterCacheF at a
 * time while hidActs are staged in smaller filterCacheH sub-tiles inside the 'fh'
 * loop, halving the shared-memory footprint of the hidAct tile.
 */
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor_kepler(const float* hidActs, const float* filters, float* targets,
                                               const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
                                               const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart,
                                               const int moduleStride, const int numImgColors, const int numGroups,
                                               const float scaleTargets, const float scaleOutputs) {
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];

    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;

    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    // blockIdx.y selects the single target image pixel this block reconstructs.
    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    const int hidActLoadY = threadIdx.y, hidActLoadX = threadIdx.x;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;
    const int numModules = numModulesY * numModulesX;

    // Advance base pointers to this block's slice of each tensor.
    hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
    filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;

    // Per-thread register accumulators: colorsPerThread colors x imgsPerThread images.
    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int c = 0; c < colorsPerThread; c++) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            prod[c][i] = 0;
        }
    }

    // Range of output modules whose receptive field covers this image pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
    //const bool noFLoop = filterCacheF == filterCacheH;

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            const int moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
                                          : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = fLoad[i * filterPixels * numFilters];
                    }
                }
                //#pragma unroll
                // Process the filterCacheF filters in filterCacheH-sized sub-tiles of hidActs.
                for (int fh = f; fh < f + filterCacheF; fh += filterCacheH) {
                    //conv_img_acts_manycolor_dummy_fhLoop<B_Y, B_X, imgsPerThread, colorsPerThread, filterCacheF, filterCacheH, checkCaseBounds>(hidActs, shHidActLoad, shHidActs, shFilters, moduleIdx, numImages, hidActLoadY, hidActLoadX, blockCaseIdx, numModules, f, fh, prod);

                    const float* hLoad = &hidActs[(moduleIdx + fh * numModules) * numImages];
                    #pragma unroll
                    for (int j = 0; j < filterCacheH; j += B_Y) {
                        if (filterCacheH % B_Y == 0 || hidActLoadY + j < filterCacheH) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread*B_X; i += B_X) {
                                if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
                                    shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
                                } else {
                                    // Out-of-range cases contribute zero to the accumulators.
                                    shHidActLoad[j * B_X * imgsPerThread + i] = 0;
                                }
                            }
                        }
                    }
                    __syncthreads(); // hidAct sub-tile fully staged before reads

                    // Do some actual computation
                    // Using these variables causes register usage to go from 161 --> 123.
                    // But nonetheless, the high-register version is faster.
                    //const float* shF = &shFilters[threadIdx.y][fh-f];
                    //const float* const shF2 = &shFilters[threadIdx.y][fh];
                    //const float* shH = &shHidActs[0][threadIdx.x];
                    #pragma unroll
                    for (int w = 0; w < filterCacheH; w++) {
                        #pragma unroll
                        for (int c = 0; c < colorsPerThread; c++) {
                            #pragma unroll
                            for (int i = 0; i < imgsPerThread; i++) {
                                prod[c][i] += shFilters[c * B_Y + threadIdx.y][fh-f + w] * shHidActs[w][threadIdx.x + i * B_X];
                            }
                        }
                    }
                    __syncthreads(); // all reads done before the next sub-tile overwrites shHidActs
                }
            }
        }
    }
    // Write-back: either blend into existing targets (scale) or overwrite them.
    if (scale) {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int i = 0; i < imgsPerThread; i++) {
            if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
                #pragma unroll
                for (int c = 0; c < colorsPerThread; c++) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}

/*
 * New Titan-optimized stuff.
*/

// Computes the module index and the pixel's index within that module's filter for
// module (my, mx); results are returned through the 'moduleIdx' / 'pxIdxInFilter'
// reference parameters. Shared by the preloading kernels below so the next
// iteration's coordinates can be computed ahead of time.
__device__ __forceinline__ void conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(const int my, const int mx, const int numModulesX,
        const int paddingStart, const int moduleStride, const int blockPixelIdxY, const int blockPixelIdxX, const int filterSize,
        int &moduleIdx, int &pxIdxInFilter) {
    const int moduleTop = paddingStart + my * moduleStride;
    const int pxInFilterY = blockPixelIdxY - moduleTop;

    moduleIdx = my * numModulesX + mx; // out

    const int moduleLeft = paddingStart + mx * moduleStride;
    const int pxInFilterX = blockPixelIdxX - moduleLeft;

    pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; // out
}

// One accumulation step: multiply shared-memory filter column (w)+(offset) into the
// per-thread prod[][] accumulators, images in the inner loop.
#define IA_PRELOAD_LOOP(w,offset) _Pragma("unroll") \
for (int i = 0; i < imgsPerThread; i++) { \
    _Pragma("unroll") \
    for (int c = 0; c < colorsPerThread; c++) { \
        prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
    } \
} \

/*
 * Same loop as above but inverted.
 */
#define IA_PRELOAD_LOOP2(w,offset) _Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
    _Pragma("unroll") \
    for (int i = 0; i < imgsPerThread; i++) { \
        prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
    } \
} \

#define IA_PRELOAD_LOOP3(i,offset) _Pragma("unroll") \
for (int w = 0; w < filterCacheH; w++) { \
    _Pragma("unroll") \
    for (int c = 0; c < colorsPerThread; c++) { \
        prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \
    } \
} \

// Preload one filter weight into the wPreload register array (pointer / texture variants).
#define IA_PRELOAD_W(z) wPreload[z] = fLoad[(z) * B_X*B_Y/filterCacheF * filterPixels * numFilters];
#define IA_PRELOAD_W_TX(z) wPreload[z] = tex1Dfetch<float>(filters, filtersLoadOffset + (z) * B_X*B_Y/filterCacheF * filterPixels * numFilters);
// Preload one hidden activation into the hPreload register array, guarded by the
// image-bounds check (pointer / texture variants).
#define IA_PRELOAD_H(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
    hPreload[y][x] = hLoad[(y) * B_Y * numModules * numImages + (x) * B_X]; \
}
#define IA_PRELOAD_H_TX(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \
    hPreload[y][x] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + (y) * B_Y * numModules * numImages + (x) * B_X); \
}

// Texture-fetch variant of conv_img_acts_manycolor_kepler, software-pipelined: the
// next tile's filter weights and hidden activations are preloaded into registers
// (wPreload/hPreload) while the current tile is being multiplied. Written for the
// fixed configuration B_Y=8, B_X=32, colorsPerThread=8, filterCacheF=32,
// filterCacheH=16 encoded in the name; the hard-coded z-loop bounds below (4/12/16)
// depend on that configuration.
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
__launch_bounds__(256, 2) // 256 threads per block, 2 blocks per multiprocessor
// These launch bounds ensure 25% occupancy (128 registers used)
// as opposed to 13% (130 registers) achieved by defaults.
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex(cudaTextureObject_t hidActs, cudaTextureObject_t filters, float* targets,
        const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize,
        const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
        const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) {
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];

    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int myCaseIdx = blockCaseIdx + threadIdx.x;

    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;
    const int numModules = numModulesY * numModulesX;

    // Texture fetches use integer element offsets rather than adjusted pointers.
    const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;

    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            prod[c][i] = 0;
        }
    }

    // Range of output modules whose receptive field covers this image pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
    //const bool noFLoop = filterCacheF == filterCacheH;

    /*
     * Initial preload
     */
    float hPreload[filterCacheH/B_Y][imgsPerThread]; // [2][4]
    float wPreload[filterCacheF*colorsPerThread/B_X]; // [8]

    int moduleIdx, pxIdxInFilter;
    conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
//    const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
//                              : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
    int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + 0
                                                  : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
    #pragma unroll
    for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
        if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
            wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
        }
    }

//    const float* hLoad = &hidActs[(moduleIdx + 0 * numModules) * numImages];
    int hidActsLoadOffset = hidActsOffset + (moduleIdx + 0 * numModules) * numImages;
    #pragma unroll
    for (int j = 0; j < filterCacheH; j += B_Y) {
        if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
                }
            }
        }
    }

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            // Coordinates of the *next* module so the final tile of this module can
            // preload the first tile of the next one.
            int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
            const bool lastModule = my == endY - 1 && mx == endX - 1;
            if (!lastModule) {
                mxNext = mx + 1 == endX ? startX : mx + 1;
                myNext = my + (mx + 1 == endX);
            }
            conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);

            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                // Commit the preloaded filter registers to shared memory.
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
                    }
                }

                filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
                                                          : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
                if (f == numFiltersPerGroup - filterCacheF) {
                    // Last filter tile of this module: preload from the next module instead.
                    filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
                                                              : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
                }

                // Commit the preloaded hidAct registers to shared memory.
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            // NOTE: bank conflicts here!
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }

                __syncthreads();

                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheH) * numModules) * numImages;

                // First half-tile (filterCacheH columns): compute interleaved with
                // preloading the next filter weights and hidden activations.
                #pragma unroll
                for (int z = 0; z < 4; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                    IA_PRELOAD_W_TX(z);
                }

                #pragma unroll
                for (int z = 4; z < 12; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                    IA_PRELOAD_H_TX((z-4)/4,z%4);
                }

                #pragma unroll
                for (int z = 12; z < 16; ++z) {
                    IA_PRELOAD_LOOP(z,0);
                }

                __syncthreads();

                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }

                __syncthreads();

                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
                if (f == numFiltersPerGroup - filterCacheF) {
                    hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
                }

                // Second half-tile: columns filterCacheH..filterCacheF-1.
                #pragma unroll
                for (int z = 0; z < 4; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                    IA_PRELOAD_W_TX(z+4);
                }

                #pragma unroll
                for (int z = 4; z < 12; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                    IA_PRELOAD_H_TX((z-4)/4, z%4);
                }

                #pragma unroll
                for (int z = 12; z < 16; ++z) {
                    IA_PRELOAD_LOOP(z,filterCacheH);
                }

                __syncthreads();
            }
        }
    }
    // Write-back: either blend into existing targets (scale) or overwrite them.
    if (scale) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}
// Texture-fetch, register-preloading variant for the B_Y=4, B_X=32,
// colorsPerThread=12, filterCacheF=16, filterCacheH=16 configuration encoded in the
// name. Unlike the ty_8 kernel above, filterCacheF == filterCacheH here, so each
// 'f' iteration stages one tile and runs the fully unrolled IA_PRELOAD_LOOP2(0..15)
// sequence, with all next-tile preloads issued after the compute macros.
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
//__launch_bounds__(128, 3)   // 128 threads per block, 3 blocks per multiprocessor
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16(cudaTextureObject_t hidActs, cudaTextureObject_t filters, float* targets,
        const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize,
        const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
        const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) {
    __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF];
    __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread];

    const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
    const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
    const int myCaseIdx = blockCaseIdx + threadIdx.x;

    const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
    const int numFilterColors = numImgColors / numGroups;
    const int blockGroupIdx = imgColorIdx / numFilterColors;
    const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;

    const int blockPixelIdx = blockIdx.y;
    const int blockPixelIdxX = blockPixelIdx % imgSizeX;
    const int blockPixelIdxY = blockPixelIdx / imgSizeX;

    const int filterPixels = filterSize * filterSize;
    const int imgPixels = imgSizeY * imgSizeX;
    const int tidx = threadIdx.y * B_X + threadIdx.x;
    // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
    //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread);
    const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
    // nvcc is behaving idiotically again, these useless declarations save registers
    //const int outputY = threadIdx.y, outputX = threadIdx.x;
    //const int ty = threadIdx.y, tx = threadIdx.x;
    const int numModules = numModulesY * numModulesX;

    // Texture fetches use integer element offsets rather than adjusted pointers.
    const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;

    // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
    // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
    targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx;

    float prod[colorsPerThread][imgsPerThread];
    #pragma unroll
    for (int i = 0; i < imgsPerThread; i++) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            prod[c][i] = 0;
        }
    }

    // Range of output modules whose receptive field covers this image pixel.
    const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
    const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
    const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
                        : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
    const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);

    float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
    float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
    //const bool noFLoop = filterCacheF == filterCacheH;

    /*
     * Initial preload
     */
    float hPreload[filterCacheH/B_Y][imgsPerThread]; // [4][4]
    float wPreload[filterCacheF*colorsPerThread/B_X]; // [6]

    int moduleIdx, pxIdxInFilter;
    conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
//    const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
//                              : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0];
    int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters
                                                  : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters);
    #pragma unroll
    for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
        if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
            wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters);
        }
    }

//    const float* hLoad = &hidActs[moduleIdx * numImages];
    int hidActsLoadOffset = hidActsOffset + moduleIdx * numImages;
    #pragma unroll
    for (int j = 0; j < filterCacheH; j += B_Y) {
        if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X);
                }
            }
        }
    }

    for (int my = startY; my < endY; my++) {
        const int moduleTop = paddingStart + my * moduleStride;
        const int pxInFilterY = blockPixelIdxY - moduleTop;

        for (int mx = startX; mx < endX; mx++) {
            moduleIdx = my * numModulesX + mx;
            const int moduleLeft = paddingStart + mx * moduleStride;
            const int pxInFilterX = blockPixelIdxX - moduleLeft;

            pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;

            // Coordinates of the *next* module so the final tile of this module can
            // preload the first tile of the next one.
            int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
            const bool lastModule = my == endY - 1 && mx == endX - 1;
            if (!lastModule) {
                mxNext = mx + 1 == endX ? startX : mx + 1;
                myNext = my + (mx + 1 == endX);
            }
            conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext);

            for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time
                // Commit the preloaded filter registers to shared memory.
                #pragma unroll
                for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) {
                    if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
                        shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)];
                    }
                }

                filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF
                                                          : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF);
                if (f == numFiltersPerGroup - filterCacheF) {
                    // Last filter tile of this module: preload from the next module instead.
                    filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters
                                                              : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
                }

                // Commit the preloaded hidAct registers to shared memory.
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            // NOTE: bank conflicts here!
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }

                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
                if (f == numFiltersPerGroup - filterCacheF) {
                    hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
                }

                __syncthreads();

                // It seems that there is no point explicitly interleaving loads
                // and computations because the scheduler does that anyway.

                IA_PRELOAD_LOOP2(0,0);
                IA_PRELOAD_LOOP2(1,0);
                IA_PRELOAD_LOOP2(2,0);
                IA_PRELOAD_LOOP2(3,0);
                IA_PRELOAD_LOOP2(4,0);
                IA_PRELOAD_LOOP2(5,0);
                IA_PRELOAD_LOOP2(6,0);
                IA_PRELOAD_LOOP2(7,0);
                IA_PRELOAD_LOOP2(8,0);
                IA_PRELOAD_LOOP2(9,0);
                IA_PRELOAD_LOOP2(10,0);
                IA_PRELOAD_LOOP2(11,0);
                IA_PRELOAD_LOOP2(12,0);
                IA_PRELOAD_LOOP2(13,0);
                IA_PRELOAD_LOOP2(14,0);
                IA_PRELOAD_LOOP2(15,0);

                IA_PRELOAD_W_TX(0);
                IA_PRELOAD_W_TX(1);
                IA_PRELOAD_W_TX(2);
                IA_PRELOAD_W_TX(3);
                IA_PRELOAD_W_TX(4);
                IA_PRELOAD_W_TX(5);

                IA_PRELOAD_H_TX(0,0);
                IA_PRELOAD_H_TX(0,1);
                IA_PRELOAD_H_TX(0,2);
                IA_PRELOAD_H_TX(0,3);
                IA_PRELOAD_H_TX(1,0);
                IA_PRELOAD_H_TX(1,1);
                IA_PRELOAD_H_TX(1,2);
                IA_PRELOAD_H_TX(1,3);
                IA_PRELOAD_H_TX(2,0);
                IA_PRELOAD_H_TX(2,1);
                IA_PRELOAD_H_TX(2,2);
                IA_PRELOAD_H_TX(2,3);
                IA_PRELOAD_H_TX(3,0);
                IA_PRELOAD_H_TX(3,1);
                IA_PRELOAD_H_TX(3,2);
                IA_PRELOAD_H_TX(3,3);

                __syncthreads();
            }
        }
    }
    // Write-back: either blend into existing targets (scale) or overwrite them.
    if (scale) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}

/*
 * hidActs:     (numFilters, numModules, numImages)
 * filters:     (numFilterColors, filterPixels, numFilters)             if conv
 *              (numModules, numFilterColors, filterPixels, numFilters) otherwise
 * targets:     (overSample, numImgColors, imgPixels, numImages)
 *
 * Note: all of these convolution routines are optimized for the case when
 * the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
 * to make them work fast.
*/ void _imgActs(cudamat* hidActs, cudamat* filters, cudamat* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) { int numFilterColors = numImgColors / numGroups; int numImages = hidActs->size[0]; int numFilters = filters->size[0]; int numModules = hidActs->size[1] / numFilters; int filterModuleMult = conv ? 1 : numModules; int filterPixels = filters->size[1] / (filterModuleMult * numFilterColors); int filterSize = sqrt(filterPixels); int imgPixels = imgSizeY * imgSizeX; int numModulesX = numModules / numModulesY; assert(numImgColors % numGroups == 0); assert(numFilters % (32*numGroups) == 0); // TODO: insisting on 32 filters due to bug in calling code below. fix that. assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); assert(numGroups == 1 || numFilterColors % 4 == 0); assert(filterPixels == filterSize * filterSize); assert(hidActs->size[1] == numModules * numFilters); assert(filters->size[1] == filterModuleMult * numFilterColors * filterPixels); assert(numModules == numModulesY * numModulesX); //assert(hidActs.isContiguous()); //assert(filters.isContiguous()); assert(!hidActs->is_trans); assert(!filters->is_trans); assert(!targets->is_trans); // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0); assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX); assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY); assert(moduleStride <= filterSize); //assert(targets.isContiguous()); // no stride support here! dim3 blocks; dim3 threads; int colorsPerThread, imgsPerThread; if (numFilterColors % 8 == 0) { threads = dim3(32, numFilterColors % 64 == 0 ? 8 : 4); colorsPerThread = numFilterColors % 64 == 0 ? 8 : numFilterColors % 48 == 0 ? 12 : numFilterColors % 32 == 0 ? 8 : numFilterColors % 16 == 0 ? 
4 : 2; imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; assert(numFilterColors % (threads.y * colorsPerThread) == 0); blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels); // NOTE: the case when channels % 32 == 0 but channels % 48 != 0 and channels % 64 != 0 has not been optimized!! } else if (numFilterColors > 3) { // NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!! imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2; threads = dim3(16, 16); colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2; blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } else { // NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!! imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2; threads = dim3(16, 16); blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0; //if (scaleTargets == 0) { // do not scale or use targets matrix // targets.resize(numImgColors*imgPixels, numImages); //} else { assert(targets->size[1] == numImgColors * imgPixels); assert(targets->size[0] == numImages); //} const bool scale = scaleTargets != 0; cudaStream_t stream = 0; //NVMatrix::getDefaultStream(); // cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); // conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>( // getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, // imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); //return; // printf("conv: %d\n", conv); // printf("scale: %d\n", 
scale); // printf("checkCaseBounds: %d\n", checkCaseBounds); // printf("numFilterColors: %d\n", numFilterColors); // printf("numImages: %d\n", numImages); // cudaStream_t stream = 0; // NVMatrix::getDefaultStream(); if (conv == true) { if (scale == false) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { // TODO: this code assumes we hvae 32 filters because it uses filter cache of 32! if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } 
else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, 
imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, 
numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, 
numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, 
targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 8, 4, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 4, 4, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, 
paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0, 
stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 3, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 3, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 3, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 3, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { 
cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 1, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 1, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, 
moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 1, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 1, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 
12, 16, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if 
(numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, true, true >, cudaFuncCachePreferShared); img_acts_color 
< 2, 1, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (scale == true) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { 
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, 
moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, 
paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, 
imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, 
filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 8, 4, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 4, 4, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); 
} else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, 
numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 3, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 3, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 3, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 3, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 2, true, false, 
true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 1, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 1, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 1, true, false, true 
>, cudaFuncCachePreferShared); img_acts_color < 4, 1, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true ><<<blocks, 
threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, true >, 
cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, 
numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } else if (conv == false) { if (scale == false) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); 
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { 
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } 
else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, 
scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, 
moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 8, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 4, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 
0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, 
numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 3, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 3, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 2, false, 
false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 1, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 
4, 1, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 
16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { 
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, true, false ><<<blocks, threads, 0, 
stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (scale == true) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 
32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(getTextureObject(hidActs), getTextureObject(filters), targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 
scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, 
numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, 
moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, 
imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 8, 4, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 4, 4, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if 
(numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, 
numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 3, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 3, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 3, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 3, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 2, true, 
false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 1, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 1, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 1, 
true, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 1, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, 
true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 
4, true, true, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs->data_device, filters->data_device, targets->data_device, 
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } // end of the template-dispatch ladder over (scale, checkCaseBounds, numFilterColors, numImages)

    // Surface any launch-configuration or asynchronous kernel error from the
    // instantiation selected above (kernel launches themselves return nothing).
    getLastCudaError("imgActs: kernel execution failed");
}

#ifdef __cplusplus
extern "C" {
#endif

// C-linkage wrapper: backward ("down") pass of a convolutional layer.
// Forwards directly to _imgActs with scaleOutput fixed at 1 and the final
// flag set to true (localDown below is identical except it passes false —
// presumably the conv-vs-locally-connected switch; confirm against the
// _imgActs signature, which is outside this view).
void convDown(cudamat* images, cudamat* filters, cudamat* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets){
    _imgActs(images, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 1, true);
}

// C-linkage wrapper: backward ("down") pass of a locally-connected layer.
// Same argument forwarding as convDown except the final flag is false.
void localDown(cudamat* images, cudamat* filters, cudamat* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets){
    _imgActs(images, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 1, false);
}

#ifdef __cplusplus
}
#endif