text · stringlengths · 2.5k–6.39M
kind · stringclasses · 3 values
#define AlignSize(x, y) (((x) + (y) - 1) & ~((y) - 1)) #define TWO_MB (2 * 1024 * 1024) #define SIXTY_FOUR_KB (64 * 1024) #define FOUR_KB (4 * 1024) #define CUDA_CHECK(status) \ do \ { \ if ((status) != cudaSuccess) \ { \ printf("%s:%d CudaError: %s\n", __FILE__, __LINE__, cudaGetErrorString(status)); \ assert(0); \ } \ } while (0) enum Kerne...
the_stack
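A minimal usage sketch of the two macros above, assuming the parenthesized forms; the buffer name and size are illustrative. Note that CUDA_CHECK reads its argument twice, so the result of a CUDA call should be captured in a variable first:

// Sketch: round a size up to a 2 MB boundary, then error-check the calls.
// CUDA_CHECK evaluates its argument twice, so pass a variable, not a call.
void* d_buf = NULL;
size_t bytes = AlignSize((size_t)100000, (size_t)TWO_MB);  // -> 2097152
cudaError_t status = cudaMalloc(&d_buf, bytes);
CUDA_CHECK(status);
status = cudaMemset(d_buf, 0, bytes);
CUDA_CHECK(status);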
#include "bondsKernelsGpu.h" __device__ int monthLengthKernelGpu(int month, bool leapYear) { int MonthLength[12]; MonthLength[0]=31; MonthLength[1]=28; MonthLength[2]=31; MonthLength[3]=30; MonthLength[4]=31; MonthLength[5]=30; MonthLength[6]=31; MonthLength[7]=31; MonthLength[8]=30; MonthLengt...
the_stack
namespace cgbn { __device__ __forceinline__ uint32_t add_cc(uint32_t a, uint32_t b) { uint32_t r; asm volatile ("add.cc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b)); return r; } __device__ __forceinline__ uint32_t addc_cc(uint32_t a, uint32_t b) { uint32_t r; asm volatile ("addc.cc.u32 %0, %1, %2;" : "=r"(...
the_stack
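The two carry-chain wrappers above compose directly into multi-limb adds; a hedged sketch (the add64 helper is hypothetical, not part of cgbn) of summing two 64-bit values held as 32-bit limbs, relying, as cgbn does, on the condition-code flag surviving between adjacent inline-asm statements:

// Hypothetical helper built only from the wrappers shown above:
// 64-bit addition over (lo, hi) limb pairs with an explicit carry chain.
__device__ __forceinline__ void add64(uint32_t &rlo, uint32_t &rhi,
                                      uint32_t alo, uint32_t ahi,
                                      uint32_t blo, uint32_t bhi) {
  rlo = cgbn::add_cc(alo, blo);   // add.cc.u32 sets the carry flag
  rhi = cgbn::addc_cc(ahi, bhi);  // addc.cc.u32 consumes (and re-sets) it
}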
#include "Thinning.h" #include <iostream> using namespace std; // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:DEF_PATTERN_SIZE // 定义了 PATTERN 表的默认大小。 #define DEF_PATTERN_SIZE 512 // 宏:DEF_PATTERN_LEN // 定义了 PATTERN 表的个数。 #define DEF_PATTERN_LEN 4 // 宏:CST_IMG_W...
the_stack
#include <thrust/device_malloc.h> #include <thrust/device_new.h> #include <thrust/device_ptr.h> #include "nvblox/gpu_hash/cuda/gpu_hash_interface.cuh" #include "nvblox/gpu_hash/cuda/gpu_indexing.cuh" #include "nvblox/utils/timing.h" namespace nvblox { __device__ inline bool isTsdfVoxelValid(const TsdfVoxel& voxel) {...
the_stack
// FeatureVecCalc.cu // Implements computation of the initial feature vectors #include "FeatureVecCalc.h" // Macros: DEF_BLOCK_X and DEF_BLOCK_Y // Define the default thread block dimensions. #define DEF_BLOCK_X 256 #define DEF_BLOCK_Y 1 // Struct FeatureVectorProcessorParam (feature-vector processing parameters) // Defines the parameters for further processing of the initial feature // vectors: the upper bound, lower bound, and mean of cv, sd, and nc. typedef struct ProcessorParam_st { float mincv...
the_stack
#include <thrust/iterator/transform_iterator.h> #include <thrust/transform.h> #include <thrust/logical.h> /* * Note: * This implementation assumes that off-diag entries all have the opposite sign * compared with the diag entry. This is true for most practical cases. * It would even work if the offending off-diag entrie...
the_stack
* \test Testing the BLAS level 1 routines in the ViennaCL BLAS-like shared library **/ // include necessary system headers #include <iostream> #include <vector> #include <cmath> // Some helper functions for this tutorial: #include "viennacl.hpp" #include "viennacl/vector.hpp" template<typename ScalarType> Scalar...
the_stack
#pragma once #include <gunrock/app/problem_base.cuh> namespace gunrock { namespace app { namespace sage { /** * @brief Specifying parameters for SAGE Problem * @param parameters The util::Parameter<...> structure holding all parameter * info \return cudaError_t error message(s), if any */ cudaError_t UseParam...
the_stack
#ifdef GPU #if ( MODEL == HYDRO ) #if ( FLU_SCHEME == RTVD ) __global__ void CUFLU_FluidSolver_RTVD( real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ], real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ], const double g_Corner[][3], const real g_Pot_USG[][ C...
the_stack
#include <iostream> #include <vector> #include <memory> #include "opencv2/core/cuda/utility.hpp" #include "opencv2/cudalegacy/NPP_staging.hpp" #include "opencv2/cudalegacy/NCVBroxOpticalFlow.hpp" typedef NCVVectorAlloc<Ncv32f> FloatVector; ///////////////////////////////////////////////////////////////////////////...
the_stack
#include <cfloat> #include "utility.hpp" using namespace ppl::common; namespace ppl { namespace cv { namespace cuda { #define BLOCK_SIZE 128 #define BLOCK_SHIFT 7 #define MAX_BLOCKS 256 #define UCHAR_MIN 0 static __device__ uint block_count = 0; __DEVICE__ void checkMinMax1(uchar value, int index, int element_x, ...
the_stack
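The `block_count` device counter above is the standard ingredient of a single-pass reduction: every block publishes a partial result, and the last block to bump the counter folds the partials. A self-contained sketch of that pattern, assuming the grid has no more blocks than threads per block (all names illustrative, not ppl.cv's actual kernels):

#include <cfloat>
#define SKETCH_BLOCK 128  // illustrative; launch with this many threads

__device__ unsigned int finished_blocks = 0;  // plays the role of block_count

__global__ void minReduceSketch(const float *in, int n, float *partials, float *out) {
  __shared__ float smem[SKETCH_BLOCK];
  __shared__ bool is_last;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  smem[threadIdx.x] = (i < n) ? in[i] : FLT_MAX;
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {  // in-block tree reduction
    if (threadIdx.x < s)
      smem[threadIdx.x] = fminf(smem[threadIdx.x], smem[threadIdx.x + s]);
    __syncthreads();
  }
  if (threadIdx.x == 0) {
    partials[blockIdx.x] = smem[0];
    __threadfence();  // make the partial visible before bumping the counter
    is_last = (atomicAdd(&finished_blocks, 1u) == gridDim.x - 1);
  }
  __syncthreads();
  if (is_last) {  // the last block alone folds the per-block partials
    smem[threadIdx.x] = (threadIdx.x < gridDim.x) ? partials[threadIdx.x] : FLT_MAX;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
      if (threadIdx.x < s)
        smem[threadIdx.x] = fminf(smem[threadIdx.x], smem[threadIdx.x + s]);
      __syncthreads();
    }
    if (threadIdx.x == 0) { *out = smem[0]; finished_blocks = 0; }
  }
}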
#define LBANN_ENTRYWISE_BATCH_NORMALIZATION_LAYER_INSTANTIATE #include "lbann/comm_impl.hpp" #include "lbann/layers/regularizers/entrywise_batch_normalization.hpp" #include "lbann/weights/weights_helpers.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { /** * On input, sums and sqsums are a...
the_stack
#include <ops/declarable/helpers/s_t_b.h> #include <helpers/PointersManager.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void batchToSpaceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* v...
the_stack
#include <iostream> #include "viennacl.hpp" #include "viennacl_private.hpp" //include basic scalar and vector types of ViennaCL #include "viennacl/scalar.hpp" #include "viennacl/vector.hpp" //include the generic inner product functions of ViennaCL #include "viennacl/linalg/inner_prod.hpp" //include the generic norm...
the_stack
#include "modules/perception/inference/tensorrt/plugins/kernels.h" #include "modules/perception/inference/tensorrt/plugins/rpn_proposal_ssd_plugin.h" namespace apollo { namespace perception { namespace inference { // TODO(chenjiahao): add heat_map_b as anchor_offset // output anchors dims: [H, W, num_anchor_per_point...
the_stack
// // This project supports comparisons against multi-core SSE-enabled CPUs using // conditional compilation of the SSW library: // // https://github.com/mengyao/Complete-Striped-Smith-Waterman-Library // // In order to perform these additional tests, the user must download ssw.h and ssw.c // from the above repository...
the_stack
#include "sha3.h" #include "sha3_cu.h" #include <cuda.h> #include <inttypes.h> #include <stdio.h> #include <string.h> static const char *_cudaErrorToString(cudaError_t error) { switch (error) { case cudaSuccess: return "cudaSuccess"; case cudaErrorMissingConfiguration: return "cudaErrorMissingConfigurat...
the_stack
#include "core/providers/cuda/cu_inc/common.cuh" #include "tile_impl.h" namespace onnxruntime { namespace cuda { #ifdef USE_ROCM constexpr int num_elements_per_thread = 2; constexpr int num_threads_per_block = 512; #else constexpr int num_elements_per_thread = GridDim::maxElementsPerThread; constexpr int num_threads_...
the_stack
#define STR1(X) #X #define STR(X) STR1(X) #define STRINGIFY(X,Y) X ## Y #define CON(X,Y) STRINGIFY(X,Y) #define KDir kernels #include "includes/ourmacros.h" extern __shared__ type tile[]; __device__ void fvinomgeneral_main(const type * __restrict__ Atmp, type * __restrict__ Btmp, const int tb_size, const int* _...
the_stack
* \test Tests vector operations (BLAS level 1) for unsigned integer arithmetic. **/ // // *** System // #include <iostream> #include <iomanip> #include <vector> // // *** ViennaCL // #include "viennacl/vector.hpp" #include "viennacl/vector_proxy.hpp" #include "viennacl/linalg/inner_prod.hpp" #include "viennacl/lin...
the_stack
#include <cuml/manifold/umapparams.h> #include <cuml/common/logger.hpp> #include <cuml/manifold/common.hpp> #include "optimize.cuh" #include "supervised.cuh" #include "fuzzy_simpl_set/runner.cuh" #include "init_embed/runner.cuh" #include "knn_graph/runner.cuh" #include "simpl_set_embed/runner.cuh" #include <memory> ...
the_stack
#include <math.h> #ifdef WIN32 #include <float.h> # define isnan(x) _isnan(x) # define isinf(x) (! _finite(x)) #endif #define notanum(x) (isnan(x) || isinf(x)) /* SFILE_BEGIN */ #include "essential_matrix_5pt_dcl.h" #include "essential_matrix_5pt.h" typedef double Matches[][3]; /* SFILE_END */ // Actual expected...
the_stack
#include <stdlib.h> #include <stdio.h> #include "cuda.h" int nblock_size = 64; int ngrid_size = 1; int maxgsx = 65535; int mmcc = 0; static int devid; static cudaError_t crc; __global__ void emptyKernel() {} /*--------------------------------------------------------------------*/ extern "C" void setgbsize(int nbloc...
the_stack
#pragma once #include <gunrock/app/problem_base.cuh> namespace gunrock { namespace app { namespace rw { /** * @brief Specifying parameters for RW Problem * @param parameters The util::Parameter<...> structure holding all parameter * info \return cudaError_t error message(s), if any */ cudaError_t UseParameter...
the_stack
#include <iostream> #include <sstream> #include <map> #include <vector> #include <stdio.h> #include <cublas_v2.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include "somoclu.h" #ifdef _WIN32 #define popen _popen #define pclose _pclose #...
the_stack
* rtk #includes * *****************/ #include "rtkCudaUtilities.hcu" #include "rtkConfiguration.h" #include "rtkCudaIntersectBox.hcu" #include "rtkCudaWarpForwardProjectionImageFilter.hcu" /***************** * C #includes * *****************/ #include <cstdio> #include <cstdlib> #include <cstring> #include <cma...
the_stack
#include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/instance_normalization.hpp> #include <nbla/variable.hpp> // Kernels and ops #include <nbla/cuda/function/kernel/instance_normalization.cuh> #include <nbla/cuda/function/kernel/normalization.cuh> #include <nbla/cuda/utils/reduce_ops/...
the_stack
#include "image/unpack.hpp" #include "colorArrayDevice.hpp" #include "backend/cuda/deviceBuffer.hpp" #include "backend/cuda/deviceBuffer2D.hpp" #include "backend/cuda/surface.hpp" #include "backend/cuda/deviceStream.hpp" #include "cuda/util.hpp" #include "unpackKernel.cu" #include <cuda_runtime.h> #include <cassert...
the_stack
#include <stdio.h> #include <math.h> #include <float.h> #include "filter_sample_depthwise_cuda.h" using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) c...
the_stack
Parallel reduction kernels */ #ifndef _REDUCE_KERNEL_H_ #define _REDUCE_KERNEL_H_ #include <stdio.h> #include "sharedmem.cuh" #if 1 #define EMUSYNC __syncthreads() #else #define EMUSYNC #endif // Macros to append an SM version identifier to a function name // This allows us to compile a file multiple times for dif...
the_stack
//#define UNROLL_INNER //#define IMUL(a, b) __mul24(a, b) #include <utility_kernels.h> namespace vision { // OpenGL mapped input textures texture<uchar4, cudaTextureType2D, cudaReadModeElementType> d_rgba_texture; texture<float, cudaTextureType2D, cudaReadModeElementType> d_float_texture0; texture<float, cud...
the_stack
#include "Segmentation.h" // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 256 #define DEF_BLOCK_Y 1 // 核函数:_countW1Ker(统计各个向量 W1 近旁的向量个数) // 该方法首先计算当前向量与其他各个向量之间的三个距离度量(坐标的欧式距离, // 特征值的欧式距离以及向量的夹角),如果在 W1 范围内,则将当前 index 的 // count 个数加一,最终得到统计结果 static __global__ void ...
the_stack
/* Utility functions */ //Computes the storage index of the tree-traverse stack template<int SHIFT> __forceinline__ __device__ int ACCS(const int i) { return (i & ((LMEM_STACK_SIZE << SHIFT) - 1))*blockDim.x + threadIdx.x; } #define BTEST(x) (-(int)(x)) __forceinline__ __device__ float4 get_float4(float4 const...
the_stack
#include <thrust/unique.h> #include <cub/block/block_radix_sort.cuh> #include "filtering_util.h" #include "rxmesh/attribute.h" #include "rxmesh/context.h" #include "rxmesh/kernels/query_dispatcher.cuh" #include "rxmesh/util/vector.h" constexpr float EPS = 10e-6; /** * compute_vertex_normal() */ template <typename...
the_stack
#include "nnbilinearsampler.hpp" #include "datacu.hpp" #include "impl/cudnnhelper.hpp" #include <assert.h> #include <algorithm> using namespace std ; using namespace vl ; using namespace vl::nn ; using namespace vl::impl ; #if CUDNN_VERSION < 5000 #warning "bilinearsampler_cudnn.cu will be disabled as it requires CUD...
the_stack
#include "src/common.hpp" #include "src/scf.hpp" #include "src/integrate.hpp" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/inner_product.h> using namespace std; using namespace etics; Integrator IntegratorObj; // GLOBAL VARIABLES Real ConstantStep = 0.001953125; Real T, Step, dT...
the_stack
//#define DEBUG const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y){ x = _x, y = _y; } __device__ void set(float _x, float _y){ x = _x; y = _y; } _...
the_stack
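Sizing THREADS_PER_BLOCK_NMS to `sizeof(unsigned long long) * 8` is deliberate: in the usual GPU NMS layout, each box is compared against a tile of up to 64 candidates and the suppression decisions pack into a single 64-bit word. A hedged sketch of that inner step; `iou()` and the 5-float (x1, y1, x2, y2, score) box layout are assumptions for illustration:

// Sketch: one suppression mask per (box, tile) pair, one bit per candidate.
// iou() is hypothetical here; the real file computes overlap from Point geometry.
__device__ unsigned long long suppressionMask(const float *box,
                                              const float *tile_boxes,
                                              int tile_size,  // <= 64
                                              float thresh) {
  unsigned long long mask = 0ULL;
  for (int j = 0; j < tile_size; ++j)
    if (iou(box, tile_boxes + j * 5) > thresh)
      mask |= 1ULL << j;  // the mask's bit width is why blocks hold 64 threads
  return mask;
}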
#include "doctest_proxy.hpp" #include <Core/Array/ArrayView.hpp> #include <Core/CUDA/CUDAArray.hpp> #include <Core/CUDA/CUDAArrayView.hpp> using namespace CubbyFlow; TEST_CASE("[CUDAArray2] - Constructors") { { CUDAArray2<float> arr; CHECK_EQ(0u, arr.Width()); CHECK_EQ(0u, arr.Height()); ...
the_stack
namespace cunumeric { using namespace Legion; using namespace legate; //////////////////////////////////// // Direct convolution implementation //////////////////////////////////// // Convolution should be able to hit FMA throughput limits // on the GPU due to the amount of FLOPs needed to be performed // given the ...
the_stack
#include <pcl/gpu/utils/device/limits.hpp> #include <pcl/gpu/utils/device/algorithm.hpp> #include <pcl/gpu/utils/device/warp.hpp> #include <pcl/gpu/utils/device/static_check.hpp> //#include <pcl/gpu/utils/device/funcattrib.hpp> #include <pcl/gpu/utils/safe_call.hpp> #include <thrust/tuple.h> #include <thrust/iterator/...
the_stack
#ifndef HAVE_OPENCV_CUDEV #error "opencv_cudev is required" #else #include "opencv2/cudev.hpp" using namespace cv::cudev; void subScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const GpuMat& mask, double, Stream& stream, int); namespace { template <typename SrcType, typename ScalarType, type...
the_stack
#include <algorithm> #include <cstdio> #include <limits> #include <numeric> #include <random> #include <vector> #include "k2/csrc/nbest.h" #include "k2/csrc/ragged.h" #include "k2/csrc/ragged_ops.h" namespace k2 { TEST(AlgorithmsTest, TestSuffixArray) { ContextPtr cpu = GetCpuContext(); for (int i = 0; i < 100;...
the_stack
using at::Half; using at::Tensor; #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) // CUDA: grid stride looping #define CUDA_KERNEL_LOOP(i, n) \ for (in...
the_stack
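With DIVUP corrected to divide by `n`, the loop macros pair with launches in the usual grid-stride way; a minimal sketch (kernel and buffer names illustrative):

// Sketch: a grid-stride loop lets a fixed grid cover any element count.
__global__ void scaleKernel(float *x, int n, float a) {
  CUDA_1D_KERNEL_LOOP(i, n) {
    x[i] *= a;
  }
}
// Launch: DIVUP rounds the block count up so every element is visited even
// when n is not a multiple of the block size:
//   scaleKernel<<<DIVUP(n, 256), 256>>>(d_x, n, 2.0f);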
using namespace gpu_treeshap; // NOLINT class ParameterisedModelTest : public ::testing::TestWithParam< std::tuple<size_t, size_t, size_t, size_t, size_t>> { protected: ParameterisedModelTest() { size_t max_depth, num_paths; std::tie(num_rows, num_features, num_groups, max_depth, num_paths) =...
the_stack
#include <amgx_types/util.h> #include <solvers/block_common_solver.h> #include <amgx_cublas.h> //TODO remove synchronization from this module by moving host operations to the device namespace amgx { template< class T_Config> GMRES_Solver<T_Config>::GMRES_Solver( AMG_Config &cfg, const std::string &cfg_scope ) : ...
the_stack
#include <cstdio> #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREAD...
the_stack
#include <omp.h> #ifdef HAVE_CUB #include <cub/block/block_reduce.cuh> #endif // HAVE_CUB #ifdef USE_NVTX #include <nvToolsExt.h> const uint32_t colors[] = {0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff}; const int num_colors = sizeof(colors) / sizeof(...
the_stack
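The colors table above is conventionally consumed by a PUSH_RANGE/POP_RANGE pair; a sketch of that common NVTX pattern (macro names follow the usual convention and are assumed rather than taken from this file):

#ifdef USE_NVTX
#define PUSH_RANGE(name, cid)                                \
  {                                                          \
    int color_id = (cid) % num_colors;                       \
    nvtxEventAttributes_t eventAttrib = {0};                 \
    eventAttrib.version = NVTX_VERSION;                      \
    eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;        \
    eventAttrib.colorType = NVTX_COLOR_ARGB;                 \
    eventAttrib.color = colors[color_id];                    \
    eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;       \
    eventAttrib.message.ascii = (name);                      \
    nvtxRangePushEx(&eventAttrib);                           \
  }
#define POP_RANGE nvtxRangePop();
#else
#define PUSH_RANGE(name, cid)
#define POP_RANGE
#endif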
namespace amgx { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T > __global__ void split_l_and_u(int n, const T *lu, int lda, T *l, T *u ) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim...
the_stack
#include "antialias.h" //------------------------------------------------------------------------ // Helpers. #define F32_MAX (3.402823466e+38f) static __forceinline__ __device__ bool same_sign(float a, float b) { return (__float_as_int(a) ^ __float_as_int(b)) >= 0; } static __forceinline__ __device__ bool rational_g...
the_stack
#pragma once #include <cuda_runtime.h> #include "libvis/cuda/cuda_buffer.cuh" #include "libvis/cuda/cuda_matrix.cuh" #include "libvis/cuda/cuda_unprojection_lookup.cuh" #include "libvis/cuda/pixel_corner_projector.cuh" #include "libvis/cuda/patch_match_stereo_samples.cuh" #include "libvis/cuda/patch_match_stereo_util...
the_stack
#include <thrust/device_ptr.h> #include <thrust/sort.h> #include <thrust/reverse.h> #include <thrust/reduce.h> #include <thrust/merge.h> #include <thrust/fill.h> #include <thrust/iterator/reverse_iterator.h> #include <thrust/device_vector.h> #define BLOCKDIM 32 __global__ void __c...
the_stack
// InscribedCircle // Implements the inscribed circle of a curve #include "InscribedCircle.h" #include <iostream> #include <fstream> #include <cmath> #include <algorithm> using namespace std; // Macros: DEF_BLOCK_X and DEF_BLOCK_Y // Define the default thread block dimensions. #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // Macros: IN_LABEL and OUT_LABEL // Define the label values for points inside and outside the curve #define ...
the_stack
#include <cmath> #include "core/context_cuda.h" #include "utils/cuda_device.h" #include "utils/math_functions.h" #include "utils/cast.h" namespace dragon { namespace math { /******************** Level-0 ********************/ template <typename T> __global__ void _Set(const int n, const T alpha, T* x) { CUDA_KE...
the_stack
// threads per block #define THREADS1 256 // must be a power of 2 #define THREADS2 256 #define THREADS3 256 #define THREADS4 256 #define THREADS5 256 #define THREADS6 256 // block count = factor * #SMs #define FACTOR1 2 #define FACTOR2 2 #define FACTOR3 1 // must all be resident at the same time #define FACTOR4 1 ...
the_stack
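A short sketch of how "block count = factor * #SMs" is typically realized at launch time (variable names illustrative; `kernel1` is hypothetical):

// Query the SM count once, then size each grid as FACTORn * #SMs.
int device = 0, numSMs = 0;
cudaGetDevice(&device);
cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, device);
// e.g.: kernel1<<<FACTOR1 * numSMs, THREADS1>>>(...);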
namespace caffe { /////////////////////////////////////////////////////////////////// template <typename Dtype> __global__ void forward_affine(const int count, const int channels_, const int height_, const int width_, const int output_H_, const int output_W_, const Dtype* in, const Dtype* theta, Dtype* sou...
the_stack
#include <cuda_runtime.h> #include "cublas_v2.h" #include "buffalo/cuda/utils.cuh" #include "buffalo/cuda/als/als.hpp" namespace cuda_als{ using std::invalid_argument; using namespace cuda_buffalo; __global__ void least_squares_cg_kernel(const int dim, const int vdim, const int rows, const int op_rows, ...
the_stack
#include <cstdint> #include <cstdio> #include "histogram_16_64_256.hu" namespace LightGBM { // atomic add for float number in local memory inline __device__ void atomic_local_add_f(acc_type *addr, const acc_type val) { atomicAdd(addr, static_cast<acc_type>(val)); } // histogram16 stuff #ifdef ENABLE_ALL_FEATURE...
the_stack
//TODO remove synchronization from this module by moving host operations to the device namespace amgx { template <class TConfig> void KrylovSubspaceBuffer<TConfig>::set_max_dimension(int max_dimension) { m_V_vector.resize( max_dimension + 2 ); m_Z_vector.resize( max_dimension + 1 ); //set the pointers to...
the_stack
#include <cudpp_globals.h> #include <stdio.h> #include "cudpp_util.h" #define IDX (threadIdx.x + (blockIdx.x * blockDim.x)) /** * @file * sa_kernel.cuh * * @brief CUDPP kernel-level suffix array routines */ /** \addtogroup cudpp_kernel * @{ */ /** @name Suffix Array Functions * @{ */ typedef unsigned int u...
the_stack
template<> void XlnetLayer<float>::blockRelShiftBd(dim3 &grid, dim3& block){ grid.x=batch_size; grid.y=head_num; grid.z=seq_len; block.x=seq_len*2; } template<> void XlnetLayer<__half>::blockRelShiftBd(dim3 &grid, dim3& block){ int threads=512; int seq_dim1=threads/seq_len; int seq_dim2=seq...
the_stack
// InputData InputData::InputData(int batch_size, int seq_len){ this->batch_size=batch_size; this->seq_len=seq_len; } InputData::~InputData(){ } // InputDataHost InputDataHost::InputDataHost(int batch_size, int seq_len):InputData(batch_size,seq_len){ inp_k=new int[batch_size * seq_len]; input_mask=ne...
the_stack
#include <cuda.h> #include "DataFormats/EcalDigi/interface/EcalDataFrame.h" #include "DataFormats/EcalDigi/interface/EcalMGPASample.h" #include "DataFormats/EcalRecHit/interface/EcalUncalibratedRecHit.h" #include "DataFormats/Math/interface/approx_exp.h" #include "DataFormats/Math/interface/approx_log.h" #include "FWC...
the_stack
namespace layer_norm { template<typename Ktraits> __global__ __launch_bounds__(Ktraits::THREADS_PER_CTA) void ln_bwd_kernel(layer_norm::BwdParams params) { enum { ROWS_PER_CTA = Ktraits::ROWS_PER_CTA }; enum { WARPS_M = Ktraits::WARPS_M }; enum { WARPS_N = Ktraits::WARPS_N }; enum { THREADS_PER_ROW =...
the_stack
************************************************************************** * \file dct8x8_kernel2.cu * \brief Contains 2nd kernel implementations of DCT and IDCT routines, used in * JPEG internal data processing. Optimized device code. * * This code implements the traditional approach to forward and inverse Discret...
the_stack
#include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/strings_column_factories.cuh> #include <cudf/strings/...
the_stack
#pragma once #include <gunrock/util/array_utils.cuh> namespace gunrock { namespace oprtr { #define FORALL_BLOCKSIZE 256 #define FORALL_GRIDSIZE 256 /*template < typename T, typename SizeT, typename ApplyLambda> __global__ void ForAll_Kernel( T *d_array, ApplyLambda apply, SizeT ...
the_stack
#include <array/NDArrayFactory.h> #include <exceptions/cuda_exception.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> namespace sd { nam...
the_stack
#define CUDA_NUM_THREADS 256 #define THREADS_PER_BLOCK 64 #define DIM0(TENSOR) ((TENSOR).x) #define DIM1(TENSOR) ((TENSOR).y) #define DIM2(TENSOR) ((TENSOR).z) #define DIM3(TENSOR) ((TENSOR).w) #define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) *...
the_stack
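DIM3_INDEX token-pastes `TENSOR##_stride`, so for a pointer named `input` a variable literally named `input_stride` must be in scope; a minimal sketch (all names illustrative):

// Sketch: strided 4-D element access through the macros above. The companion
// input_stride must exist by exactly that name because of the ## token pasting.
__global__ void readOne(const float *input, int4 input_stride, float *out) {
  // Expands to input[0*stride.x + 1*stride.y + 2*stride.z + 3*stride.w].
  out[0] = DIM3_INDEX(input, 0, 1, 2, 3);
}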
namespace hvvr { template <PixelFormat Format> CUDA_DEVICE void writeSurface(vector4 val, Texture2D tex, unsigned int x, unsigned int y) { if (Format == PixelFormat::RGBA32F) { surf2Dwrite(float4(val), tex.d_surfaceObject, x * sizeof(float4), y); } else if (Format == PixelFormat::RGBA16)...
the_stack
void invert_cpu(float* data, int actualsize, float* log_determinant) { int maxsize = actualsize; int n = actualsize; *log_determinant = 0.0; if (actualsize == 1) { // special case, dimensionality == 1 *log_determinant = ::logf(data[0]); data[0] = 1.0 / data[0]; } else if(actualsize >= 2) { // dimens...
the_stack
#include <cooperative_groups.h> #include <library/cpp/cuda/wrappers/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> using namespace cooperative_groups; namespace NKernel { template <int Bits, int BlockSize> struct ...
the_stack
#include "cuda_kernels.h" #include "cub/cub.cuh" #include <assert.h> #include <cstdio> #include <cstdlib> #include <climits> #include <cfloat> #include <vector> #include <type_traits> namespace fastertransformer { /* ********************************** common kernel *********************************** */ template ...
the_stack
//////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Depth bilateral filter namespace kfusion { namespace device { __global__ void bilateral_kernel(const PtrStepSz<ushort> src, PtrStep<ushort> dst, const int ksz, const float sigma_spatial2...
the_stack
DEV static real_t terminal_U_penalty(const base_t *s, const int i, const int j, param_t p) { return s[i] == U || s[j] == U ? p->terminal_AU_penalty : RCONST(0.); } DEV static real_t dangle_3p_energy(const base_t *s, const int i, const int j, const int ip1, para...
the_stack
#define DEBUG_DEV #ifdef DEBUG_DEV #define getErrorCuda(command)\ command;\ cudaDeviceSynchronize();\ if (cudaPeekAtLastError() != cudaSuccess){\ std::cout << #command << " : " << cudaGetErrorString(cudaGetLastError())\ << " in file " << __FILE__ << " at line " << __LINE__ << s...
the_stack
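Since the macro body runs `command` before synchronizing, it wraps whole kernel launches; a usage sketch (kernel and variable names hypothetical). The doubled parentheses keep the commas of the launch configuration and argument list inside a single macro argument:

// Sketch: executes the launch, synchronizes, then reports any error with
// the stringified call plus file/line context.
getErrorCuda((copyKernel<<<grid, block>>>(d_dst, d_src, n)));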
#include "chrono_multicore/cuda/ChMPM.cuh" #include "chrono_multicore/cuda/ChMPMUtils.h" #include "chrono_multicore/cuda/ChCudaHelper.cuh" #include "chrono_multicore/cuda/ChGPUVector.cuh" #include <cub/cub.cuh> #include "chrono_multicore/cuda/matrixf.cuh" //#define BOX_YIELD #define SPHERE_YIELD //#define DRUCKER_PRAG...
the_stack
#include <cuda.h> // GPU implementation of proper Marian top-k operator for TopkNodeOp // This file contains a lot of code-duplication with src/translator/nth_element.cu // the goal is to replace the beam-search specific topk search with this code. // Currently this is only used in the unit tests, but we will move for...
the_stack
#define BUILD_DEV __device__ namespace anakin{ namespace saber{ template<typename Dtype> __global__ void ker_relu_fwd(Dtype * out_data, const Dtype* in_data, const int count, Dtype neg_slop, int in_n, int in_c, int in_h, int in_w, ...
the_stack
#pragma once #include <cuda_runtime.h> #include <libvis/cuda/cuda_buffer.cuh> #include <libvis/libvis.h> #include <math_constants.h> #include "camera_calibration/cuda/cuda_matrix.cuh" #include "camera_calibration/models/cuda_camera_model.cuh" namespace vis { // TODO: Move the functions below to a better place __fo...
the_stack
/** * Copyright (c) 2021 Darius Rückert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/imageProcessing/imageProcessing.h" #include "saiga/cuda/tests/test.h" #include "saiga/cuda/tests/test_helper.h" namespace Saiga { namespace...
the_stack
#include <doctest.h> #include <heteroflow/heteroflow.hpp> // ---------------------------------------------------------------------------- // Parameters // ---------------------------------------------------------------------------- const size_t C = std::min(16u, std::thread::hardware_concurrency()); const size_t G = s...
the_stack
/*-- Sort Transform is patented by Michael Schindler under US patent 6,199,064. However for research purposes this algorithm is included in this software. So if you are of the type who should worry about this (making money) worry away. The author shall have no liability with respect to the infringement of copyrights, ...
the_stack
// Description //======================================================================================================================================================150 // USE //==========================================================================================================================================...
the_stack
#include "libhmsbeagle/GPU/GPUImplDefs.h" #include "libhmsbeagle/GPU/kernels/kernelsAll.cu" // This file includes the non-state-count specific kernels extern "C" { #endif /////////////////////////////////////////////////////////////////////////////// // kernel macros CPU #define DETERMINE_INDICES_X_CPU()\...
the_stack
#include "nvblox/core/blox.h" #include "nvblox/core/cuda/error_check.cuh" #include "nvblox/core/interpolation_2d.h" namespace nvblox { namespace test_utils { __global__ void transformPointsOnGPU(const Eigen::Matrix3f* R_B_A_matrix_ptr, const Eigen::Vector3f* t_B_A_matrix_ptr, ...
the_stack
#include "kernel_common.h" #include "geometry/grid_3d.h" #include "geometry/SE3.h" #include "optimization/optimization.h" #include "util/mirrored_memory.h" namespace dart { static const float truncVal = 1000.0; // -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=- template <bool dbgDA, bool dbgErr, bool dbgNorm> __glob...
the_stack
namespace nvbio { namespace priv { struct pack_flags_functor { // constructor pack_flags_functor( const uint32 _n, const uint8* _flags, uint32* _comp_flags) : n( _n ), flags( _flags ), comp_flags( _comp_flags ) {} // functor operator NVBIO_FORCEINLINE NVBIO_...
the_stack
// 2006.03 Rob Janiczek // --creation of prototype version // 2006.03 Drew Gilliam // --rewriting of prototype version into current version // --got rid of multiple function calls, all code in a // single function (for speed) // --code cleanup & commenting // --cod...
the_stack
// Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: joaander /*! \file NeighborListGPU.cu \brief Defines GPU kernel code for neighbor list processing on the GPU */ #include "NeighborListGPU.cu...
the_stack
// CUDA kernel #include "kernel.h" void kernel_gpu_wrapper( params_common common, int* endoRow, int* endoCol, int* tEndoRowLoc, int* tEndoColLoc, int* epiRow, int* epiCol, int* tEpiRowLoc, int* tEpiColLoc, avi_t* frames) { // common //printf("tSize is %d, sSize is %d\n", co...
the_stack
* Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable l...
the_stack
#include "common.h" #include "texture.h" //------------------------------------------------------------------------ // Memory access and math helpers. static __device__ __forceinline__ void accum_from_mem(float* a, int s, float b, float c) { a[0] += b * c; } static __device__ __forceinline__ void accum_from_mem(floa...
the_stack
#include <cuda.h> //#define DEBUG // calculate the IoU of a single box against another box __device__ float calc_single_iou(const float4 b1, const float4 b2) { // (lt), (rb) float l = max(b1.x, b2.x); float t = max(b1.y, b2.y); float r = min(b1.z, b2.z); float b = min(b1.w, b2.w); float first = (r - l); ...
the_stack
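For reference, the complete IoU the kernel above begins: intersection area over union area, with the overlap clamped at zero. A hedged sketch consistent with the (l, t, r, b) intermediates shown (helper name illustrative):

__device__ float iouSketch(const float4 b1, const float4 b2) {
  float l = fmaxf(b1.x, b2.x), t = fmaxf(b1.y, b2.y);
  float r = fminf(b1.z, b2.z), b = fminf(b1.w, b2.w);
  float inter = fmaxf(r - l, 0.0f) * fmaxf(b - t, 0.0f);  // clamp empty overlap
  float area1 = (b1.z - b1.x) * (b1.w - b1.y);
  float area2 = (b2.z - b2.x) * (b2.w - b2.y);
  return inter / (area1 + area2 - inter);                 // union = sum - inter
}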
#include <iostream> #include <chrono> #include <algorithm> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" #include "Common.cuh" #define BBCU_BATCHNORM_FW_BLOCK_SIZE 128 #define BBCU_BATCHNORM_BW_BLOCK_SIZE 128 /* ////////////////////////////...
the_stack
//------------------------------------------------------------------------------------------------- // This file is based on Peter Shirley's book "Ray Tracing in One Weekend" // #include <cstdlib> #include <iostream> #include <ostream> #include <memory> #include <GL/glew.h> #include <cuda_runtime_api.h> #include <t...
the_stack
#include <thrust/iterator/transform_iterator.h> template <typename FloatT, typename IdxType> RepresentationsStorage<FloatT, IdxType>::RepresentationsStorage( const size_t num_objects, const size_t size, Streams* const streams) : reprs_(size, num_objects, streams->next()) { PROFILE_FUNCT...
the_stack
#pragma once #include <math/vector.h> #include <math/matrix.h> #include <utils.h> #include <warp_ops.cuh> #include "config.h" #include "instrumentation.cuh" #include "BinRasterizer.cuh" #include "TileRasterizer.cuh" #include "StampShading.cuh" #include "TileRasterizerMask.cuh" #include "viewport.cu...
the_stack
************************************************************************** * \file dct8x8.cu * \brief Contains entry point, wrappers to host and device code and benchmark. * * This sample implements forward and inverse Discrete Cosine Transform to blocks * of image pixels (of 8x8 size), as in JPEG standard. The typical...
the_stack
// TODO describe me #define GB_CUDA_KERNEL //#include <cstdint> #include "GB_cuda_buckets.h" #include "matrix.h" #include <cooperative_groups.h> #include "local_cub/block/block_scan.cuh" using namespace cooperative_groups; // A stateful callback functor that maintains a running prefix to be applied // during conse...
the_stack