Columns:
text: string (lengths 2.5k to 6.39M)
kind: string (3 classes)
template<typename Dtype> __global__ static void _fix_neuron_v1(const int N, const Dtype* src, const Dtype* fragpos, Dtype* dst, int val_max, ...
the_stack
namespace SparseOperationKit { struct IdenticalHash { using result_type = uint32_t; IdenticalHash() = default; template <typename TKey> static __device__ result_type compute(TKey const &key) { return static_cast<result_type>(key); } }; /* It dispatches keys based on key % GPU_NUM */ template <type...
the_stack
#include <nbla/cuda/function/kernel/sync_batch_normalization.cuh> namespace nbla { static bool can_use_int_as_index_t(const Size_t size0, const Size_t size1, const Size_t size2) { return size0 * size1 * size2 < std::numeric_limits<int>::max(); } template <typename T> void forward...
the_stack
* Test evaluation for caching allocator of device memory ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <stdio.h> #include <cub/util_allocator.cuh> #include "test_util.h" using namespace cub; //-----...
the_stack
#include <Environment.h> #include <loops/transform.h> #include <op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> template <typename T> __device__ void transformGeneric( int opNum, Nd4jLong n, T *dy, Nd4jLong incy, T *params, T *result, Nd4jLong resultStride, int *alloc...
the_stack
// CurveTracing // Curve tracing implementation #include "CurveTracing.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; // Macro: CURVE_VALUE (maximum number of curves) // Sets the maximum number of curves that can be extracted from an image #define CURVE_VALUE 1000 // Macros: DEF_BLOCK_X and DEF_BLOCK_Y // Define the default thread block dimensions. #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // Kernel function: _...
the_stack
* @file * Operations for reading linear tiles of data into the CUDA warp. */ #pragma once #include <iterator> #include <type_traits> #include <cub/block/block_load.cuh> #include <cub/config.cuh> #include <cub/iterator/cache_modified_input_iterator.cuh> #include <cub/util_ptx.cuh> #include <cub/util_type.cuh> #incl...
the_stack
#define BLOCK_SIZE_0 256 #define BLOCK_SIZE_1_X 16 #define BLOCK_SIZE_1_Y 16 long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) +tv.tv_usec; } // create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06 void create_matrix(float *m, int size){...
the_stack
#include "nnroipooling.hpp" #include "impl/dispatcher.hpp" #include <limits> #include <cassert> #include <cstring> #include <cmath> #include <iostream> using namespace vl ; using namespace vl::nn ; using namespace vl::impl ; template<DeviceType deviceType, DataType dataType> struct ROIPoolingForward ; template<Device...
the_stack
#define FUNC1(i) \ { \ Ajreg = Ajlocal[i]; \ Axreg = Axlocal[i]; \ if(Ajreg > 0) \ { ...
the_stack
#include <nvbio-test/alignment_test_utils.h> #include <nvbio/basic/timer.h> #include <nvbio/basic/console.h> #include <nvbio/basic/cuda/ldg.h> #include <nvbio/basic/cached_iterator.h> #include <nvbio/basic/packedstream.h> #include <nvbio/basic/packedstream_loader.h> #include <nvbio/basic/vector_view.h> #include <nvbio/...
the_stack
#include <fast_gicp/cuda/vector3_hash.cuh> namespace fast_gicp { namespace cuda { // point coord -> voxel coord conversion struct voxel_coord_kernel { voxel_coord_kernel(const thrust::device_ptr<const VoxelMapInfo>& info) : voxelmap_info_ptr(info) {} __host__ __device__ Eigen::Vector3i operator()(const Eigen::Ve...
the_stack
using namespace torch; #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <iostream> // Cuda tensor accessor definitions // restrict pointer traits prioritize speed over memory consumption #define TensorAcc4R PackedTensorAccessor32<scalar_t,4,RestrictPtrTraits> #define TensorAcc5R PackedTensorAcc...
the_stack
// redistribute between group-cyclic distributions with different cycles // c0 <= c1, n-dimensional version __global__ void gpu_b2c_pack_kernel_nd(unsigned int local_size, int *d_c0, int *d_c1, int ndim, ...
the_stack
#ifndef DIVUP #define DIVUP(x, y) (((x) + (y) - 1) / (y)) #endif #ifndef MIN #define MIN(x,y) (((x) < (y)) ? (x) : (y)) #endif #ifndef MAX #define MAX(x,y) (((x) > (y)) ? (x) : (y)) #endif #include "cublas.h" #ifdef __cplusplus extern "C" { #endif struct cudamat { float* data_host; float* data_device; int on_device; ...
the_stack
namespace cub = hipcub; #endif #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/batch_norm_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/norm_utils.cu.h" #include "paddle/fluid/platform/float16.h" DECLARE_bool(cudnn_batchnorm_spatial_persi...
the_stack
#define BYDIMF 5 #define CDIM 10 #define BYDIMB 5 #if __CUDA_ARCH__ >= 300 /* * Positive kernel for word2vec. This handles the positively-labeled word pairs with * one context word and the current word. */ template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecPos(int nrows, int ncol...
the_stack
#include <boost/array.hpp> #include <boost/random.hpp> #include "caffe/util/rng.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/pixel_feature_layer.hpp" #include "caffe/util/gpu_util.cuh" namespace caffe { template <typename Dtype> __global__ void PixelFeatureXYForwardGPU(const int nthreads, ...
the_stack
\brief Unit tests for thread-level GEMM */ #include <fstream> #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h" #include ...
the_stack
using pcl::gpu::people::trees::Node; using pcl::gpu::people::trees::Label; using pcl::gpu::people::trees::AttribLocation; using pcl::gpu::people::trees::Attrib; using pcl::gpu::people::trees::focal; using pcl::gpu::people::trees::NUM_LABELS; using namespace std; typedef unsigned int uint; #ifdef __CDT_PARSER__ // Thi...
the_stack
#include "miner.h" extern "C" { #include "sph/sph_blake.h" } /* threads per block and nonces per thread */ #define TPB 768 #define NPT 192 #define maxResults 16 /* max count of found nonces in one call */ #define NBN 2 /* hash by cpu with blake 256 */ extern "C" void blake256_14roundHash(void *output, const void *in...
the_stack
//////////////////////////////////////////////////////////////////////////////// // Global types //////////////////////////////////////////////////////////////////////////////// #include <stdlib.h> #include <stdio.h> #include <cutil_inline.h> #include "realtype.h" #include "MonteCarlo_common.h" /////////////////////...
the_stack
#include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/vec_traits.hpp" #include "opencv2/core/cuda/vec_math.hpp" #include "opencv2/core/cuda/limits.hpp" #include "opencv2/core/cuda/color.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" namespace cv { namespace cuda { namespace device { template <ty...
the_stack
using namespace Yolo; namespace { // Write values into buffer template <typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } // Read values from buffer template <typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const ...
the_stack
#include <cuml/matrix/kernelparams.h> #include <cuml/common/logger.hpp> #include <matrix/grammatrix.cuh> #include <matrix/kernelfactory.cuh> #include <raft/linalg/cublas_wrappers.h> #include <raft/linalg/gemv.h> #include <raft/linalg/unary_op.cuh> #include <raft/cudart_utils.h> #include <thrust/device_ptr.h> #includ...
the_stack
#include "cupoch/geometry/image.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/utility/console.h" #include "cupoch/utility/platform.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/shader/texture_phong_shader.h" #include "cupoch/vis...
the_stack
#include "cuda_pointer.h" //#include "cutil_inline.h" namespace regf4 { #include "dev_regf4.cu" #define NJBLOCK 32 #define NJBLOCK2 32 #define NJTHREAD 128 #define NPIPE 512 enum {NGB_PER_BLOCK = NGBlist::NGB_MAX}; /************************************************/ int ni_tot, ni_tot_round, ni_max; do...
the_stack
using namespace cuHE; //#define checkEachRound const int circuitDepth = 25; int RC[768] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,1,0,0,1,1,0,0,0,1,1,0,0,1,1,0,0,0,1,0,1,0,0,0,1,0,1,1,1,0, 0,0,0,0,0,0,1,1,0,1,1,1,0,0...
the_stack
#include "MemoryManager.h" #include "cuda_runtime.h" using namespace std; MemSegment::MemSegment():_basePointer(NULL),_size(0),_type(UnDefined) {}; MemSegment::MemSegment(void * ptr, size_t size, MemoryType type):_basePointer((char*)ptr),_size(size),_type(type){}; MemSegment::MemSegment(const MemSegment& src):_baseP...
the_stack
namespace surfelwarp { namespace device { //The kernel to build the candidate surfel and finite diff vertex __global__ void buildCandidateSurfelAndFiniteDiffVertexKernel( cudaTextureObject_t depth_vertex_confid_map, cudaTextureObject_t depth_normal_radius_map, cudaTextureObject_t color_time_map, mat34 camera...
the_stack
#include <cuda.h> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <Eigen/Core> #include <Eigen/Dense> #include <sophus/se3.hpp> #include <vector> #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * g...
the_stack
#include "common.cuh" #include <kat/on_device/collaboration/grid.cuh> #include <kat/on_device/collaboration/block.cuh> #include <kat/on_device/collaboration/warp.cuh> #include <kat/on_device/atomics.cuh> using std::size_t; #if __cplusplus < 201701L #include <experimental/optional> template <typename T> using optiona...
the_stack
/** @addtogroup cudpp_app * @{ */ /** @name StringSort Functions * @{ */ #include "cuda_util.h" #include "cudpp.h" #include "cudpp_util.h" #include "cudpp_stringsort.h" #include "kernel/stringsort_kernel.cuh" #include "limits.h" #define BLOCKSORT_SIZE 1024 #define DEPTH 8 void dotAdd(unsigned int* d_address, ...
the_stack
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k)))) /* ------------------------------------------------------------------------------- mix -- mix 3 32-bit values reversibly. This is reversible, so any information in (a,b,c) before mix() is still in (a,b,c) after mix(). If four pairs of (a,b,c) inputs are...
the_stack
* \file * cub::BlockScanRaking provides variants of raking-based parallel prefix scan across a CUDA threadblock. */ #pragma once #include "../../util_ptx.cuh" #include "../../util_arch.cuh" #include "../../block/block_raking_layout.cuh" #include "../../thread/thread_reduce.cuh" #include "../../thread/thread_scan.cu...
the_stack
#include <stdio.h> #include <stdlib.h> // CHECK: #include <hip/hip_runtime.h> #include <cuda_runtime.h> // CHECK: #include "hipsparse.h" #include "cusparse.h" // CHECK: if (y) hipFree(y); // CHECK: if (z) hipFree(z); // CHECK: if (xInd) hipFree(xInd); // CHECK: if (xVal)...
the_stack
#include "SuperSmooth.h" #include <iostream> using namespace std; #include <stdio.h> // 宏:LOCALCLUSTER_DEF_BLOCK_X 和 LOCALCLUSTER_DEF_BLOCK_Y // 以及 LOCALCLUSTER_DEF_BLOCK_Z // 定义了第 1 个 kernel 默认的线程块的尺寸,本算法中采用了三维的线程块。 #define LOCALCLUSTER_DEF_BLOCK_X 32 #define LOCALCLUSTER_DEF_BLOCK_Y 2 #define LOCALCLUSTER_DEF_BL...
the_stack
* \brief * Implements PmeGpuProgramImpl, which stores permanent PME GPU context-derived data, * such as (compiled) kernel handles. * * \author Aleksei Iupinov <a.yupinov@gmail.com> * \ingroup module_ewald */ #include "gmxpre.h" #include "pme_gpu_program_impl.h" #include "pme_gpu_constants.h" #include "pme_gpu_i...
the_stack
namespace tensorflow { #define FINAL_MASK 0xffffffff template <typename T> __inline__ __device__ T warpReduceSum(T val) { for(int mask = 16; mask > 0; mask >>= 1) val += __shfl_xor_sync(FINAL_MASK, val, mask, 32); return val; } /* Calculate the sum of all elements in a block */ template <typename T> __inline...
the_stack
#include "../fixed_point.hpp" #include "k_fixed_point.cuh" #include "kernel_utils.cuh" #include "surreal.cuh" #define WARPSIZE 32 #define PI 3.141592653589793115997963468544185161 #define TWO_OVER_SQRT_PI 1.128379167095512595889238330988549829708 // generate kv values from coordinates to be radix sorted void __global...
the_stack
* Simple caching allocator for device memory allocations. The allocator is * thread-safe and capable of managing device allocations on multiple GPUs. ******************************************************************************/ #pragma once #include <math.h> #include <set> #include <map> #include "../util/ns_umb...
the_stack
#include "fastertransformer/cuda/cub/cub.cuh" #include "fusion_gpt_op.h" #include "pd_traits.h" template <paddle::DataType D> std::vector<paddle::Tensor> gpt2_kernel( const paddle::Tensor& input, const paddle::Tensor& attn_mask, const paddle::Tensor& start_length, const paddle::Tensor& word_emb, c...
the_stack
extern "C" __global__ void bmm_tn( const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C, int M, int N, int K ){ int tid = threadIdx.x; // thread idx int bid = blockIdx.z; // batch idx // Neighboring blocks are grouped into PN x PM block groups in order to increase //...
the_stack
//#define RND_MULTIPLIERS_FILE ("rnd_multipliers_32bit.txt") #ifndef RND_MULTIPLIERS_FILE #define RND_MULTIPLIERS_FILE ("rnd_multipliers_32bit.txt") #endif #include <pthread.h> #include <map> #include <cublas_v2.h> #include <cuda.h> #include <curand.h> #include <cutil_inline.h> #include <time.h> #include <curand_kern...
the_stack
* \file dnn/src/cuda/conv_bias/quint4x4x32_wmma/reduce_with_scale_data.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the Licen...
the_stack
#include "../grid_info.cuh" #include <kat/on_device/shuffle.cuh> #include <kat/on_device/builtins.cuh> #include <kat/on_device/atomics.cuh> #include <kat/on_device/non-builtins.cuh> #include <kat/on_device/ptx.cuh> #include <kat/on_device/math.cuh> #include <kat/on_device/common.cuh> #include <type_traits> ///@cond...
the_stack
#pragma once #include <math/vector.h> #include <math/matrix.h> #include "config.h" #include "fragment_data.cuh" #include "IntermediateGeometryStorage.cuh" #include "shaders/vertex_simple.cuh" #include "shaders/clipspace.cuh" #include "ptx_primitives.cuh" extern "C" { struct Viewport { float...
the_stack
//#include <cutil.h> // cutil32.lib #include <cutil_math.h> // cutil32.lib #include <string.h> #include <assert.h> #include <windows.h> //#include <cuda_gl_interop.h> #include <stdio.h> #include <math.h> extern void app_printf ( char* format, ... ); extern void app_printEXIT ( char* format, ... ); extern char ...
the_stack
__global__ void cuda_calc_curve_values( REAL const * parameters, int const n_fits, int const n_points, int const n_parameters, int const * finished, REAL * values, REAL * derivatives, int const n_fits_per_block, int const n_blocks_per_fit, ModelID const model_id, int const ch...
the_stack
#include <stdio.h> #include <pthread.h> #include <cuda.h> #include <cutil.h> #include <multithreading.h> __global__ void eval_multi_UBspline_3d_cuda_c (const float *coefs, float *abc, float *vals, int ix, int iy, int iz, int xs, int ys, int zs, int N) { int block = blockIdx.x; int thr = thre...
the_stack
namespace cgbn { template<class env> __device__ __forceinline__ void core_t<env>::sqrt_resolve_rem(uint32_t rem[LIMBS], const uint32_t s[LIMBS], const uint32_t top, const uint32_t r[LIMBS], const uint32_t shift) { uint32_t sync=sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t mask, phi[LIMBS], plo[LIMBS], t...
the_stack
#define RAD 1 // radius of the stencil; helps to deal with "boundary conditions" at (thread) block's ends __constant__ float dev_Deltat[1]; __constant__ float dev_heat_params[2]; int blocksNeeded( int N_i, int M_i) { return (N_i+M_i-1)/M_i; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? ...
the_stack
#include "object.h" #include <cmath> #include "sim.h" #ifdef GRAPHICS #define GLM_FORCE_PURE #endif namespace titan { #ifdef GRAPHICS const Vec RED(1.0, 0.2, 0.2); const Vec GREEN(0.2, 1.0, 0.2); const Vec BLUE(0.2, 0.2, 1.0); const Vec PURPLE(0.5, 0.2, 0.5); #endif __device__ const double NORMAL = 20000; // normal...
the_stack
#pragma once #include "cuda/Complex.cuh" #include <cmath> namespace facebook { namespace cuda { namespace fbfft { namespace { #define PI 0x1.921FB6p+1f /* Computed in Sollya as round(cos(k * pi / 32), single, RN)) */ #define FBFFT32_COSF_0 0x1.p0 #define FBFFT32_COSF_1 0xf.ec46dp-4 #define FBFFT32_COSF_2 0xf.b...
the_stack
#include "separableconvflow_cuda_kernel.cuh" #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/Dispatch.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #define min(a,b) ((a<b)?(a):(b)) #define max(a,b) ((a>b)?(a):(b)) #define DEBUG (0) #ifndef BLOCKDIMX #define BLOCKDIMX (32) #endif #ifndef BLOCKD...
the_stack
#include <cuda.h> #include <cuda_runtime.h> #define L {{L}} #define N {{N}} #define D {{D}} #if D > N #error "D must be less than or equal to N" #endif #define min_macros(a,b) ((a) < (b) ? (a) : (b)) #define ASSET_DEBUG {{ASSET_DEBUG}} #define ULL unsigned long long /** * The maximum number of thre...
the_stack
#ifdef _WIN32 #pragma warning(disable : 4244) #endif #include "orttraining/training_ops/cuda/tensor/gather_grad_impl.h" #include <cub/device/device_radix_sort.cuh> #include <cub/device/device_reduce.cuh> #include <cub/device/device_run_length_encode.cuh> #include <cub/device/device_scan.cuh> #include <cub/iterator/co...
the_stack
template <unsigned int block_size> __global__ void FarthestPointSamplingKernel( // clang-format off const at::PackedTensorAccessor64<float, 3, at::RestrictPtrTraits> points, const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits> lengths, const at::PackedTensorAccessor64<int64_t, 1, at::Rest...
the_stack
#define BLOCK_DIM_X 1024 #define BLOCK_DIM_Y 1 #define MPI_CALL(call) \ { \ int mpi_status = call; \ ...
the_stack
using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif long fsize(int fd) { struct stat stat; int res = fstat(fd, &stat); return stat.st_size; } int printll(char *s) { while (*s != '\n' && *s != ',' && *s != '\t') { putchar(*s++); } return 0; } long hash(char *str0, int len) { uns...
the_stack
using namespace std; #define EIGEN_USE_GPU #define maxThreadsPerBlock 1024 __global__ void _qsgdreduceSumV2(float *g_odata, float *g_idata, unsigned int n) { extern __shared__ float sdata[]; // set thread ID unsigned int tid = threadIdx.x; unsigned int gridSize = blockDim.x * gridDim.x; unsigned ...
the_stack
#ifndef HELPER_MATH_H #define HELPER_MATH_H #include <cuda.h> typedef unsigned int uint; typedef unsigned short ushort; typedef unsigned char uchar; typedef uchar3 bool3; #ifndef __CUDACC__ #include <math.h> inline __host__ __device__ int imax3 (int a, int b, int c) { return (a>b) ? ((a>c) ? a : c) : ((b>c) ? b :...
the_stack
#if defined(USE_ROCM) #include <cfloat> #endif using caffe2::utils::RotatedBox; namespace caffe2 { namespace { __global__ void GeneratePreNMSUprightBoxesKernel( const int* d_sorted_scores_keys, const int nboxes_to_generate, const float* d_bbox_deltas, const float4* d_anchors, const int H, cons...
the_stack
namespace kfusion { namespace device { texture<ushort, 2> dprev_tex; texture<Normal, 2> nprev_tex; texture<Point, 2> vprev_tex; struct ComputeIcpHelper::Policy { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8, CTA_SIZE = CTA_SI...
the_stack
#include "utils.cuh" #include <limits> #include <raft/device_atomics.cuh> namespace raft { namespace mst { namespace detail { template <typename vertex_t, typename edge_t, typename alteration_t> __global__ void kernel_min_edge_per_vertex(const edge_t* offsets, const vertex...
the_stack
#pragma once #include <gunrock/util/sort_device.cuh> #include <gunrock/app/enactor_base.cuh> #include <gunrock/app/enactor_iteration.cuh> #include <gunrock/app/enactor_loop.cuh> #include <gunrock/app/gtf/gtf_problem.cuh> #include <gunrock/oprtr/oprtr.cuh> #define debug_aml(a...) #include <gunrock/app/mf/mf_enactor.cu...
the_stack
#include <aggregation/coarseAgenerators/thrust_coarse_A_generator.h> #include <thrust/system/detail/generic/reduce_by_key.h> #include <thrust/remove.h> #include <thrust/iterator/transform_iterator.h> #include <error.h> #include <cutil.h> #include <types.h> #include <cusp/detail/format_utils.h> namespace amgx { namesp...
the_stack
#define CMAX(x,y,z) (imax3(x,y,z)-imin3(x,y,z)) #define RGBA2INT(r,g,b,a) ( (uint((a)*255.0f)<<24) | (uint((b)*255.0f)<<16) | (uint((g)*255.0f)<<8) | uint((r)*255.0f) ) #define CLR2INT(c) ( (uint((c.w)*255.0f)<<24) | (uint((c.z)*255.0f)<<16) | (uint((c.y)*255.0f)<<8) | uint((c.x)*255.0f) ) #define INT2CLR(c) ( ma...
the_stack
Compile: nvcc -arch=sm_52 -O3 -lcublas -lcurand -o LSTM LSTM.cu To enable/disable different performance options add the flag -DPERFOPTSx Where x is a bitmask defining the options used (see below). Run: ./LSTM or ./LSTM <seqLength> <numLayers> <hiddenSize> <miniBatch> Example (run on a...
the_stack
#include <iostream> #include <math.h> #include "LocalHistogramEqualization.h" using namespace std; // Macros: DEF_BLOCK_X and DEF_BLOCK_Y // Define the default thread block dimensions. #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // Number of image gray levels #define HISTO_NUM 256 // Kernel function: _hisEqualHistoKer (computes the histograms of all windows in parallel) // After the image is divided into windows according to the partition count, a histogram is computed for each window. ...
the_stack
#include <gflags/gflags.h> #include <glog/logging.h> #include <cuda_profiler_api.h> #include "cuNVSM/model.h" #include "cuNVSM/hdf5.h" #include "cuNVSM/gradient_check.h" DEFINE_uint64(num_epochs, 100000, "Number of training iterations."); DEFINE_uint64(document_cutoff, 0, "Number of documents per epoch (default: a...
the_stack
#include "lbann/operators/math/unary.hpp" #include "lbann/base.hpp" #include "lbann/utils/gpu/helpers.hpp" #include "common.cuh" namespace lbann { namespace { // ========================================================= // Operator objects for entry-wise unary layers // ============================================...
the_stack
namespace arboretum { namespace core { using thrust::device_vector; using thrust::host_vector; template <typename SUM_T, typename NODE_VALUE_T> __global__ void gain_kernel( const SUM_T *const __restrict__ left_sum, const NODE_VALUE_T *const __restrict__ segments_fvalues, const unsigned span, const SUM_T *const _...
the_stack
#ifdef _WIN32 #pragma warning (push) #pragma warning (disable : 4244 4267 4521) #endif #include <cusp/csr_matrix.h> #include <cusp/io/matrix_market.h> #ifdef _WIN32 #pragma warning (pop) #endif #include <types.h> #include <iomanip> #include <map> #include <basic_types.h> #include <matrix.h> #include <amgx_timer.h> #i...
the_stack
#include "miner.h" extern "C" { #include "sph/sph_blake.h" } #include "cuda_helper.h" #ifdef __INTELLISENSE__ #define __byte_perm(x, y, b) x #endif /* threads per block and nonces per thread */ #define TPB 768 #define NPT 384 #define maxResults 8 __constant__ uint32_t _ALIGN(16) c_data[20]; /* 8 adapters max */ s...
the_stack
// For compatibility with Pytorch 1.1 #ifndef TORCH_CHECK #define TORCH_CHECK AT_CHECK #endif #define BFLY_BENCHMARK false #define BFLY_MAX5_BENCHMARK false // Only support float (not double) for now to speed up compilation time #undef AT_DISPATCH_FLOATING_TYPES #define AT_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) ...
the_stack
namespace dart { inline __host__ __device__ unsigned char clamp(int c) { return min(max(0,c),255); } // h: 0-360 // s: 0 - 1 // v: 0 - 1 inline __host__ __device__ uchar3 hsv2rgb(float h, float s, float v) { float c = v*s; float hPrime = h/60.0f; float x = c*(1 - fabs(fmodf(hPrime,2) - 1)); float ...
the_stack
* \file * cub::DeviceReduce provides device-wide, parallel operations for computing a reduction across a sequence of data items residing within device-accessible memory. */ #pragma once #include <stdio.h> #include <iterator> #include <limits> #include "dispatch/dispatch_reduce.cuh" #include "dispatch/dispatch_redu...
the_stack
#include <cmath> #include <cfloat> #include <cstdio> #include <cstdlib> #include <cassert> #include <iostream> #include <algorithm> #include <complex> #include "config.h" #include "symbol.h" #include "dvector.h" #include "util.h" #include "recfilter.h" namespace rod { #if CUDA_SM >= 20 # define W1 8 # define NB...
the_stack
#include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/logical.h> #include <thrust/sequence.h> #include <thrust/transform.h> namespace cudf { namespace lists { namespace detail { namespace { /** * @brief Concatenate lists within the same row into one list, ignoring any null list during ...
the_stack
using namespace std; typedef ML::CUDA::Test_Buckets_Binsym::Float Float; typedef ML::CUDA::Test_Buckets_Binsym::TwoBuckets TwoBuckets; typedef ML::shift_t shift_t; /** Execution kernel Parameters: - example_data: structure (packed into memory: 4-12 bytes per entry, size is number of feature occurrences...
the_stack
#include "Device/Util/SafeCudaAPI.cuh" //__cudaErrorHandler #include <cassert> //assert #if defined(NEVER_DEFINED) #include "SafeFunctions_.cuh" #endif ///@cond #define cuMemcpyDevToDev(...) \ xlib::detail::cuMemcpyDevToDevAux(__FILE__, __L...
the_stack
// update: updated to use long for some integers associated with file size to support large images. // Cunren Liang, 26-MAR-2018 #include <cuda_runtime.h> #include <math.h> #include <stdio.h> #include <sys/time.h> #define THRD_PER_BLOCK 96 // Number of threads per block (should always %32==0) // ---------...
the_stack
#define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ std::cout << cudaGetErrorString(error) << std::endl; \ } \ } while (0) #define CUDA_1D_KERNEL_LOOP(i, n) \...
the_stack
#define BIN_SIZE 32 using namespace std; #define CHECK(res) if(res!=cudaSuccess){exit(-1);} #define BLOCKNUM 1024 #define THREADNUM 64 __global__ void _k_copy_padding_data_blob_gpu(float_t *data_input, float_t *data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockI...
the_stack
#include "common/omptarget.h" #include "common/target_atomic.h" #include "target_impl.h" EXTERN void __kmpc_nvptx_end_reduce(int32_t global_tid) {} EXTERN void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {} EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) { return __kmpc_impl_sh...
the_stack
#include "BoundingRect.h" #include <iostream> #include <fstream> #include <cmath> #include <algorithm> using namespace std; // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:DEF_SHARED_LENGTH // 定义了核函数中共享内存的长度 #define DEF_SHARED_LENGTH(sharedarray) (DEF_BLOCK_X * DE...
the_stack
using namespace facebook::cuda; namespace facebook { namespace deeplearning { namespace torch { namespace detail { template <int BatchSize, typename T> __launch_bounds__(256, 6) __global__ void accGradWeight(DeviceTensor<T, 4> input, DeviceTensor<float, 4> gradOutput, ...
the_stack
#include "caffe/layers/pooling_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, c...
the_stack
#include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include "cuda_util.h" #include <iostream> #include "mat.h" __global__ void gpu_concat_forward_dim1(void* inputs, const ncnn::CudaMatInfo* input_info, int* input_offset, const int input_size, ...
the_stack
#include <thrust/host_vector.h> #include "hist_util.h" #include "quantile.cuh" #include "device_helpers.cuh" #include "timer.h" #include "../data/device_adapter.cuh" namespace xgboost { namespace common { namespace detail { struct EntryCompareOp { __device__ bool operator()(const Entry& a, const Entry& b) { if...
the_stack
\brief Convolution 2D profiling */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cutlass/core_io.h" #include "conv2d_operation_profiler.h" #include "gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// using namespace c...
the_stack
#include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <thrust/iterator/counting_iterator.h> namespace cudf { namespace strings { namespace detail { namespace { /** * @brief Compute string sizes, string validities, and concatenate strings functor. * * This functor is executed twice. In the...
the_stack
extern "C" { #include <ccv.h> #include <ccv_internal.h> #include <nnc/ccv_nnc.h> #include <nnc/ccv_nnc_easy.h> #include <nnc/ccv_nnc_internal.h> } #include <nnc/gpu/ccv_nnc_compat.h> #ifdef HAVE_CUDA static int _ccv_nnc_gemm_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* c...
the_stack
//------------------------------------------------------------------------ // Common op attribute parser. static __host__ void parseOpAttributes(OpKernelConstruction* ctx, TextureKernelParams& p) { // Mip and filter modes. OP_REQUIRES_OK(ctx, ctx->GetAttr("filter_mode", &p.filterMode)); OP_REQUIRES(ctx, p....
the_stack
#define NVBIO_CUDA_DEBUG #include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <nvbio/basic/timer.h> #include <nvbio/basic/console.h> #include <nvbio/basic/vector.h> #include <nvbio/basic/cuda/arch.h> #include <nvbio/basic/cuda/condition.h> #include <cub/cub.cuh> namespace nvbio { nam...
the_stack
* This file contains an implementation of some batched sparse matrix * operations in Compressed Sparse Row representation. * * Important: the implementation is designed to give good performance on * large batches of relatively small matrices (typically one or two * elements per row). In other use cases it might be...
the_stack
#include <ATen/cuda/CUDAApplyUtils.cuh> typedef enum { SUM = 0, MEAN = 1, MAX = 2 } reduce_t; #define CHECK_CUDA(x) \ TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) \ TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) \ CHECK_CUDA(x); ...
the_stack
#include <torch/extension.h> #include <cstdint> #include "cuda_util.cuh" #include "data_spec_packed.cuh" namespace { namespace device { __global__ void sample_grid_sh_kernel( PackedSparseGridSpec grid, const torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> points, // Output ...
the_stack
namespace AggMIS { bool CheckCudaError(cudaError_t code, const char* file, int line) { if (code != cudaSuccess) { std::cout << "\n***************** CUDA Error detected ***************\n"; std::cout << "Error: " << cudaGetErrorString(code) << "\n"; std::cout << "In file " << file << " line " << l...
the_stack