text: string (lengths 2.5k – 6.39M)
kind: string (3 distinct values)
#include <ctime> #include <random> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include "core/components/fill_array_kernels.hpp" #include "cuda/base/config.hpp" #include "cuda/base/cublas_bindings.hpp" #include "cuda/base/curand_bindings.hpp" #include "cuda/base/math.hpp" ...
the_stack
namespace mn { using block_signed_distance_field_ = structural<structural_type::dense, decorator<structural_allocation_policy::full_allocation, structural_padding_policy::sum_pow2_align>, BlockDomain, attrib_layout::soa, f32_, f32_, f32_, f32_>;...
the_stack
#include "CUFLU.h" #if ( MODEL == HYDRO ) // external functions #ifdef __CUDACC__ #include "CUFLU_Shared_FluUtility.cu" #else // #ifdef __CUDACC__ void Hydro_Rotate3D( real InOut[], const int XYZ, const bool Forward, const int Mag_Offset ); void Hydro_Con2Flux( const int XYZ, real Flux[], const real In[], const ...
the_stack
#include <stdio.h> texture<float4, 1, cudaReadModeElementType> texNodeSize; texture<float4, 1, cudaReadModeElementType> texNodeCenter; texture<float4, 1, cudaReadModeElementType> texMultipole; texture<float4, 1, cudaReadModeElementType> texBody; __device__ int ngb_cnt(float3 pos_i, float h_i, ...
the_stack
using namespace std; typedef uint8_t uint8; typedef unsigned int uint32; typedef unsigned long long int uint64; #define STREAM_BLOCK 16 #define BLOCK_SIZE 32 #define BLOCK_D_SIZE 64 #define INTEGRAL_BLOCK_SIZE 8 #define XDIM_MAX_THREADS 1024 #define SHARED_MEMORY 49152 #define XDIM_H_THREADS 512 #define XDIM_Q_THREADS ...
the_stack
using namespace FW; //------------------------------------------------------------------------ // Global variables. //------------------------------------------------------------------------ __constant__ int4 c_input[(sizeof(AmbientInput) + sizeof(int4) - 1) / sizeof(int4)]; __device__ S32 g_warpCounter; //...
the_stack
#include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/transform.hpp" #include "opencv2/core/cuda/color.hpp" #include "cvt_color_internal.h" namespace cv { namespace cuda { namespace device { OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(bgra_to_rgba_traits<uchar>::functor_type) { enum { smart_bloc...
the_stack
static inline void THNN_(VolumetricAveragePooling_shapeCheck)( THCState *state, THCTensor *input, THCTensor *gradOutput, int kT, int kW, int kH, int dT, int dW, int dH, i...
the_stack
* @file reader_impl.cu * @brief cuDF-IO CSV reader class implementation */ #include "csv_common.h" #include "csv_gpu.h" #include <io/comp/io_uncomp.h> #include <io/utilities/column_buffer.hpp> #include <io/utilities/hostdevice_vector.hpp> #include <io/utilities/parsing_utils.cuh> #include <io/utilities/type_convers...
the_stack
namespace faiss { namespace gpu { template < typename T, int Dim, bool InnerContig, typename IndexT, template <typename U> class PtrTraits> __host__ __device__ Tensor<T, Dim, InnerContig, IndexT, PtrTraits>::Tensor() : data_(nullptr) { static_assert(Dim > 0, ...
the_stack
void deformable_im2col(DArrayLite data_im, DArrayLite data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, ...
the_stack
******************************************************************************/ #pragma once #include "radixsort_reduction_kernel.cu" #include "radixsort_spine_kernel.cu" #include "radixsort_scanscatter_kernel.cu" namespace b40c { /****************************************************************************** * S...
the_stack
#include "correlation_cuda_kernel.cuh" #define CUDA_NUM_THREADS 1024 #define THREADS_PER_BLOCK 32 #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/Dispatch.h> #include <ATen/cuda/CUDAApplyUtils.cuh> using at::Half; template <typename scalar_t> __global__ void channels_first(const scalar_t* __...
the_stack
#include <raft/cudart_utils.h> #include <raft/cuda_utils.cuh> #include <raft/linalg/binary_op.cuh> #include <raft/linalg/unary_op.cuh> #include <raft/random/rng.hpp> #include <algorithm> #include <numeric> #include <random> #include <stack> #include <device_launch_parameters.h> #include <rmm/device_uvector.hpp> name...
the_stack
#include "IPsecAES_kernel.hh" #include <openssl/aes.h> #include <openssl/md5.h> /* The index is given by the order in get_used_datablocks(). */ #define dbid_enc_payloads_d (0) #define dbid_flow_ids_d (1) #define dbid_iv_d (2) #define dbid_aes_block_info_d (3) #ifndef __AES_CORE__ /*same constants...
the_stack
#include "SegmentedScan.h" #include <cmath> #include <iostream> #include <stdio.h> using namespace std; // 宏:SEG_SCAN_PACK // 定义了核函数中每段处理的数量。 #define SEG_SCAN_PACK 16 // 宏:SEG_SCAN_PACK_NUM // 定义了核函数中处理的段数。 #define SEG_SCAN_PACK_NUM 64 // 宏:SE...
the_stack
// Modifications: CUDA implementation of CPU verison // Copyright 2020 Netease Fuxi AI LAB // SPDX-License-Identifier: Apache-2.0 #include "rasterize_triangles_cuda_impl.h" namespace pytorch_mesh_renderer { // Takes the minimum of a, b, and c, rounds down, and converts to an integer // in the range [low, hig...
the_stack
#pragma once #include "cuda/Complex.cuh" #include "cuda/ComputeCapabilities.cuh" #include "cuda/CudaUtils.cuh" #include "cuda/DeviceTensor.cuh" #include "cuda/fbfft/FBFFTCommon.cuh" #include "cuda/fbfft/FBFFTParameters.h" #include "cuda/fbfft/FFT2D32.cuh" #include "cuda/util/CachedDeviceProperties.h" #include <cuda_r...
the_stack
#include "dragon/core/context_cuda.h" #include "dragon/utils/conversions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { template <typename T> __global__ void _Relu(const int N, const float alpha, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = __ldg(x + i) > ...
the_stack
// CUDA Global Memory variables // Debug counters for some sanity checks #ifdef _DEBUG __device__ size_t debug_d_n_voxels_marked = 0; __device__ size_t debug_d_n_triangles = 0; __device__ size_t debug_d_n_voxels_tested = 0; #endif // Possible optimization: buffer bitsets (for now: Disabled because too much o...
the_stack
#include "caffe/filler.hpp" #include "caffe/layers/inner_distance_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { #define sign(x) (Dtype(0) < (x)) - ((x) < Dtype(0)) template <typename Dtype> __global__ void kernel_channel_dot(const int num, const int dim, ...
the_stack
#pragma once #include <gunrock/app/enactor_base.cuh> #include <gunrock/app/enactor_iteration.cuh> #include <gunrock/app/enactor_loop.cuh> #include <gunrock/oprtr/oprtr.cuh> #include <gunrock/app/knn/knn_problem.cuh> #include <gunrock/app/knn/knn_helpers.cuh> #include <gunrock/util/scan_device.cuh> #include <gunrock/u...
the_stack
// TODO: Run some/all tests for half-precision floating-point values, e.g __half from: // #include <cuda_fp16.h> // TODO: Also test behavior with warps with some inactive/exited lanes #include <kat/detail/execution_space_specifiers.hpp> using std::vector; using std::uniform_int_distribution; template <typename F> i...
the_stack
#include "_reg_common_gpu.h" __device__ __constant__ int c_UseBSpline; __device__ __constant__ int c_VoxelNumber; __device__ __constant__ int c_ControlPointNumber; __device__ __constant__ int3 c_ReferenceImageDim; __device__ __constant__ int3 c_ControlPointImageDim; __device__ __constant__ float3 c_ControlPointVoxelSp...
the_stack
DnnHandle init_cudnn(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, HighLevelRuntime *runtime) { assert(regions.size() == 0); assert(task->arglen == sizeof(size_t)); size_t workSpaceSize = *(const size_t*) task->args; DnnHandle handle; hand...
the_stack
// The key generator template<typename T, typename set_hasher = MurmurHash3_32<T>> class KeyGenerator { public: KeyGenerator(): gen_(rd_()) {} KeyGenerator(T min, T max): gen_(rd_()), distribution_(min, max) {} void fill_unique(T* data, size_t keys_per_set, size_t num_of_set, T empty_value) { if (k...
the_stack
#include "common.h" #include "polish_E.cu" //#include "polydet.cu" //#include "sturm.cu" //#include "polyquotient.cu" //#include "cheirality.cu" #include "kernel_functions.cu" // all CUDA definitions must be compiled in the same logical compilation unit // Never compile FooDevice.cu - import it into main.cu //#include...
the_stack
//////////////////////////////////////////////////////////////////////////////// void get_frustum_bounds(float* K, std::vector<std::vector<float>> &extrinsic_poses, int base_frame, int curr_frame, float* camera_relative_pose, float* view_bounds, float vox_unit, int* vox_size, float* vox_range_c...
the_stack
//////////////////////////////////////////////////////////////////////// Full Volume Scan6 enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 6, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y, MAX_LOCAL_POINTS = 3 }; __device__ int global_count = 0; __device__ int output_count; __device__ unsigned int blocks_done = 0; __shared__ flo...
the_stack
#include "fast_lsh_cumulation_cuda.h" #include "common_cuda_device.h" #include "common_cuda.h" #include "common.h" #include <stdio.h> ////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////...
the_stack
// store allocated textures device addresses static unsigned int* d_textures[MAX_GPUS][4]; #define mixtab0(x) (*((uint32_t*)mixtabs + ( (x)))) #define mixtab1(x) (*((uint32_t*)mixtabs + (256+(x)))) #define mixtab2(x) (*((uint32_t*)mixtabs + (512+(x)))) #define mixtab3(x) (*((uint32_t*)mixtabs + (768+(x)))) static ...
the_stack
namespace nvbio { namespace bowtie2 { namespace cuda { template <typename T> T* resize(bool do_alloc, thrust::device_vector<T>& vec, const uint32 size, uint64& bytes) { bytes += size * sizeof(T); if (do_alloc) { vec.resize( size ); return thrust::raw_pointer_cast(&vec.front()); } re...
the_stack
* An implementation of segmented reduction using a load-balanced parallelization * strategy based on the MergePath decision path. ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <iterator> #include <vect...
the_stack
#include "utility.hpp" using namespace ppl::common; namespace ppl { namespace cv { namespace cuda { #define MAX_BLOCKS 128 __global__ void unmaskedCalcHistKernel0(const uchar* src, int size, int* histogram) { __shared__ int local_histogram[256]; int element_x = (blockIdx.x << 8) + threadIdx.x; int index_x = ...
the_stack
* \file * Operations for writing linear segments of data from the CUDA thread block */ #pragma once #include <iterator> #include "block_exchange.cuh" #include "../util_ptx.cuh" #include "../util_macro.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX ...
the_stack
namespace caffe { #ifdef TODO_REFACTOR #ifdef USE_OPENCL cl_mem weight_image_; const SyncedMemory * copied_weight_data_; bool test_only_; uint64_t weight_image_seq_; gemm_type_t innerprod_type_; bool tuned_; stringstream cache_path_; string key_; #endif #ifdef USE_OPENCL virtual void generate_key()...
the_stack
#include <cusp/ell_matrix.h> #include <cusp/multiply.h> template <typename MemorySpace> void TestEllMatrixView(void) { typedef int IndexType; typedef float ValueType; typedef typename cusp::ell...
the_stack
#include<types.h> #include<cutil.h> #include <vector> template <class Matrix, class Vector> SmoothedMG_AMG_Level<Matrix, Vector>::SmoothedMG_AMG_Level(AMG<Matrix, Vector> *amg) : AMG_Level<Matrix, Vector>(amg) { aggregator = Aggregator<Matrix, Vector>::allocate(amg->aggregatorType_); // DHL } template <class Mat...
the_stack
namespace sqint = sqaod_internal; using namespace sqaod_cuda; #define SQAODC_VECTORIZE_JQ #ifdef SQAODC_VECTORIZE_JQ template<class real> using DotJq = DeviceDotJqVec4<real, real*>; #else template<class real> using DotJq = DeviceDotJq<real, real*>; #endif template<class real> using DotSpins = DeviceDotSpins<real, ch...
the_stack
//#include <rendercheck_gl.h> #if defined(__APPLE__) || defined(MACOSX) #include <GLUT/glut.h> #else #include <GL/glut.h> #endif #include "fluidsGL_kernels.cuh" #define MAX_EPSILON_ERROR 1.0f // Define the files that are to be save and the reference images for validation const char *sOriginal[] = { "fluidsGL.p...
the_stack
#pragma once namespace gunrock { namespace app { /** * @brief Base data slice structure which contains common data structural needed * for primitives. * * @tparam SizeT Type of unsigned integer to use for array * indexing. (e.g., uint32) * @tparam VertexId Type of signed integer to use ...
the_stack
//#include "util.h" #include "util.cuh" #include "util_type.h" #include "util_type_internal.h" #include "util_func.h" #include "update_ops_cuda.h" #include "update_ops_cuda_device_functions.h" __global__ void H_gate_gpu(unsigned int target_qubit_index, GTYPE *state_gpu, ITYPE dim) { ITYPE j = blockIdx.x * blockDim.x ...
the_stack
// includes, system #include <stdio.h> #include <stdlib.h> // includes, project #include <sdkHelper.h> // helper for shared that are common to CUDA SDK samples #include <shrQATest.h> // This is for automated testing output (--qatest) #include <shrUtils.h> // includes CUDA #include <cuda_runtime.h> #include <cuda.h>...
the_stack
Parallel reduction kernels */ #ifndef _REDUCE_KERNEL_H_ #define _REDUCE_KERNEL_H_ #if 1 #define EMUSYNC __syncthreads() #else #define EMUSYNC #endif #include <device_functions.h> /* Parallel sum reduction using shared memory - takes log(n) steps for n input elements - uses n/2 threads - only works f...
the_stack
static const uint64_t old1_T0[256] = { 0x78D8C07818281818, 0xAF2605AF23652323, 0xF9B87EF9C657C6C6, 0x6FFB136FE825E8E8, 0xA1CB4CA187948787, 0x6211A962B8D5B8B8, 0x0509080501030101, 0x6E0D426E4FD14F4F, 0xEE9BADEE365A3636, 0x04FF5904A6F7A6A6, 0xBD0CDEBDD26BD2D2, 0x060EFB06F502F5F5, 0x8096EF80798B7979, 0xCE305FCE6FB16F6...
the_stack
#include "decode.hpp" using namespace std; #define B_ELEM_PT 16 // each thread process 16 var. firstly #define B_ELEM_BITSHIFT 4 // log2(B_ELEM_PT) #define B_THREADS_PER_BLOCK 64 #define B_BLOCK_VAR_NUMS (B_ELEM_PT*B_THREADS_PER_BLOCK) template <typename T> __forceinline__ __device__ void compAndSwapIndices(T*...
the_stack
#include <cub/cub.cuh> #include <cublas_v2.h> #include <cuda_fp16.h> #include <cuda_runtime.h> #include <math_constants.h> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" #include "longformer_attention_softmax.h" #include "attention_impl.h" using namespace onnxruntime::cud...
the_stack
#include <utility> #include "paddle/fluid/framework/custom_tensor_utils.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/complex.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platf...
the_stack
#include "pcl/gpu/features/device/rodrigues.hpp" #include "pcl/gpu/features/device/pair_features.hpp" namespace pcl { namespace device { struct PpfImpl { enum { CTA_SIZE = 256 }; PtrSz<PointType> points; const NormalTy...
the_stack
#include "_reg_optimiser_gpu.h" #include "_reg_optimiser_kernels.cu" /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ reg_optimiser_gpu::reg_optimiser_gpu() :reg_optimiser<float>::reg_optimiser() { this->currentDOF...
the_stack
#define TPB1 32 #define TPB2 128 #define Nrow 4 #define Ncol 4 #define u64type uint4 #define vectype uint48 #define memshift 3 __device__ __forceinline__ uint2 LD4S(const int index,const uint2* shared_mem) { return shared_mem[(index * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x]; } __device__ __forceinline...
the_stack
#include <functional> #include <algorithm> THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN const size_t NUM_SAMPLES = 10000; template <class InputVector, class OutputVector, class Operator, class ReferenceOperator> void TestUnaryFunctional(void) { typedef typename InputVector::value_type InputType; ...
the_stack
#include <cuda.h> #include <limits.h> #include "THCHalf.h" /// Class for numeric limits of the particular data type, which /// includes support for `half`. /// Unfortunately since `half` does not have a constructor, these have /// to be expressed as functions (either that or non-const statics). template <typename T> s...
the_stack
#include "flowfilter/gpu/util.h" #include "flowfilter/gpu/error.h" #include "flowfilter/gpu/update.h" #include "flowfilter/gpu/device/update_k.h" namespace flowfilter { namespace gpu { FlowUpdate::FlowUpdate() : Stage() { __configured = false; __inputFlowSet = false; __inputImageSet = false; __i...
the_stack
* \file * cub::DeviceReduce provides device-wide, parallel operations for computing a reduction across a sequence of data items residing within global memory. */ #pragma once #include <stdio.h> #include <iterator> #include "dispatch/device_reduce_dispatch.cuh" #include "../util_namespace.cuh" /// Optional outer n...
the_stack
#pragma once #include <limits> #include <atomic> #include <gunrock/util/cuda_properties.cuh> //#include <gunrock/util/types.cuh> #ifndef MEMBERBASK #define MEMBERMASK 0xffffffffu #endif #ifndef MEMBERMASK #define MEMBERMASK 0xffffffffu #endif #ifndef WARPSIZE #define WARPSIZE 32 #endif #if (__CUDACC_VER_MAJOR__ >=...
the_stack
#define DETERMINE_INDICES_4_GPU()\ int tx = KW_LOCAL_ID_0;\ int state = tx & 0x3;\ int pat = tx >> 2;\ int patIdx = KW_LOCAL_ID_1;\ int matrix = KW_GROUP_ID_1;\ int pattern = __umul24(KW_GROUP_ID_0, PATTERN_BLOCK_SIZE * 4) + multBy4(patIdx) + pat;\ int deltaPartialsByState = multBy16(KW_GROU...
the_stack
#include <cstdlib> #include <ctime> #include <cstdio> using namespace akg_reduce; using namespace std; template <typename T> void CompareResults(T *arr1, T *arr2, int len) { double total_err = 0.0; bool flag = true; for (auto i = 0; i < len; i++) { if (std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransfo...
the_stack
#include "k2/csrc/ragged_ops.h" #include "k2/python/csrc/torch/torch_util.h" #include "k2/python/csrc/torch/v2/autograd/index_and_sum.h" #include "k2/python/csrc/torch/v2/autograd/normalize.h" #include "k2/python/csrc/torch/v2/autograd/sum.h" #include "k2/python/csrc/torch/v2/ragged_any.h" namespace k2 { static void ...
the_stack
////////////////////////////// ////////////////////////////// ////////////////////////////// //Helper functions for leaf-nodes __device__ void compute_monopole(double &mass, double &posx, double &posy, double &posz, float4 pos){ mass += pos.w; p...
the_stack
#include "EdgeCheck.h" #include <iostream> #include <cmath> using namespace std; // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:DEF_BLOCK_1D // 定义一维块大小。 #define DEF_BLOCK_1D 256 // 宏:DEF_COL_MAX // 定义欧式距离最大列数。 #define DEF_COL_MAX 1024 // 宏:DEF_HUMOM_SIZE // ...
the_stack
/////////////////////////////////////////////////////////////////////////// // Copyright 1993-2012 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any ...
the_stack
#include <cub/cub.cuh> #include <cub/device/device_scan.cuh> #include <libvis/cuda/cuda_auto_tuner.h> #include "badslam/cuda_util.cuh" #include "badslam/cuda_matrix.cuh" #include "badslam/surfel_projection_nvcc_only.cuh" #include "badslam/util.cuh" #include "badslam/util_nvcc_only.cuh" namespace vis { __global__ voi...
the_stack
#include "FunctionPointers_kernels.h" // Texture reference for reading image texture<unsigned char, 2> tex; extern __shared__ unsigned char LocalBlock[]; static cudaArray *array = NULL; #define RADIUS 1 // pixel value used for thresholding function, works well with sample image 'lena' #define THRESHOLD 150.0f #ifde...
the_stack
#define TPB 512 #define TFBIGMIX8e(){\ p0+=p1;p2+=p3;p4+=p5;p6+=p7;p1=ROTL64(p1,46) ^ p0;p3=ROTL64(p3,36) ^ p2;p5=ROTL64(p5,19) ^ p4;p7=ROTL64(p7,37) ^ p6;\ p2+=p1;p4+=p7;p6+=p5;p0+=p3;p1=ROTL64(p1,33) ^ p2;p7=ROTL64(p7,27) ^ p4;p5=ROTL64(p5,14) ^ p6;p3=ROTL64(p3,42) ^ p0;\ p4+=p1;p6+=p3;p0+=p5;p2+=p7;p1=ROTL64(...
the_stack
#include <vector> #include "thrust/functional.h" #include "thrust/sort.h" namespace anakin { namespace saber { // caffe util_nms.cu #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const *const a, float const *const b) {...
the_stack
* \file * AgentScanByKey implements a stateful abstraction of CUDA thread blocks for participating in device-wide prefix scan by key. */ #pragma once #include <iterator> #include "single_pass_scan_operators.cuh" #include "../block/block_load.cuh" #include "../block/block_store.cuh" #include "../block/block_scan.cu...
the_stack
using namespace optix; // Array of texture sampler IDs for the material expression associated with this OptiX program. rtDeclareVariable(int, texture_sampler_ids, , ); typedef rtBufferId<int> BufferInt; // The wrap mode determines the texture lookup behavior if a lookup coordinate // is exceeding the normalized hal...
the_stack
#include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <nvbio/basic/timer.h> #include <nvbio/basic/console.h> #include <nvbio/basic/cuda/arch.h> #include <nvbio/basic/cuda/ldg.h> #include <nvbio/strings/string_set.h> #include <thrust/device_vector.h> namespace nvbio { namespace string_...
the_stack
namespace arboretum_test { using arboretum::core::ContinuousTreeGrower; using arboretum::core::GainFunctionParameters; using arboretum::core::my_atomics; TEST(ContinuousTreeGrower, DISABLED_RootSearchCategoryFeature) { const size_t size = 32; auto grower = ContinuousTreeGrower<unsigned int, unsigned short, flo...
the_stack
// CHECK: #include <hip/hip_runtime.h> #include <cuda.h> #include <string> #include <stdio.h> int main() { printf("09. CUDA Driver API Functions synthetic test\n"); unsigned int flags = 0; size_t bytes = 0; size_t bytes_2 = 0; void* image = nullptr; std::string name = "str"; // CHECK: hipDevice_t device...
the_stack
// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under...
the_stack
#include <isce3/core/Constants.h> #include <isce3/core/DateTime.h> #include <isce3/core/Ellipsoid.h> #include <isce3/core/LookSide.h> #include <isce3/core/LUT1d.h> #include <isce3/core/Peg.h> #include <isce3/core/Pegtrans.h> #include <isce3/core/Projections.h> #include <isce3/geometry/DEMInterpolator.h> #include <isce3...
the_stack
#define CUB_STDERR #include <iterator> #include <cub/warp/warp_load.cuh> #include <cub/iterator/cache_modified_input_iterator.cuh> #include <cub/iterator/discard_output_iterator.cuh> #include <cub/util_allocator.cuh> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sequence.h> #inc...
the_stack
#include "common.h" #include "multilgKernels.h" #include "transformerKernels.h" /** @file Implemented the cuda kernel function and its launcher that required by multilingual nmt model. Currently, fp16 and fp32 versions are provided */ namespace lightseq { namespace cuda { /** @brief: ker_multilg_enc_emb for encoder, lo...
the_stack
struct cmp_functor_dict { const unsigned long long* source; bool *dest; const unsigned int *pars; cmp_functor_dict(const unsigned long long int* _source, bool * _dest, const unsigned int * _pars): source(_source), dest(_dest), pars(_pars) {} template <typename IndexType> __host__ __device__ void operator()(...
the_stack
namespace hornets_nest { __device__ __forceinline__ void initialize(vid_t diag_id, vid_t u_len, vid_t v_len, vid_t* __restrict__ u_min, vid_t* __restrict__ u_max, vid_t* __restrict__ v_min, vid_t* __restrict__ v_max, ...
the_stack
#include<cublas_v2.h> #include<iostream> #include<vector> #include<logger.hpp> #include<utils.hpp> using namespace livai::tts::waveglow; using namespace livai::tts::common; __forceinline__ __device__ float sigmoidf(float in) { return 1.f / (1.f + expf(-in)); } /* kernel to apply gated activation function on inp...
the_stack
#include <cfloat> #include <vector> #include "caffe/layers/warp_layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/gpu_util.cuh" namespace caffe { template <typename Dtype> __global__ void truncate_interp2_fwd(const int nthreads, const Dtype *bottom_0_data_, const Dtype *bottom_1_data_, ...
the_stack
#include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" #include "opencv2/core/cuda/limits.hpp" namespace cv { namespace cuda { namespace device { namespace stereobp { /////////////////////////////////////////////////////////////// /////////////////////// load con...
the_stack
#define _4HALF2_ 4 #define _8HALF_ 8 #define _INT4_TO_4INT_ 4 #define _INT4_TO_8HALF_ 8 #define _INT4_TO_4FLOAT_ 4 #include <cuda_fp16.h> #include "cudakernel/common/macro.h" __global__ void MergeConvSplitResults( int4 *input, int4 *output, int split_height_v1, int split_width_v8, ...
the_stack
#include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" //#include "caffe/vision_layers.hpp" #include "ctpn_layers.hpp" namespace caffe { template <typename Dtype> __device__ Dtype sigmoid(const Dtype x) { return Dtype(1) ...
the_stack
#include "Open3D/Core/Kernel/BinaryEW.h" #include "Open3D/Core/CUDAState.cuh" #include "Open3D/Core/CUDAUtils.h" #include "Open3D/Core/Dispatch.h" #include "Open3D/Core/Kernel/CUDALauncher.cuh" #include "Open3D/Core/Tensor.h" namespace open3d { namespace kernel { template <typename scalar_t> static OPEN3D_HOST_DEVIC...
the_stack
#ifndef _DALI_KERNELS_REDUCE_REDUCE_AXES_GPU_IMPL_CUH #define _DALI_KERNELS_REDUCE_REDUCE_AXES_GPU_IMPL_CUH #include "dali/core/util.h" #include "dali/core/geom/vec.h" #include "dali/kernels/reduce/reductions.h" #include "dali/kernels/reduce/reduce_all_gpu_impl.cuh" #include "dali/kernels/reduce/reduce_common.cuh" #i...
the_stack
#include "WarpingSolverParameters.h" #include "WarpingSolverState.h" #include "WarpingSolverUtil.h" #include "WarpingSolverEquations.h" #include <assert.h> #include <stdio.h> #include <stdint.h> #include "CUDATimer.h" #ifdef _WIN32 #include <conio.h> #endif #ifdef _WIN32 #define EXPORT __declspec(dl...
the_stack
__global__ void gcrs_m_1_w_4_coding_dotprod( int k, int index, const long *__restrict in, long *__restrict out, const unsigned int *__restrict bm, int size) { HIP_DYNAMIC_SHARED(long, shared_data); int w = 4; int i,j; long result = 0; const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF; int...
the_stack
// Visualizing Errors in Rendered High Dynamic Range Images // Eurographics 2021, // by Pontus Andersson, Jim Nilsson, Peter Shirley, and Tomas Akenine-Moller. // Pointer to the paper: https://research.nvidia.com/publication/2021-05_HDR-FLIP. // FLIP: A Difference Evaluator for Alternating Images // High Performance G...
the_stack
#include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <nppdefs.h> #include "cuda_util.h" #include "mat.h" #include "softmax_cuda.h" #include <iostream> __global__ void gpu_softmax_reduce_find_max_row(const float* a_input, const ncnn::CudaMatInfo a_info, float* scratchpad_memory) { e...
the_stack
#define TPB52 512 #define TPB50 512 /* ************************ */ __constant__ const uint2 buffer[152] = { {0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C434,0xEABE394C},{0x1A75B523,0x891112C7},{0x660FCC33,0xAE18A40B}, {0x9746DF03,0x0D...
the_stack
#define GET_BLOB_ADDRESS(ptr, offset) (&((ptr)[(offset)/sizeof((ptr)[0])])) #define GET_ARRAY_CAPACITY(ptr) (((long *)(ptr))[0]) #define GET_ARRAY_LENGTH(ptr) (((long *)(ptr))[1]) #define GET_ARRAY_BODY(ptr) (&((ptr)[128/sizeof((ptr)[0])])) #define SET_ARRAY_CAPACITY(ptr, val) { (((long *)(ptr))[0]) = (val); } #defi...
the_stack
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, th...
the_stack
#include <cugraph/serialization/serializer.hpp> #include <utilities/graph_utils.cuh> #include <raft/device_atomics.cuh> #include <thrust/copy.h> #include <type_traits> namespace cugraph { namespace serializer { template <typename value_t> void serializer_t::serialize(value_t val) { auto byte_buff_sz = sizeof(va...
the_stack
#include <nccl.h> #include <memory> #include <utility> namespace oneflow { namespace boxing { namespace collective { namespace { ncclRedOp_t GetNcclReduceOp(ReduceMethod reduce_method) { if (reduce_method == kReduceMethodSum) { return ncclRedOp_t::ncclSum; } else { UNIMPLEMENTED(); return ncclRedO...
the_stack
#include <assert.h> #include <stdio.h> #include <stdint.h> #include <sys/socket.h> // AF_LOCAL #include <poll.h> // POLL #include <sys/param.h> #include <rdma_dgram/rsocket.h> #define O_NONBLOCK 00004 #define O_GPUNET_BOUNCE 04000 #ifndef EWOULDBLOCK #define EWOULDBLOCK 11 #endif #ifndef UINT32_MAX #def...
the_stack
#include "cupoch/geometry/image.h" #include "cupoch/geometry/rgbdimage.h" #include "cupoch/odometry/odometry.h" #include "cupoch/odometry/rgbdodometry_jacobian.h" #include "cupoch/utility/platform.h" namespace cupoch { namespace odometry { namespace { struct initialize_correspondence_map_functor { initialize_cor...
the_stack
#include <torch/extension.h> //#include <torch/serialize/tensor.h> //#include <ATen/ATen.h> //#include <ATen/cuda/CUDAContext.h> #define CUDA_NUM_THREADS 256 #define THREADS_PER_BLOCK 64 #define DIM0(TENSOR) ((TENSOR).x) #define DIM1(TENSOR) ((TENSOR).y) #define DIM2(TENSOR) ((TENSOR).z) #define DIM3(TENSOR) ((TENS...
the_stack
#include "multi_bspline.h" #include "multi_bspline_create_cuda.h" //__constant__ float A[48]; // typedef struct // { // float *coefs_real, *coefs_imag; // uint3 stride; // float3 gridInv; // int num_splines; // } multi_UBspline_3d_c_cuda; #ifndef NO_CUDA_MAIN extern "C" multi_UBspline_3d_c_cuda* create_multi...
the_stack
#include <cuda.h> #include <cuda_runtime.h> #include <ATen/ATen.h> #include <ATen/core/Tensor.h> #include <THC/THCAtomics.cuh> namespace rubiks { template <typename T> __global__ void rubiks_shift_3d_forward_cuda(const int total_num_elements, \ const int N_dim, const int input_T_dim, \ const ...
the_stack
#include <stdio.h> #include <unistd.h> #include <stdlib.h> // The first kernel just computes Ainv * u and also stores the kth // row of Ainv in global memory __global__ static void update_inverse_cuda1 (float *Ainv_g[], float *u_g[], float *AinvT_u_g[], float *Ainv_colk_g[], int N, int rowstride, int k) { _...
the_stack
#include <thrust/iterator/zip_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/gather.h> #include <thrust/transform.h> #include <cstring> #include <algorithm> #include <exception> #include "algorithm.hpp" #include "iterator.hpp" #include "query/time_series_aggregate.h" #include "memory.hpp" C...
the_stack