text — string, lengths 2.5k to 6.39M characters
kind — string, 3 classes
#include <assert.h> #include <stdio.h> #include <unistd.h> #include <complex> // Needed for std::min and max to work on device. #include <limits> int verbose = 0; #if __cplusplus >= 201103L #include <type_traits> // Convert a function into a functor with two arguments. We rely on SFINAE to // instantiate a functio...
the_stack
#define WARPSIZE 32 #include <cooperative_groups.h> // since we need to do a full O(N^2) computation and we don't need to broadcast the forces, // this should just be extremely efficient already // #define RADII_EXP 4, used for switching function shenanigans, disabled for now // nope, still need to parallelize out te...
the_stack
\brief Unit tests for thread-level GEMM */ #include "../../common/cutlass_unit_test.h" #include "cutlass/gemm/thread/mma.h" #include "testbed.h" ///////////////////////////////////////////////////////////////////////////////////////////////// // // Compute capability SM60 // TEST(SM60_Hgemm_thread, col_row_col_1x1...
the_stack
#include "nnnormalizelp.hpp" #include "datacu.hpp" #include <vector> #include <algorithm> // ------------------------------------------------------------------- // Helpers // ------------------------------------------------------------------- struct GPUVisit...
the_stack
// cudaColormapFromStr cudaColormapType cudaColormapFromStr( const char* str ) { if( !str ) return COLORMAP_DEFAULT; if( strcasecmp(str, "inferno") == 0 ) return COLORMAP_INFERNO; else if( strcasecmp(str, "magma") == 0 ) return COLORMAP_MAGMA; else if( strcasecmp(str, "parula") == 0 ) return COLORMAP_PARUL...
the_stack
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> /*HIP error wraper*/ static void HIP_ERROR(hipError_t err) { if (err != hipSuccess) { printf("HIP ERROR: %s, exiting\n", hipGetErrorString(err)); exit(-1); } } /*constants*/ #defin...
the_stack
#include "polydet.cu" #include "sturm.cu" #include "polyquotient.cu" #include "cheirality.cu" #include "essential_matrix_5pt.cu" //#include "essential_matrix_6pt.cu" // Declare constant memory (64KB maximum) __constant__ int c_num_points; __constant__ int c_num_test_points; __constant__ int c_ransac_num_test_points; _...
the_stack
#include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/densegrid.inl" #include "cupoch/geometry/geometry_functor.h" #include "cupoch/geometry/intersection_test.h" #include "cupoch/geometry/occupancygrid.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/voxelgrid.h" #include "cupoch/uti...
the_stack
#pragma once ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// // Notes: // // The following implementations are prototypes, do not exploit any reuse and // currently have bad performance. Their purpose is to be...
the_stack
#include <boost/preprocessor.hpp> #include <collectives/ib_comm.hpp> #include <iostream> #include <sstream> #include <utils.cuh> #include <utils.hpp> namespace HugeCTR { #define MAX_AR_CHANNELS 31 IbComm::ARCollContext::ARCollContext(IbComm* comm) { size_t num_gpus = comm->num_gpus_; num_gpus_ = num_gpus; std:...
the_stack
#include <thrust/scan.h> #include <thrust/sort.h> #include <thrust/device_vector.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include <thrust/sequence.h> #include <thrust/iterator/transform_iterator.h>...
the_stack
#if BF_CUDA_ENABLED #include "transpose_gpu_kernel.cuh" #include "cuda.hpp" #else typedef int cudaStream_t; // WAR #endif #include <cstdio> #include <algorithm> #include <limits> #include <sstream> template<int N> struct aligned_type { typedef char type; }; template<> struct aligned_type< 2> { typedef...
the_stack
#include "ew_op_gpu.h" #include <stdio.h> #include <type_traits> // # kernel 1 // new_vr = decay * vr + (1 - decay) * np.mean(grad**2 + eps1, axis=1, keepdims=True) // tf.assign(vr, new_vr) // ltm = np.mean(new_vr, keepdims=True) template <typename T, typename V> __global__ void adafactor_row_variance( float* RV,...
the_stack
extern "C" { #include "../cwc.h" #include "../cwc_internal.h" } #include "../../inc/ccv_convnet_internal.h" template <int input_per_thread, int filter_per_thread, int filter_per_block> __global__ static void _cwc_kern_convolutional_forward_propagate(const int strides, const int border, const int batch, float* input,...
the_stack
#include "CUFLU.h" #if ( MODEL == HYDRO && \ ( RSOLVER == EXACT || CHECK_INTERMEDIATE == EXACT ) && \ ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) ) // external functions #ifdef __CUDACC__ #include "CUFLU_Shared_FluUtility.cu" #else // #ifdef __CUDACC__ void Hydro_Con2Pr...
the_stack
using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif long fsize(int fd) { struct stat stat; int res = fstat(fd, &stat); return stat.st_size; } int printll(char *s) { while (*s != '\n' && *s != ',' && *s != '\t') { putchar(*s++); } return 0; } long hash(char *str0, int len) { uns...
the_stack
#include <cuda.h> #include "../../engines/cuda/utils.hh" #include <stdint.h> #include <assert.h> #include <stdio.h> /******************************************************************* HMAC-SHA1 kernel ******************************************************************/ #ifdef __DEVICE_EMULATION__ #define debugpr...
the_stack
#include <nbla/array.hpp> #include <nbla/imperative.hpp> #include <nbla/variable.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/sync_batch_normalization.hpp> #include <nbla/cuda/limits.hpp> #include <nbla/function/add2.hpp> #include <nbla/function/concatenate.hpp> #include <nbla/function/slice.hpp>...
the_stack
#include "blaze/common/common_defines.h" #include "blaze/common/exception.h" #include "blaze/math/float16.h" namespace blaze { template <> void Gemm<float16, CUDAContext>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, ...
the_stack
#include <THC/THCTensorCopy.h> #include <THC/THCReduceApplyUtils.cuh> #include <THC/THCTensorTypeUtils.cuh> #include <THC/THCTensorCopy.hpp> #include <ATen/cuda/CUDAContext.h> // // This file contains pointwise operation functions and kernels that // work on both contiguous and non-contiguous tensor arguments ...
the_stack
#define LIMIT -999 #include <stdio.h> #include <string.h> #include <stdlib.h> #include <sys/time.h> #include <cuda.h> #include "reference.cpp" // kernel #define SCORE(i, j) input_itemsets_l[j + i * (BLOCK_SIZE+1)] #define REF(i, j) reference_l[j + i * BLOCK_SIZE] __device__ __host__ int maximum( int a, int b, int...
the_stack
namespace flowfilter { namespace gpu { __global__ void flowPropagateX_k(cudaTextureObject_t inputFlow, gpuimage_t<float2> flowPropagated, const float dt, const int border) { const int height = flowPropagated.height; const int width = flowP...
the_stack
//========================================================================================================================================================================================================200 // DEFINE / INCLUDE //============================================================================================...
the_stack
#include <device_launch_parameters.h> namespace surfelwarp { namespace device { struct FusionAndMarkAppendedObservationSurfelDevice { // Some constants defined as enum enum { scale_factor = d_fusion_map_scale, fuse_window_halfsize = scale_factor >> 1, count_model_halfsize = 2 * scale_factor /*>> 1 */, ...
the_stack
* Benchmark: Sparse matrix operations, i.e. matrix-vector products (sparse.cpp and sparse.cu are identical, the latter being required for compilation using CUDA nvcc) * */ //#define VIENNACL_BUILD_INFO #ifndef NDEBUG #define NDEBUG #endif #define VIENNACL_WITH_UBLAS 1 #include <boost/numeric/ublas/...
the_stack
#include <ATen/ATen.h> #include <THC/THCAtomics.cuh> #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + T...
the_stack
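The row above defines a grid-stride loop macro (CUDA_1D_KERNEL_LOOP) and a block-count helper whose body is cut off. A minimal sketch of how such a macro is typically used in an elementwise kernel follows; the kernel name, the relu operation, and the grid-size cap are illustrative, not taken from the original file.

#include <cuda_runtime.h>
#include <algorithm>

#define THREADS_PER_BLOCK 1024
#define CUDA_1D_KERNEL_LOOP(i, n)                                \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);   \
       i += blockDim.x * gridDim.x)

// Cap the grid size (cap value here is illustrative); each thread then
// covers several elements via the stride loop, so any N is handled.
inline int GET_BLOCKS(const int N) {
  int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
  int max_block_num = 65536;
  return std::min(optimal_block_num, max_block_num);
}

// Illustrative elementwise kernel: every thread walks the array with a
// stride of blockDim.x * gridDim.x, so the whole range is covered.
__global__ void relu_kernel(const float* in, float* out, int n) {
  CUDA_1D_KERNEL_LOOP(i, n) { out[i] = in[i] > 0.f ? in[i] : 0.f; }
}

// Launch: relu_kernel<<<GET_BLOCKS(n), THREADS_PER_BLOCK>>>(d_in, d_out, n);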
#include <array> // std::array #include <cuda_runtime.h> // cudaError_t #include <cusolverDn.h> // Dn = dense (matrices) #include <cuComplex.h> // cuComplex, cuDoubleComplex #include "gputimer.h" // GpuTimer /* ****************************************************************** */ /* ****** "BOILERPLATE" routines...
the_stack
* Device code. */ #ifndef _PARTICLES_KERNEL_H_ #define _PARTICLES_KERNEL_H_ #include <stdio.h> #include <math.h> #include "cutil_math.h" #include "math_constants.h" #include "particles_kernel.cuh" #if USE_TEX // textures for particle position and velocity texture<float4, 1, cudaReadModeElementType> oldPosTex; textu...
the_stack
* Bottom-level digit-reduction/counting kernel ******************************************************************************/ #pragma once #include "radixsort_kernel_common.cu" namespace b40c { /****************************************************************************** * Cycle-processing Routines *******...
the_stack
#include "core/context_cuda.h" #include "contrib/rcnn/bbox_utils.h" namespace dragon { namespace rcnn { /******************** BBox ********************/ template <typename T> __device__ int _BBoxTransform(const T dx, const T dy, const T d_log_w, const T d_log_h, ...
the_stack
#include <ATen/cuda/CUDAApplyUtils.cuh> #define CHECK_CUDA(x) \ TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) \ TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) \ CHECK_CUDA(x); \ CHECK_CONTIGUOUS(x) namespace { int const thread...
the_stack
#include <cassert> // CUDA #include "../include/cuda_basic.h" #include "../include/helper_math.h" #include <thrust/copy.h> #include <thrust/execution_policy.h> #include <thrust/reduce.h> #include <thrust/scan.h> namespace pbf { namespace impl_ { // ParticleSystemGpu // // CellGridGpu (equivalent to SpatialHash on CP...
the_stack
using namespace at; /* ------------------------------begin of the forward--------------------------- */ #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define maxn 51 const double eps = 1E-...
the_stack
#include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/vec_math.hpp" namespace cv { namespace cuda { namespace device { namespace match_template { __device__ __forceinline__ float sum(float v) { return v; } __device__ __forceinline__ float sum(float2 v) { return v.x + v.y; } ...
the_stack
#pragma once #include <chrono> #include <thread> #include <gunrock/app/enactor_kernel.cuh> #include <gunrock/app/enactor_helper.cuh> #include <gunrock/util/latency_utils.cuh> /* this is the "stringize macro macro" hack */ #define STR(x) #x #define XSTR(x) STR(x) namespace gunrock { namespace app { /* * @brief Ite...
the_stack
#include "badslam/cuda_depth_processing.cuh" #include <cub/cub.cuh> #include <libvis/cuda/cuda_auto_tuner.h> #include <math_constants.h> #include "badslam/cuda_util.cuh" #include "badslam/cuda_matrix.cuh" #include "badslam/kernels.cuh" #include "badslam/util.cuh" namespace vis { __global__ void BilateralFilteringAn...
the_stack
#include <gtest/gtest.h> #include <string> #include <tuple> #include <utility> #include <vector> #include "dali/core/cuda_event.h" #include "dali/core/cuda_stream.h" #include "dali/operators/math/expressions/arithmetic_meta.h" #include "dali/operators/math/expressions/expression_impl_gpu.cuh" #include "dali/test/dali_...
the_stack
extern "C" { #include "lua.h" #include "lualib.h" #include "lauxlib.h" } #include "luaT.h" #include "THC.h" #include <stdio.h> #include <assert.h> #include <math_constants.h> #include <math_functions.h> #include <stdint.h> #include <unistd.h> #define TB 256 #define EPS 1e-4 THCState* getCutorchState(lua_State* L...
the_stack
#include "src/DeviceTensorUtils.h" #include "THCTensor.h" #include "cuda/CudaUtils.cuh" #include "cuda/DeviceTensor.cuh" #include "cuda/MemoryAccess.cuh" #include "cuda/util/CachedDeviceProperties.h" #define ENABLE_CUDA_DEBUG #include "cuda/CudaDebugUtils.cuh" #include <thrust/host_vector.h> #include <thrust/device_...
the_stack
#include <cmath> #include "core/context_cuda.h" #include "core/tensor.h" #include "utils/cuda_device.h" #include "utils/op_kernel.h" #include "utils/math_functions.h" namespace dragon { namespace kernel { template <typename T> __global__ void _Empty() { } template<> void Empty<float, CUDAContext>() { _Empty<fl...
the_stack
namespace caffe2 { #define CUDA_FUNCTOR(name, op, input_type, output_type) \ template <int b_is_scalar, typename T, typename R> \ __global__ void name##Kernel(const T* a, const T* b, R* out, int n) { \ CUDA_1D_KERNEL_LOOP(i, n) { \ out[i] = op(a[i], b[b_is_scalar ? 0 : i]); \ } \ } \ template <typename T, type...
the_stack
//#include "thrust/detail/device_ptr.inl" __global__ void findAdjacencySizesKernel(int size, int *adjIndexes, int *output) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { output[idx] = adjIndexes[idx + 1] - adjIndexes[idx]; } } __global__ void allocateNodesKernel(int size, int *a...
the_stack
#include "opticalFlowUtils.hpp" #include "backend/common/vectorOps.hpp" #include "backend/cuda/deviceBuffer.hpp" #include "backend/cuda/deviceStream.hpp" #include "gpu/image/sampling.hpp" #include "gpu/image/imageOps.hpp" #include "gpu/image/blur.hpp" #include "gpu/stream.hpp" #include "cuda/error.hpp" #include "cuda/...
the_stack
#include <algorithm> #include <unordered_map> #include "gloo/cuda_private.h" namespace gloo { namespace nccl { // Allocate a set of per-device streams used to serialize NCCL op scheduling. // These ensure concurrent NCCL ops are not interleaved across devices (i.e., // through priority scheduling), resulting in dead...
the_stack
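The gloo/NCCL row above describes allocating one stream per device to serialize NCCL op scheduling. A minimal sketch of that idea using plain CUDA runtime calls; the helper name and error macro are illustrative, not gloo's API.

#include <cuda_runtime.h>
#include <cstdlib>
#include <vector>

#define CUDA_CHECK(expr)                        \
  do {                                          \
    cudaError_t e = (expr);                     \
    if (e != cudaSuccess) std::abort();         \
  } while (0)

// One non-blocking stream per device: every NCCL op for a device is queued
// on that device's stream, so concurrent collectives cannot interleave on
// the same device and deadlock via priority scheduling.
std::vector<cudaStream_t> createPerDeviceStreams(const std::vector<int>& devices) {
  std::vector<cudaStream_t> streams(devices.size());
  for (size_t i = 0; i < devices.size(); ++i) {
    CUDA_CHECK(cudaSetDevice(devices[i]));
    CUDA_CHECK(cudaStreamCreateWithFlags(&streams[i], cudaStreamNonBlocking));
  }
  return streams;
}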
#include "typedef.h" #include "cuda_rys_sp.h" void my_cuda_safe(hipError_t err, std::string word) { if(err != hipSuccess) { fprintf(stderr, "Error during %s: ", word.c_str()); // check for error hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if(error != hipSuccess) { ...
the_stack
Parallel reduction This sample shows how to perform a reduction operation on an array of values to produce a single value in a single kernel (as opposed to two or more kernel calls as shown in the "reduction" CUDA Sample). Single-pass reduction requires Cooperative Groups. Reductions are a very common comp...
the_stack
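The row above describes a single-pass reduction that needs Cooperative Groups for a grid-wide barrier. A minimal sketch, assuming a float sum, a power-of-two block size, dynamic shared memory, and a cooperative launch; names are illustrative, not the sample's actual code.

#include <cooperative_groups.h>
#include <cuda_runtime.h>
namespace cg = cooperative_groups;

// Phase 1: each block reduces its slice into partials[blockIdx.x].
// Phase 2: after grid.sync(), block 0 reduces the partials.
// A single kernel works only because the cooperative launch guarantees all
// blocks are resident, so the grid-wide barrier cannot deadlock.
__global__ void reduce_single_pass(const float* in, float* partials,
                                   float* out, int n) {
  cg::thread_block block = cg::this_thread_block();
  cg::grid_group grid = cg::this_grid();
  extern __shared__ float sdata[];  // blockDim.x floats

  float sum = 0.f;
  for (int i = grid.thread_rank(); i < n; i += grid.size()) sum += in[i];
  sdata[block.thread_rank()] = sum;
  block.sync();
  for (int s = block.size() / 2; s > 0; s >>= 1) {  // assumes power-of-two block
    if (block.thread_rank() < s) sdata[block.thread_rank()] += sdata[block.thread_rank() + s];
    block.sync();
  }
  if (block.thread_rank() == 0) partials[blockIdx.x] = sdata[0];

  grid.sync();  // grid-wide barrier: all per-block partials are now visible

  if (blockIdx.x == 0) {
    float total = 0.f;
    for (int b = block.thread_rank(); b < gridDim.x; b += block.size()) total += partials[b];
    sdata[block.thread_rank()] = total;
    block.sync();
    for (int s = block.size() / 2; s > 0; s >>= 1) {
      if (block.thread_rank() < s) sdata[block.thread_rank()] += sdata[block.thread_rank() + s];
      block.sync();
    }
    if (block.thread_rank() == 0) *out = sdata[0];
  }
}
// Must be launched with cudaLaunchCooperativeKernel, with a grid small enough
// to be co-resident on the device, for grid.sync() to be valid.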
#include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template<typename Dtype, typename MItype, typename MOtype> void SoftmaxWithLossLayer<Dtype, MItype, MOtype>::GenerateProgram() { this->device_program_ = this->device_->CreateProgram(); stringstream ss; ss ...
the_stack
#include "opencv2/opencv_modules.hpp" #ifndef HAVE_OPENCV_CUDEV #error "opencv_cudev is required" #else #include <cuda.h> #include "stereosgm.hpp" #include "opencv2/cudev/common.hpp" #include "opencv2/cudev/warp/warp.hpp" #include "opencv2/cudastereo.hpp" namespace cv { namespace cuda { namespace device { namespac...
the_stack
#include "caffe/layers/bn_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> void BNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* const_bottom_data = bottom[0]->gpu_data(); const Dtype* const_top_data =...
the_stack
/// \file GPUReconstructionCUDA.cu /// \author David Rohr #include "GPUReconstructionCUDADef.h" #include "GPUReconstructionCUDAIncludes.h" #include <cuda_profiler_api.h> #include <unistd.h> #include "GPUReconstructionCUDA.h" #include "GPUReconstructionCUDAInternals.h" #include "GPUReconstructionIncludes.h" #include ...
the_stack
namespace caffe2 { __global__ void AdamUpdate( int N, const float* g, const float* m, const float* v, float* ng, float* nm, float* nv, float beta1, float beta2, float eps_hat, float correction, const float* lr) { CUDA_1D_KERNEL_LOOP(i, N) { float gi = g[i]; flo...
the_stack
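The AdamUpdate kernel body in the row above is cut off. A minimal sketch of the per-element update it is computing, assuming the standard Adam rule with the bias correction folded into `correction`; parameter names follow the visible signature, but the body is a reconstruction, not the original caffe2 code.

// Grid-stride loop macro as used throughout these snippets.
#define CUDA_1D_KERNEL_LOOP(i, n)                                \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);   \
       i += blockDim.x * gridDim.x)

__global__ void AdamUpdateSketch(int N, const float* g, const float* m,
                                 const float* v, float* ng, float* nm,
                                 float* nv, float beta1, float beta2,
                                 float eps_hat, float correction,
                                 const float* lr) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    float gi = g[i];
    float mi = nm[i] = m[i] * beta1 + gi * (1.f - beta1);       // 1st moment
    float vi = nv[i] = v[i] * beta2 + gi * gi * (1.f - beta2);  // 2nd moment
    // Bias-corrected step: lr * correction * m_hat / (sqrt(v_hat) + eps)
    ng[i] = lr[0] * correction * mi / (sqrtf(vi) + eps_hat);
  }
}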
#include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/output.hpp" #include "caffe/layers/correlation_layer.hpp" #include "caffe/caffe.hpp" #define ROUND_OFF 50000 #define WARPS_PER_BLOCK 1 #define THREADS_PER_WARP 32 na...
the_stack
#include <Eigen/Core> #include <df/camera/poly3.h> // TODO #include <df/transform/rigid.h> #include <df/transform/nonrigid.h> #include <df/util/cudaHelpers.h> #include <df/util/dualQuaternion.h> // TODO #include <df/voxel/color.h> #include <df/voxel/probability.h> #include <df/voxel/compositeVoxel.h> #include <df/vox...
the_stack
static __thread unsigned long* err_addr; static __thread unsigned long* err_expect; static __thread unsigned long* err_current; static __thread unsigned long* err_second_read; static __thread unsigned int* err_count; static __thread unsigned int unreported_errors=0; __thread struct timeval last_report_time; extern unsi...
the_stack
namespace mn { struct mgsp_benchmark { using streamIdx = Cuda::StreamIndex; using eventIdx = Cuda::EventIndex; using host_allocator = heap_allocator; struct device_allocator { // hide the global one void *allocate(std::size_t bytes) { void *ret; checkCudaErrors(cudaMalloc(&ret, bytes)); r...
the_stack
#include <thrust/reduce.h> #include <cstdio> #include <cstring> #include <fstream> #include <sstream> #include "chrono_fsi/utils/ChUtilsDevice.cuh" #include "chrono_fsi/utils/ChUtilsPrintSph.cuh" namespace chrono { namespace fsi { namespace utils { void PrintToFile(const thrust::device_vector<Real4>& posRadD, ...
the_stack
/*---Main---*/ int main( int argc, char** argv ) { Arguments args; memset( (void*)&args, 0, sizeof(Arguments) ); args.argc = argc; args.argv_unconsumed = (char**) malloc( argc * sizeof( char* ) ); args.argstring = 0; for( int i=0; i<argc; ++i ) { if ( argv[i] == NULL) { printf("Null command l...
the_stack
Bicubic filtering See GPU Gems 2: "Fast Third-Order Texture Filtering", Sigg & Hadwiger https://developer.nvidia.com/gpugems/gpugems2/part-iii-high-quality-rendering/chapter-20-fast-third-order-texture-filtering Reformulation thanks to Keenan Crane */ #ifndef _BICUBICTEXTURE_KERNEL_CUH_ #define _BICUBICTE...
the_stack
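The bicubic-filtering row above cites the GPU Gems 2 reformulation. A minimal sketch of the cubic B-spline weights and a straightforward four-tap 1D filter; the sample's actual optimization folds these four taps into two hardware bilinear fetches, which is omitted here. Function names are illustrative.

// Cubic B-spline weights for fractional offset a in [0,1); they sum to 1.
__device__ float w0(float a) { return (1.f / 6.f) * (a * (a * (-a + 3.f) - 3.f) + 1.f); }
__device__ float w1(float a) { return (1.f / 6.f) * (a * a * (3.f * a - 6.f) + 4.f); }
__device__ float w2(float a) { return (1.f / 6.f) * (a * (a * (-3.f * a + 3.f) + 3.f) + 1.f); }
__device__ float w3(float a) { return (1.f / 6.f) * (a * a * a); }

// Four-tap 1D cubic filter over texels at x-1 .. x+2, where x = floor(coord)
// and a = coord - x. Applying it along each axis gives bicubic filtering.
__device__ float cubicFilter(float a, float c0, float c1, float c2, float c3) {
  return c0 * w0(a) + c1 * w1(a) + c2 * w2(a) + c3 * w3(a);
}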
#include <thrust/device_ptr.h> #include <thrust/scan.h> #include <cuComplex.h> #include "../cuspreadinterp.h" #include "../memtransfer.h" #include "../precision_independent.h" using namespace std; int CUFINUFFT_SPREAD3D(int nf1, int nf2, int nf3, CUCPX* d_fw, int M, FLT *d_kx, FLT *d_ky, FLT* d_kz, CUCPX *d_c, CUF...
the_stack
extern "C" { #endif #include <stdio.h> #include <math.h> #include <float.h> #include "gaterecurrent2dnoind_kernel.h" #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) __device__ void get_gate_idx_sf(int h1...
the_stack
//#include <cub/device/device_radix_sort.cuh> #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = o...
the_stack
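The row above provides a software atomicAdd for doubles on architectures before sm_60 but is cut off mid-loop. For context, the standard CAS loop from the CUDA C++ Programming Guide looks like this (a sketch matching the visible prefix, guarded so it only exists in pre-sm_60 device code):

#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val) {
  unsigned long long int* address_as_ull = (unsigned long long int*)address;
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    // Reinterpret bits, add in double precision, and try to swap the result
    // in; retry if another thread modified the value in the meantime.
    old = atomicCAS(address_as_ull, assumed,
                    __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);  // compares integer bit patterns, so NaN-safe
  return __longlong_as_double(old);
}
#endif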
#include "ew_op_gpu.h" #include "gpu_hmma.h" #include <stdio.h> #if __CUDA_ARCH__ >= 700 template <uint OP_A, bool GATED> __global__ void __launch_bounds__(128) hgemm_blocksparse_32x128x32_xn_sdd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ A, const ehalf* ...
the_stack
#include <basic_types.h> #include <util.h> #include <error.h> #include <types.h> #include <matrix_coloring/min_max_2ring.h> #include <cusp/format.h> #include <cusp/copy.h> #include <cusp/detail/random.h> #include <thrust/count.h> #include <thrust/extrema.h> #include <sm_utils.inl> #define COLORING_DEBUG 1 // Pseudo-...
the_stack
#include <cooperative_groups.h> #include <torch/extension.h> using namespace cooperative_groups; typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor_4d; typedef torch::PackedTensorAccessor32<int64_t, 4, torch::RestrictPtrTraits> int64_accessor_4d; typedef torch::PackedTensorAccess...
the_stack
namespace dgl { using runtime::NDArray; namespace aten { namespace impl { ///////////////////////////// CSRIsNonZero ///////////////////////////// template <DLDeviceType XPU, typename IdType> bool CSRIsNonZero(CSRMatrix csr, int64_t row, int64_t col) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); ...
the_stack
#include <cuml/common/logger.hpp> #include <raft/linalg/eltwise.cuh> #include <raft/linalg/norm.cuh> #include <cuda_runtime.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/transform.h> #include <sys/time.h> #include <raft/random/rng.hpp> #include <raft/stats/sum.hpp> #include <unistd.h...
the_stack
#include <cutil.h> #include <util.h> #include <blas.h> #include <multiply.h> #include <matrix_analysis.h> #include<thrust/count.h> //count #include<thrust/sort.h> //sort #include<thrust/binary_search.h> //lower_bound #include<thrust/unique.h> //unique #include<cusp/detail/format_utils.h> //offsets_to_indices #includ...
the_stack
using namespace std; // Forward declaration template <class real> void RunTest(string testName, ResultDatabase &resultDB, OptionParser &op); // ******************************************************** // Function: toString // // Purpose: // Simple templated function to convert objects into // strings using string...
the_stack
// SalientRegionDetect.cu // Implements image salient-region detection #include "SalientRegionDetect.h" #include <iostream> #include <stdio.h> #include <cmath> using namespace std; #include "ErrorCode.h" // Macros: DEF_BLOCK_X and DEF_BLOCK_Y // Define the default thread-block dimensions. #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // Macro: MAX_TEMPLATE // Defines the maximum size of the neighborhood template. #ifndef MA...
the_stack
#include <map> #define kBsdfSamples 1.0f #define kProbeSamples 1.0f #define kRayEpsilon 0.001f #define USE_LIGHT_SAMPLING 1 namespace { struct GPUScene { Primitive* primitives; int numPrimitives; Primitive* lights; int numLights; Sky sky; BVH bvh; }; // create a texture object from memory and store it i...
the_stack
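The comment in the row above is cut off at "create a texture object from memory and store it i...". A minimal sketch of wrapping a linear device buffer in a cudaTextureObject_t; the function name, the float4 element type, and the descriptor settings are illustrative, not the original renderer's code.

#include <cuda_runtime.h>

// Wrap an existing linear device buffer of float4 texels in a texture object.
cudaTextureObject_t createTextureFromMemory(const float4* d_data, size_t numTexels) {
  cudaResourceDesc resDesc = {};
  resDesc.resType = cudaResourceTypeLinear;
  resDesc.res.linear.devPtr = const_cast<float4*>(d_data);
  resDesc.res.linear.desc = cudaCreateChannelDesc<float4>();
  resDesc.res.linear.sizeInBytes = numTexels * sizeof(float4);

  cudaTextureDesc texDesc = {};
  texDesc.readMode = cudaReadModeElementType;  // return texels as stored

  cudaTextureObject_t tex = 0;
  cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr);
  return tex;
}
// In a kernel: float4 v = tex1Dfetch<float4>(tex, i);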
// Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: phillicl #include "TableDihedralForceGPU.cuh" #include "hoomd/TextureTools.h" #include "hoomd/VectorMath.h" #include <assert.h> // SMALL a rel...
the_stack
#include <cassert> #include <fstream> #include <iostream> #include <sstream> #include "Timer.h" #include <cuda.h> #include <cuda_runtime_api.h> #include "OptionParser.h" #include "ResultDatabase.h" #include "cudacommon.h" using namespace std; // leftrotate function definition #define LEFTROTATE(x, c) (((x) << (c)) |...
the_stack
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <inttypes.h> #include <math.h> #include <float.h> #include <stdint.h> #include <cuda.h> #include "sleefinline_purec_scalar.h" #include "sleefinline_cuda.h" #define STDIN_FILENO 0 #define SIMD_SUFFIX _cuda_sleef #define CONCAT_SIMD_SUFFIX_(keyword, s...
the_stack
#include <pyre/journal.h> #include <isce3/core/Ellipsoid.h> #include <isce3/core/Projections.h> #include <isce3/cuda/container/RadarGeometry.h> #include <isce3/cuda/core/OrbitView.h> #include <isce3/cuda/core/gpuLUT2d.h> #include <isce3/cuda/core/gpuProjections.h> #include <isce3/cuda/except/Error.h> #include <isce3/c...
the_stack
namespace anakin { namespace saber { /** * @brief reduce tensor according to the given reduce dim. * e.g. * input tensor with shape [5, 2, 10, 4] (rank = 4, i.e., how many dimensions the tensor has) * and the reduce dim may have the following forms: * 1) reduce_dim = None...
the_stack
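The comment in the row above enumerates the forms reduce_dim can take. A minimal host-side sketch of how the output shape follows from the input shape and the chosen dims; the function name is illustrative and not the anakin/saber API.

#include <vector>
#include <cstdint>

// For input shape [5, 2, 10, 4]:
//   reduce_dim empty (None)  -> reduce over all dims  -> [1, 1, 1, 1]
//   reduce_dim = {1}         -> reduce dim 1 only     -> [5, 1, 10, 4]
//   reduce_dim = {0, 2}      -> reduce dims 0 and 2   -> [1, 2, 1, 4]
std::vector<int64_t> reducedShape(std::vector<int64_t> shape,
                                  const std::vector<int>& reduce_dim) {
  if (reduce_dim.empty()) {
    for (auto& d : shape) d = 1;            // None: reduce every dimension
  } else {
    for (int d : reduce_dim) shape[d] = 1;  // keep rank, collapse chosen dims
  }
  return shape;
}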
///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutla...
the_stack
#include "_reg_blocksize_gpu.h" /* ******************************** */ /* ******************************** */ NiftyReg_CudaBlock100 * NiftyReg_CudaBlock::instance = NULL; /* ******************************** */ /* ******************************** */ NiftyReg_CudaBlock100::NiftyReg_CudaBlock100() { Block_target_bloc...
the_stack
#include <cuda_runtime.h> #include <pycaUtils.h> #include <gcache.h> #include "PyCAException.h" #include "mem.h" #include "MemOperDefs.h" // TEST make sure boost isn't included in nvcc code #if defined(BOOST_COMPILER) int bla[-1]; #endif #define GMEM_UNARY_OPERS(OP) MEM_UNARY_OPERS(GMemOpers, OP) #define GMEM_BINARY...
the_stack
#include "../config.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" #include "../block/block_load.cuh" #include "../block/block_store.cuh" #include "../block/block_merge_sort.cuh" #include <thrust/system/cuda/detail/core/util.h> CUB_NAMESPACE_BEGIN template < int _BLOCK_THRE...
the_stack
#include <cstdint> template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Iterator5> __global__ void reduce_by_key_kernel(ExecutionPolicy exec, Iterator1 keys_first, Iterator1 keys_last, Iterator2 ...
the_stack
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ ...
the_stack
* \file * The cub::BlockDiscontinuity class provides [<em>collective</em>](index.html#sec0) methods for flagging discontinuities within an ordered set of items partitioned across a CUDA thread block. */ #pragma once #include "../util_type.cuh" #include "../util_ptx.cuh" #include "../util_namespace.cuh" /// Optiona...
the_stack
using namespace std::chrono; typedef high_resolution_clock myclock; typedef duration<float> myduration; #define MAX_WG_SIZE 256 template <typename T> T* mem_alloc (const int align, const size_t size) { return (T*) aligned_alloc(align, size * sizeof(T)); } template <typename T> void mem_free (T* p) { ...
the_stack
#include "cutil_math.h" #include "radixsort.cu" // Build in RadixSort __constant__ FluidParams simData; __constant__ uint gridActive; __global__ void insertParticles ( bufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; registe...
the_stack
//////////////////////////////////////////////////////////////////////////////////// // BASELINE FUNCTIONS //////////////////////////////////////////////////////////////////////////////////// // All "baseline" code is at the top of this file. The baseline code is a simple // implementation of the algorithm, with only m...
the_stack
#include "miner.h" #include "cuda_helper.h" #include "cuda_vectors.h" #include "streebog_arrays.cuh" __constant__ static uint2 keccak_round_constants[24] = { { 0x00000001, 0x00000000 }, { 0x00008082, 0x00000000 }, { 0x0000808a, 0x80000000 }, { 0x80008000, 0x80000000 }, { 0x0000808b, 0x00000000 }, { 0x80000001, 0...
the_stack
* COMPILATION TIP * nvcc main_draft1.cu ../grid2d/grid2d.cu ../grid2d/sysparam.cu ../dynam/XORMRGgens.cu ../common/gridsetup.cu -o main * * */ #include "../grid2d/grid2d.h" // Spins2d (struct) #include "../grid2d/sysparam.h" // Sysparam, Avg, TransProb, Sysparam_ptr, Avg_ptr, TransProb_ptr, constTransProb #incl...
the_stack
#include "GReduce.h" #include <pycaUtils.h> #include <mem.h> #include "ReduceKernel.cu" #include <gmem.h> #include <Vec2D.h> #include "CudaUtils.h" // TEST make sure boost isn't included in nvcc code #if defined(BOOST_COMPILER) int bla[-1]; #endif namespace PyCA { template<typename T, typename opers> T accumulate(T*...
the_stack
TODO: Clean up by abstracting some of the common pieces into separate funcs E.g., 8/16-bit renormalization alpha/beta application TODO: Try 2D tex fetch TODO: dp4a version: Preprocess by reordering: < [time, chan,stand,pol,cpx] int8_t > [time/4,...
the_stack
namespace caffe { template<typename Dtype, typename MItype, typename MOtype> void MOELayer<Dtype, MItype, MOtype>::Forward_gpu( const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top) { int_tp select_experts = this->layer...
the_stack
#include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <nvbio/basic/timer.h> #include <nvbio/basic/console.h> #include <nvBowtie/bowtie2/cuda/scoring_queues.h> namespace nvbio { namespace bowtie2 { namespace cuda { namespace { // anonymous namespace // // Setup the contents of a Read...
the_stack
FusedOp::FusedOp(FFModel& model, Op* op) : Op(model, OP_FUSED, op->name, 0) { numInputs = op->numInputs; for (int i = 0; i < numInputs; i++) { inputs[i] = op->inputs[i]; input_lps[i] = op->input_lps[i]; input_grad_lps[i] = op->input_grad_lps[i]; } numWeights = op->numWeights; for (int i = 0; i ...
the_stack
#include "trt_engine/trt_network_crt/plugins/grid_sampler_plugin/grid_sampler_plugin.h" #include <cuda_runtime.h> #include <device_launch_parameters.h> #include "trt_engine/trt_network_crt/plugins/common/half_ext.cuh" FWD_TRT_NAMESPACE_BEGIN #define LAUNCH_BOUNDS_0 \ __launch_bounds__(256, 4) // default launch b...
the_stack
#include "datamex.hpp" #if ENABLE_GPU #include "datacu.hpp" #endif #ifndef NDEBUG #include<iostream> #endif using namespace vl ; /* ---------------------------------------------------------------- */ /* MexContext */ /* -------------------------------------------...
the_stack
#pragma once #include <cuda.h> #include <gunrock/util/cuda_properties.cuh> #include <gunrock/util/vector_types.cuh> namespace gunrock { namespace util { namespace io { /** * Enumeration of data movement cache modifiers. */ namespace st { enum CacheModifier { NONE, // default (currently wb) cg, // cache gl...
the_stack
* Example showing the use of CUFFT for fast 1D-convolution using FFT. * This sample is the same as simpleCUFFT, except that it uses a callback * function to perform the pointwise multiply and scale, on input to the * inverse transform. * */ // includes, system #include <stdlib.h> #include <stdio.h> #include <str...
the_stack
#include <iostream> namespace chrono { // dot product of each column of a matrix with itself CUDA_HOST_DEVICE inline real3 DotMM(const real* M) { real3 result; result.x = M[0] * M[0] + M[1] * M[1] + M[2] * M[2]; result.y = M[4] * M[4] + M[5] * M[5] + M[6] * M[6]; result.z = M[8] * M[8] + M[9] * M[9] +...
the_stack
#define CUB_STDERR #include <iterator> #include <cub/warp/warp_store.cuh> #include <cub/iterator/cache_modified_output_iterator.cuh> #include <cub/iterator/discard_output_iterator.cuh> #include <cub/util_allocator.cuh> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sequence.h> #i...
the_stack
#include "ew_op_gpu.h" #include "gpu_hmma.h" #include <stdio.h> #if __CUDA_ARCH__ >= 700 template <bool N64> __device__ __noinline__ void stg_64x64x64_nx(ehalf* Y, uint offsetY, uint loadY, uint N, uint K, uint n, uint i) { for (uint j = 0; j < 2; j++) if (N64 || n + i*16 + j*32 < N) store_h...
the_stack
namespace xlib { namespace gpu { template<typename T> void printArray(const T* d_array, size_t size, const std::string& title, const std::string& sep) noexcept { auto h_array = new T[size]; cuMemcpyToHost(d_array, size, h_array); xlib::printArray(h_array, size, title, sep); delete[] h_a...
the_stack