| text (string, 2.5k–6.39M chars) | kind (string, 3 classes) |
|---|---|
#include <cstdio>
#include <cstdlib>
#include <curand_kernel.h>
#include <raft/common/cub_wrappers.cuh>
#include <raft/common/scatter.cuh>
#include <raft/cuda_utils.cuh>
#include <raft/cudart_utils.h>
#include <raft/handle.hpp>
#include <random>
#include <rmm/device_uvector.hpp>
#include <stdint.h>
#include <type_trait... | the_stack |
#include <types.h>
#include <cutil.h>
#include <error.h>
#include <cusp/print.h>
#include <my_timer.h>
__device__ __constant__ CGType c_w_x_3d[DEGREE];
__device__ __constant__ CGType c_w_y_3d[DEGREE];
__device__ __constant__ CGType c_w_z_3d[DEGREE];
__device__ __constant__ CGType c_phi[DEGREE*DEGREE*DEGREE * 4];
temp... | the_stack |
__constant__ GlobalConstants cu_const_params;
#include "block_matching.cu_inl"
#include "aggregation.cu_inl"
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
////////////////////////////////////////////////////////////////////////////////... | the_stack |
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cuda_util.h"
#include "mat.h"
#include "padding_cuda.h"
#include <iostream>
namespace ncnn {
template<typename T>
__global__ void gpu_copy_make_border_image_type0(const T* src, const CudaMatInfo src_info, T* dst, const CudaMatInfo... | the_stack |
#include <cstdio>
#include <iostream>
#include <string>
#include <picojson.h>       // needed for picojson::value used in main() below
#include <mshadow/tensor.h> // assumed: provides InitTensorEngine<gpu>() called below
using namespace std;
typedef unsigned char byte;
int main(){
InitTensorEngine<gpu>();
picojson::value v;
cin >> v;
if (std::cin.fail()) {
std::cerr << picojson::get_last_error() << std::endl;
return 1;
}
picojson::object& o = v.get<picojson::o... | the_stack |
#pragma once
#include "cuda_utils_kernels.cuh"
namespace cuhnsw {
__inline__ __device__
bool IsNeighbor(const int* graph, const int deg, const int dstid) {
__syncthreads();
// figure out the warp/ position inside the warp
int warp = threadIdx.x / WARP_SIZE;
int lane = threadIdx.x % WARP_SIZE;
static _... | the_stack |
__global__ void
initColorVolumeKernel (PtrStep<uchar4> volume)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < VOLUME_X && y < VOLUME_Y)
{
uchar4 *pos = volume.ptr (y) + x;
int z_step = VOLUME_Y * volume.step / sizeof(*pos);
#pragma unroll
fo... | the_stack |
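The truncated loop under the `#pragma unroll` steps through the volume's z slices: since the volume is stored as a pitched 2D array holding VOLUME_Y rows per slice, advancing by `z_step` elements reaches the same (x, y) in the next slice. A minimal sketch of how such a loop typically completes, assuming a `VOLUME_Z` constant alongside the `VOLUME_X`/`VOLUME_Y` ones above:

```cuda
// Sketch only: zero-initialize the color voxel at (x, y) in every z slice.
for (int z = 0; z < VOLUME_Z; ++z, pos += z_step)
  *pos = make_uchar4(0, 0, 0, 0);
```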
KW_GLOBAL_KERNEL void kernelPartialsPartialsGrowing(KW_GLOBAL_VAR REAL* KW_RESTRICT partials1,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials2,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials3,
... | the_stack |
#include "ConsolidateAndIdentifyContours.h"
#include <iostream>
#include <fstream>
#include <cmath>
using namespace std;
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Macro: RED_MAC_COUNT
// Defines the number of times edge detection is repeated.
#define RED_MAC_COUNT 4
// Macros: DILATE_TPL_SHAPE and SEARCH_TPL... | the_stack |
#include "typedef.h"
#include "cuda_rys_sp.h"
#include "cuda_rys_dp.h"
__device__ void cuda_Roots_dp(int n, double X, double roots[], double weights[]){
if (n <= 3)
cuda_Root123_dp(n,X, roots,weights);
else if (n==4)
cuda_Root4_dp(X, roots,weights);
else if (n==5)
cuda_Root5_dp(X, roots,weights);
... | the_stack |
#include <iostream>
using namespace std;
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Global variable: _hardIplInimgTex (texture memory reference for the input image)
// Texture memory can only be used with global variables, so the input image of the
// hardware-interpolated rotation Kernel function is declared here.
static texture<unsigned char, 2, cudaReadModeElementType> _hardIplInimgTex;
// Kernel ... | the_stack |
#include "k2/csrc/ragged.h"
#include "k2/python/csrc/torch/torch_util.h"
#include "k2/python/csrc/torch/v2/any.h"
#include "k2/python/csrc/torch/v2/doc/any.h"
#include "k2/python/csrc/torch/v2/doc/doc.h"
#include "k2/python/csrc/torch/v2/ragged_any.h"
namespace k2 {
void PybindRaggedAny(py::module &m) {
py::class_<... | the_stack |
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/device_helper.h"
#include "Fast.h"
namespace Saiga
{
namespace CUDA
{
__constant__ unsigned char c_table[] = {
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x... | the_stack |
// input dimension: (B x N x 1 x H x W)
// output dimension: (B x N x 1 x H x W)
__global__ void DepthColorAngleReprojectionNeighbours_forward_depth_kernel(
float *input,
float *output,
float *cameras,
float *invKRs,
fl... | the_stack |
// For internal OP use, not user facing
template <typename T>
__global__ void Pad(const size_t size, const T* input, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top,
const int pad_lef... | the_stack |
#ifdef WIN32
# ifndef strncasecmp
# define strncasecmp strnicmp
# endif
#endif
#define GET_LINE() if (!fgets(buf, 1024, f)) return false
#define COND_READ(cond, where, len) if ((cond) && !fread((void *)&(where), (len), 1, f)) return false
#define LINE_IS(text) !strncasecmp(buf, text, strlen(text))
#define BIGNUM 1.0... | the_stack |
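These macros compress the boilerplate of a line-oriented mesh reader: `GET_LINE` bails out of the enclosing function on a read failure, `COND_READ` guards an `fread`, and `LINE_IS` does a case-insensitive prefix match against the current line. A hypothetical usage sketch (the `read_header` function is illustrative, not from the original):

```cpp
// Hypothetical parser fragment using the macros above; assumes <cstdio>/<cstring>.
static bool read_header(FILE *f)
{
  char buf[1024];
  GET_LINE();            // expands to: if fgets fails, return false
  if (!LINE_IS("ply"))   // case-insensitive prefix check on buf
    return false;
  GET_LINE();            // consume the next header line
  return true;
}
```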
namespace AggMIS {
namespace Aggregation {
Types::IntVector_h* AggregateToNearest(Types::Graph_h &graph,
Types::IntVector_h &roots) {
// Allocating an array for distances:
Types::IntVector_h rootDistance(roots.size());
// Alloc... | the_stack |
// "* 1" removes the warning:
// enumeral mismatch in conditional expression: ‘<anonymous enum>’ vs ‘<anonymous enum>’
__host__ __device__
inline int Dir_x( int octant ) { return octant & (1<<0) ? DIR_DN * 1: DIR_UP * 1; }
__host__ __device__
inline int Dir_y( int octant ) { return octant & (1<<1) ? DIR_DN * 1: DIR_UP ... | the_stack |
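The `* 1` workaround is easiest to see in a minimal repro: when the two arms of `?:` are values of two *different* anonymous enum types, GCC emits the quoted "enumeral mismatch" warning; multiplying by 1 forces integer promotion so both arms are plain `int`. A sketch (the enum definitions here are illustrative):

```cpp
enum { DIR_UP = +1 };   // two distinct anonymous enum types
enum { DIR_DN = -1 };

int dir(bool down)
{
  // return down ? DIR_DN : DIR_UP;        // warns: enumeral mismatch in conditional expression
  return down ? DIR_DN * 1 : DIR_UP * 1;   // '* 1' promotes both arms to int; no warning
}
```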
namespace amgx
{
/***************************************
* Source Definitions
***************************************/
template <class T_Config>
void CommsMPIDirect<T_Config>::exchange_matrix_halo(IVector_Array &row_offsets,
I64Vector_Array &col_indices,
MVector_Array &values,
I64Vector_Arr... | the_stack |
#ifndef INCLUDE_GGNN_CUDA_KNN_GGNN_MULTI_GPU_CUH_
#define INCLUDE_GGNN_CUDA_KNN_GGNN_MULTI_GPU_CUH_
#include <chrono>
#include <limits>
#include <string>
#include <thread>
#include <stdio.h>
#include <cstring>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include "cub/cub.cuh"
#include "ggnn/cuda_kn... | the_stack |
using namespace FW;
//------------------------------------------------------------------------
// Global variables.
//------------------------------------------------------------------------
__constant__ int4 c_input[(sizeof(RenderInput) + sizeof(int4) - 1) / sizeof(int4)];
__constant__ int4 c_blurLUT[BLUR... | the_stack |
* \file
* cub::DeviceHisto256 provides device-wide parallel operations for constructing 256-bin histogram(s) over data samples residing within global memory.
*/
#pragma once
#include <stdio.h>
#include <iterator>
#include "persistent_block/persistent_block_histo_256.cuh"
#include "../block/block_load.cuh"
#include... | the_stack |
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <curand.h>
#include <cuda_runtime_api.h>
#include "cub/device/device_radix_sort.cuh"
#include "cub/util_allocator.cuh"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <sys/time.h>
#include ... | the_stack |
* \test Testing the BLAS level 3 routines in the ViennaCL BLAS-like shared library
**/
// include necessary system headers
#include <iostream>
#include <vector>
// Some helper functions for this tutorial:
#include "viennacl.hpp"
#include "viennacl/tools/random.hpp"
#include "viennacl/vector.hpp"
template<typen... | the_stack |
// Project
#include "Geometry.h"
#include "PerfTimer.h"
extern "C" {
void exactinit();
RealType orient3d( RealType *pa, RealType *pb, RealType *pc, RealType *pd );
RealType insphere( RealType *pa, RealType *pb, RealType *pc, RealType *pd, RealType *pe );
}
///////////////////////////////////////... | the_stack |
extern "C" {
#include <stdint.h>
#include <memory.h>
}
#include "cuda_helper.h"
#define TPB52 1024
#define TPB50 384
#define NPT 2
#define NBN 2
uint32_t *d_nounce[MAX_GPUS];
uint32_t *h_nounce[MAX_GPUS];
__constant__ uint2 c_PaddedMessage80[ 6]; // padded message (80 bytes + padding?)
__constant__ uint2 c_mid[17];... | the_stack |
#ifndef INCLUDE_GGNN_CUDA_KNN_GGNN_CUH_
#define INCLUDE_GGNN_CUDA_KNN_GGNN_CUH_
#include <limits>
#include <string>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include "cub/cub.cuh"
#include "ggnn/cuda_knn_ggnn_gpu_instance.cuh"
#include "ggnn/query/cuda_knn_query_layer.cuh"
#in... | the_stack |
namespace amgx
{
// -----------
// Kernels
// -----------
/*************************************************************************
* "random" hash function for both device and host
************************************************************************/
__host__ __device__ static int ourHash(const int i, const int... | the_stack |
#include <cugraph/algorithms.hpp>
#include <cugraph/partition_manager.hpp>
#include <cuco/detail/hash_functions.cuh>
#include <cugraph/graph_view.hpp>
#include <cugraph/prims/reduce_v.cuh>
#include <thrust/reduce.h>
#include <raft/comms/comms.hpp>
#include <raft/comms/mpi_comms.hpp>
#include <raft/handle.hpp>
#includ... | the_stack |
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "math.hpp"
#include "types.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/tensor.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include <cuda_runtime.h>
using namespace cv::dnn::cuda4... | the_stack |
#include <array/NDArray.h>
#include <execution/Threads.h>
#include <helpers/ConstantTadHelper.h>
#include <system/op_boilerplate.h>
#include "../triangular_solve.h"
namespace sd {
namespace ops {
namespace helpers {
/*
 * lower triangular process for a system of linear equations
 * x_1 = b_1 / a_{1,1}
 * x_2 = (b_2 - a_{2,1}... | the_stack |
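The comment sketches forward substitution; written out, the recurrence is x_i = (b_i − Σ_{j<i} a_{i,j}·x_j) / a_{i,i}. A minimal host-side sketch for a dense row-major lower-triangular system (illustrative only, not the library's batched device implementation):

```cpp
// Forward substitution for L * x = b, with L lower triangular, row-major, n x n.
void lowerTriangularSolve(const float *L, const float *b, float *x, int n)
{
  for (int i = 0; i < n; ++i) {
    float sum = b[i];
    for (int j = 0; j < i; ++j)
      sum -= L[i * n + j] * x[j];  // subtract contributions of solved unknowns
    x[i] = sum / L[i * n + i];     // divide by the diagonal entry
  }
}
```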
#include "thundergbm/builder/exact_tree_builder.h"
#include "thundergbm/util/multi_device.h"
#include "thundergbm/util/cub_wrapper.h"
#include "thundergbm/util/device_lambda.cuh"
#include "thrust/iterator/counting_iterator.h"
#include "thrust/iterator/transform_iterator.h"
#include "thrust/iterator/discard_iterator.h"... | the_stack |
namespace anakin {
namespace saber {
static void gemm(cublasHandle_t handle,
const bool TransA, const bool TransB,
int m, int n, int k, const float alpha,
const float* a, const float* b,
const float beta, float* c) {
// cout << "(" << m << "," ... | the_stack |
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/functional.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/scan.h>
#include <thrust/tabulate.h>
#include <thrust/transform.h>
namespace cudf {
namespace groupby {
namespace detail {
namespace {
/**
* @brief Functor... | the_stack |
#include "gpu/coredepth/sweep.hpp"
#include "backend/common/coredepth/sphereSweepParams.h"
#include "../surface.hpp"
#include "gpu/memcpy.hpp"
#include "core/transformGeoParams.hpp"
#include "libvideostitch/geometryDef.hpp"
#include "libvideostitch/panoDef.hpp"
#include "backend/cuda/deviceBuffer.hpp"
#include "b... | the_stack |
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "mog2.hpp"
namespace cv
{
namespace cuda
{
namespace device
{
namespace mog2
{
///////////////////////////////////////////////////////////////
... | the_stack |
// HACK TESTING
#include <iostream>
using std::cout;
using std::endl;
// 2x 4-bit --> 2x 8-bit (unsigned)
inline __device__ void gunpack(uint8_t ival,
uint16_t& oval,
bool byte_reverse,
bool align_msb,
... | the_stack |
// HIP does not support cusolver
#include <thrust/device_vector.h>
#include <algorithm>
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/qr_op.h"
#include "paddle/fluid/platform/dynload/cusolver.h"
// Reuse some helper functions from svd
#include "paddle/fluid/operators/svd_helper... | the_stack |
#include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
#define BLOCK_X 8
#define BLOCK_Y 8
#define BLOCK_Z 4
using namespace cv;
// The algorithm itself performs well, but memory allocation is a problem.
// ... | the_stack |
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
// Utilities and system includes
#include <helper_string.h> // helper for string parsing
#include <helper_image.h> // helper for image and data comparison
#include <helper_cuda.h> // helper for cuda error checking functions
const char *sSD... | the_stack |
#define THRD_PER_BLOCK 1024 // Number of threads per block (should always be a multiple of 32)
using isce3::cuda::signal::gpuLooks;
/**
input:
hi_res
output:
lo_res
*/
template <typename T>
void gpuLooks<T>::multilook(std::valarray<T> &hi_res,
std::valarray<T> &lo_res)
{
// allocate lo res output on device
T *d_l... | the_stack |
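Multilooking reduces a high-resolution image to a lower-resolution one by averaging non-overlapping windows. The device kernel itself is cut off above; a hypothetical kernel matching that description (names and parameters are illustrative):

```cuda
// Each output pixel averages a rowLooks x colLooks window of the input.
template <typename T>
__global__ void multilookKernel(const T *hi_res, T *lo_res,
                                int loWidth, int loHeight, int hiWidth,
                                int rowLooks, int colLooks)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= loWidth || y >= loHeight) return;
  T sum = T(0);
  for (int i = 0; i < rowLooks; ++i)
    for (int j = 0; j < colLooks; ++j)
      sum += hi_res[(y * rowLooks + i) * hiWidth + (x * colLooks + j)];
  lo_res[y * loWidth + x] = sum / T(rowLooks * colLooks);
}
```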
#pragma once
//#define SNN_DEBUG 1
#ifdef SNN_DEBUG
#define debug(a...) fprintf(stderr, a)
#else
#define debug(a...)
#endif
//#define SNN_DEBUG2
#ifdef SNN_DEBUG2
#define debug2(a...) fprintf(stderr, a)
#else
#define debug2(a...)
#endif
#include <iostream>
#include <unordered_set>
#include <vector>
... | the_stack |
#include <cuda.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <vector>
#include <memory>
#include <algorithm>
#include <immintrin.h>
#include "cudautils.h"
#define RUNCPU 1
#define CHECK 1
#define NPTS (2048*8)
#define NDIM 128
#define M1W 128
#define M2W 16
#define M2H ... | the_stack |
#include "caffe/layers/base_data_layer.hpp"
#include "caffe/layers/suncg_data_layer.hpp"
#include "caffe/util/rng.hpp"
#include "suncg_util.hpp"
#include "suncg_fusion.hpp"
// #include "suncg_fusion.cu"
DEFINE_bool(shuran_chatter, false,
"If you are Shuran and want chatter, turn this on.");
using std::ve... | the_stack |
using namespace cupoch;
using namespace cupoch::geometry;
namespace {
struct compute_sphere_vertices_functor {
compute_sphere_vertices_functor(int resolution, float radius)
: resolution_(resolution), radius_(radius) {
step_ = M_PI / (float)resolution;
};
const int resolution_;
const fl... | the_stack |
#pragma once
#include "backend/common/imageOps.hpp"
#include <stdint.h>
namespace VideoStitch {
namespace Image {
inline __device__ uint32_t YRGBDiffToRGBA(unsigned char y, const int3& rgbDiff) {
const int32_t ya = (1164 * (y - 16)) / 1000;
return RGBA::pack(clamp8(ya + rgbDiff.x), clamp8(ya + rgbDiff.y), clamp... | the_stack |
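The luma line applies the BT.601 expansion 1.164·(Y−16) in /1000 fixed point. For context, the chroma difference terms that typically feed `rgbDiff` use the matching integer coefficients; a hedged sketch (the helper name is illustrative, and this library's exact coefficients may differ):

```cuda
// BT.601 chroma offsets in the same /1000 fixed-point convention as the luma line.
inline __device__ int3 uvToRGBDiff(unsigned char u, unsigned char v) {
  const int d = u - 128, e = v - 128;
  return make_int3((1596 * e) / 1000,            // R: +1.596 * (V - 128)
                   (-391 * d - 813 * e) / 1000,  // G: -0.391 * (U - 128) - 0.813 * (V - 128)
                   (2018 * d) / 1000);           // B: +2.018 * (U - 128)
}
```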
namespace faiss { namespace gpu {
//
// IVF list length update
//
__global__ void
runUpdateListPointers(Tensor<int, 1, true> listIds,
Tensor<int, 1, true> newListLength,
Tensor<void*, 1, true> newCodePointers,
Tensor<void*, 1, true> newIndexPointers,
... | the_stack |
namespace hpc {
namespace rll {
namespace cuda {
// random seeding, kept consistent with caffe
int64_t cluster_seedgen(void) {
int64_t s, seed, pid;
FILE* f = fopen("/dev/urandom", "rb");
if (f && fread(&seed, 1, sizeof(seed), f) == sizeof(seed)) {
fclose(f);
return seed;
}
LOG(INFO) << "S... | the_stack |
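The truncated `LOG(INFO)` line begins the fallback path taken when /dev/urandom is unavailable. A plausible completion mirroring Caffe's seeding (which the comment above says this follows), combining pid and wall-clock time into a seed:

```cpp
// Fallback: derive a seed from pid and time (Caffe-style); uses the s, seed,
// pid locals declared at the top of cluster_seedgen.
LOG(INFO) << "System entropy source not available, "
             "using fallback algorithm to generate seed instead.";
if (f)
  fclose(f);
pid = getpid();
s = time(NULL);
seed = std::abs(((s * 181) * ((pid - 83) * 359)) % 104729);
return seed;
```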
#include <algorithm>
#include <string>
#include <vector>
#include <map>
#include <assert.h>
#include <helper_timer.h>
#include "../../nvmatrix/include/nvmatrix.cuh"
//#include "experimental/akrizhevsky/g3/mactruck-gpu-tests/gpu_util.cuh"
#include "weights.cuh"
#include "convnet.cuh"
#include "cost.cuh"
#include "neuro... | the_stack |
#include <nvidia/helper_cuda.h>
#define PI 3.141592653589793f
#define BLOCK_SIZE 256
// step size of the normals
// for PointXYZI
#define X_STEP 8
#define X_OFFSET 0
// for PointXYZ
//#define X_STEP 4
//#define X_OFFSET 0
__constant__ float c_rgbForMFaxes[7];
extern "C" void loadRGBvaluesForMFaxes()
{
float ... | the_stack |
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <c10/cuda/CUDAStream.h>
#include <c10/util/Optional.h>
#include "inplace_abn.h"
#include "utils.h"
#include "cuda_utils.cuh"
#include "inplace_abn_kernels.cuh"
#include "dispatch.h"
/***************************... | the_stack |
#define GET_BLOB_ADDRESS(ptr, offset) (&((ptr)[(offset)/sizeof((ptr)[0])]))
#define GET_ARRAY_CAPACITY(ptr) (((long *)(ptr))[0])
#define GET_ARRAY_LENGTH(ptr) (((long *)(ptr))[1])
#define GET_ARRAY_BODY(ptr) (&((ptr)[128/sizeof((ptr)[0])]))
#define SET_ARRAY_CAPACITY(ptr, val) { (((long *)(ptr))[0]) ... | the_stack |
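These macros impose a simple layout on a raw blob: a header whose first two `long`s hold capacity and length, with the element payload starting at byte offset 128 (`GET_ARRAY_BODY` divides 128 by the element size, so the byte offset is the same for every type). A hypothetical usage sketch (`blob_storage` is illustrative):

```cpp
// Hypothetical: interpret a raw double blob with the macros above.
double *arr = reinterpret_cast<double *>(blob_storage);
SET_ARRAY_CAPACITY(arr, 100);        // writes header long[0]
long len = GET_ARRAY_LENGTH(arr);    // reads header long[1]
double *body = GET_ARRAY_BODY(arr);  // payload starts at byte offset 128
for (long i = 0; i < len; ++i)
  body[i] *= 2.0;
```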
#define BIN_SIZE 32
using namespace std;
#define CHECK(res) if(res!=cudaSuccess){exit(-1);}
#define BLOCKNUM 1024*64
#define THREADNUM 128
__global__ void _k_CACU_SUM_SIZE_GPU(float_t **data, int num, int sum_size,
int length, int out_length, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
... | the_stack |
namespace RPU {
/******************************************************************************************/
/* TransferRPUDeviceCuda
CUDA implementation of TransferRPUDevice
*/
template <typename T> void TransferRPUDeviceCuda<T>::initialize(bool transfer_columns) {
transfer_pwu_ =
RPU::make_unique<Pul... | the_stack |
*
* \brief CUDA-specific routines for the GPU implementation of SETTLE constraints algorithm.
*
*
* \author Artem Zhmurov <zhmurov@gmail.com>
*
* \ingroup module_mdlib
*/
#include "gmxpre.h"
#include "settle_gpu_internal.h"
#include <assert.h>
#include <stdio.h>
#include <cmath>
#include <algorithm>
#includ... | the_stack |
#include <cuda.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
__device__ static float atomicMin(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do
{
assumed = old;
old = ::atomicCAS(address_as_i, assumed, __float_as_int(::fm... | the_stack |
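The loop is cut off mid-expression; the idiom being built is the standard CAS-based float atomic: reinterpret the float as an int, compute the min, and retry until no other thread has intervened. A completed sketch of the same idiom (caveat: like most float-as-int CAS tricks, it does not handle NaN specially):

```cuda
__device__ static float atomicMinFloat(float *address, float val)
{
  int *address_as_i = (int *)address;
  int old = *address_as_i, assumed;
  do {
    assumed = old;
    old = ::atomicCAS(address_as_i, assumed,
                      __float_as_int(::fminf(val, __int_as_float(assumed))));
  } while (assumed != old);    // retry if another thread updated the value
  return __int_as_float(old);
}
```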
#include "nnnormalize.hpp"
#include "impl/dispatcher.hpp"
#include <cmath>
#include <cassert>
#include <cstring>
using namespace vl ;
using namespace vl::nn ;
using namespace vl::impl ;
template<vl::DeviceType deviceType, vl::DataType dataType> struct LRNForward ;
template<vl::DeviceType deviceType, vl::DataType data... | the_stack |
namespace MegBA {
namespace geo {
namespace {
template <typename T>
__global__ void RadialDistortionNoGradKernel(
const int nItem, const int N, const T *px_valueDevicePtr,
const T *py_valueDevicePtr, const T *px_gradDevicePtr,
const T *py_gradDevicePtr, const T *f_ptr, const T *k1_ptr, const T *k2_ptr,
... | the_stack |
#include <faiss/gpu/GpuResources.h>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/impl/L2Norm.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/MatrixMult.cuh>
#include <faiss/gpu/utils/Pair.cuh>
#inclu... | the_stack |
* \file
* Common type manipulation (metaprogramming) utilities
*/
#pragma once
#include <iostream>
#include <limits>
#include <cfloat>
#if (__CUDACC_VER_MAJOR__ >= 9)
#include <cuda_fp16.h>
#endif
#include "util_macro.cuh"
#include "util_arch.cuh"
#include "util_namespace.cuh"
/// Optio... | the_stack |
#include "sph/sph_fugue.h"
#include "cuda_helper.h"
#include <host_defines.h>
#define USE_SHARED 1
uint32_t *d_fugue256_hashoutput[8];
uint32_t *d_resultNonce[8];
__constant__ uint32_t GPUstate[30]; // Single GPU
__constant__ uint32_t pTarget[8]; // Single GPU
texture<unsigned int, 1, cudaReadModeElementType> mixT... | the_stack |
\brief CUTLASS Library handle.
*/
#include <iostream>
#include <stdexcept>
#include <cstdint>
#include "cutlass/library/handle.h"
#include "cutlass/library/singleton.h"
#include "cutlass/library/util.h"
namespace cutlass {
namespace library {
/////////////////////////////////////////////////////////////////////////... | the_stack |
// Bilinear sampling is done in BHWD (coalescing is not obvious in BDHW)
// we assume BHWD format in inputImages
// we assume BHW(YX) format on grids
__device__ void getTopLeft(float x, int xOut, int width, int& point, float& weight)
{
/* for interpolation :
stores in point and weight :
- the x-coordina... | the_stack |
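A hedged completion of the helper the comment describes, following the common spatial-transformer convention that grid coordinates are normalized to [-1, 1] (the `xOut` parameter is kept only to match the snippet's signature):

```cuda
__device__ void getTopLeft(float x, int xOut, int width, int &point, float &weight)
{
  float xcoord = (x + 1.0f) * (width - 1) / 2.0f;   // map [-1, 1] -> [0, width - 1]
  point = static_cast<int>(floorf(xcoord));         // left (or top) integer sample
  weight = 1.0f - (xcoord - point);                 // bilinear weight of that sample
}
```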
#include <cugraph/algorithms.hpp>
#include <cugraph/graph_view.hpp>
#include <cugraph/prims/copy_to_adj_matrix_row_col.cuh>
#include <cugraph/prims/count_if_e.cuh>
#include <cugraph/prims/reduce_op.cuh>
#include <cugraph/prims/row_col_properties.cuh>
#include <cugraph/prims/transform_reduce_e.cuh>
#include <cugraph/pri... | the_stack |
#include "common.h"
#include <math.h>
#include <stdio.h>
#define RELERROR 1.0e-12 /* smallest relative error we want */
#define MAXPOW 32 /* max power of 10 we wish to search to */
#define MAXIT 800 /* max number of iterations */
#define MAX_RECURSE_DEPTH 10 /* maximum recursion d... | the_stack |
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/layers/spixel_feature_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SpixelFeatureXYForwardGPU(const int nthreads,
const Dtype* index_data, const Dtype ignore_idx_value,
const int out_dim, const in... | the_stack |
#include "TensorCUDA.hpp"
#include <stdio.h>
#include "Macros.hpp"
#include "ensure.hpp"
#include <memory>
////////////////////////////////////////////////////////////
/// NAMESPACE AI
////////////////////////////////////////////////////////////
namespace ai
{
/////////////////////////////////////////////////////////... | the_stack |
#include <typeinfo>
// number of all triangles for topologies up to __3__ triangles
#define NumTri 220
#define NumTop 96
__constant__ float eps=1e-6;
__constant__ float thres=1e-4;
// up to __3__ triangles
__constant__ int acceptTopologyWithFlip[2][96]={ {1, 2, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 19, 25... | the_stack |
const int MAX_TEXTURES = 10;
__constant__ float Acuda[16];
void init_Acuda()
{
static bool initialized(false);
if (!initialized)
{
float A_h[16] = { -1.0/6.0, 3.0/6.0, -3.0/6.0, 1.0/6.0,
3.0/6.0, -6.0/6.0, 0.0/6.0, 4.0/6.0,
-3.0/6.0, 3.0/6.0, 3.0/6.0, 1.0/6.0,... | the_stack |
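The 4×4 `Acuda` matrix is the uniform cubic B-spline basis divided by 6: row r holds the polynomial coefficients (in t³, t², t, 1 order) of the blending weight for control point r, e.g. row 0 expands to (1−t)³/6. A sketch of how such a matrix is typically applied, assuming row-major storage (the function is illustrative):

```cuda
// Evaluate one cubic B-spline segment from four control points P[0..3].
__device__ float evalCubicBSpline(const float A[16], float t, const float P[4])
{
  const float tp[4] = { t * t * t, t * t, t, 1.0f };  // power basis
  float val = 0.0f;
  for (int r = 0; r < 4; ++r) {
    float w = 0.0f;
    for (int c = 0; c < 4; ++c)
      w += A[r * 4 + c] * tp[c];   // blending weight for P[r]
    val += w * P[r];
  }
  return val;
}
```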
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
int nblock_size = 64;
int ngrid_size = 1;
int maxgsx = 65535;
int mmcc = 0;
static int devid;
static cudaError_t crc;
#define MAXSTREAMS 4
static cudaStream_t streams[MAXSTREAMS] = {NULL,NULL,NULL,NULL};
/* Prototypes for Fortran function called b... | the_stack |
namespace lbvh
{
namespace detail
{
struct node
{
std::uint32_t parent_idx; // parent node
std::uint32_t left_idx; // index of left child node
std::uint32_t right_idx; // index of right child node
std::uint32_t object_idx; // == 0xFFFFFFFF if internal node.
};
// a set of pointers to use it on devi... | the_stack |
* @file
* cub::DeviceSegmentedSort provides device-wide, parallel operations for
* computing a batched sort across multiple, non-overlapping sequences of
* data items residing within device-accessible memory.
*/
#pragma once
#include <cub/config.cuh>
#include <cub/device/dispatch/dispatch_segmented_sort.cuh>
#inc... | the_stack |
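Like the other cub device-wide primitives, `DeviceSegmentedSort` follows the two-phase pattern: call once with a null temp buffer to query the scratch size, allocate, then call again to sort. A minimal usage sketch, assuming `d_keys_in`/`d_keys_out` hold `num_items` keys and `d_offsets` holds `num_segments + 1` segment boundaries:

```cuda
void  *d_temp = nullptr;
size_t temp_bytes = 0;
// First call only computes the required temporary storage size.
cub::DeviceSegmentedSort::SortKeys(d_temp, temp_bytes,
                                   d_keys_in, d_keys_out, num_items, num_segments,
                                   d_offsets, d_offsets + 1);
cudaMalloc(&d_temp, temp_bytes);
// Second call performs the batched segmented sort.
cub::DeviceSegmentedSort::SortKeys(d_temp, temp_bytes,
                                   d_keys_in, d_keys_out, num_items, num_segments,
                                   d_offsets, d_offsets + 1);
cudaFree(d_temp);
```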
#include "k2/csrc/array_ops.h"
#include "k2/csrc/context.h"
#include "k2/csrc/fsa_algo.h"
#include "k2/csrc/fsa_utils.h"
namespace k2 {
// Caution: this is really a .cu file. It contains mixed host and device code.
class TopSorter {
public:
/**
Topological sorter object. You should call TopSort() after
... | the_stack |
#include <cmath>
#include <iostream>
#include <algorithm>
#include <cuda_runtime_api.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <QDebug>
#include <QFuture>
#include <QMutex>
#include <QtConcurrent/QtConcurrentRun>
using std::make_shared;
using std::pair;
using... | the_stack |
#include "nnbnorm.hpp"
#include "impl/dispatcher.hpp"
#include <cassert>
#include <cstring>
#include <cmath>
#include <cstdlib>
#include <limits>
#include <algorithm>
#include <iostream>
using namespace vl ;
using namespace vl::nn ;
using namespace vl::impl ;
template<DeviceType deviceType, DataType dataType> struct ... | the_stack |
///////////////////////////////////////////////////////////////////// Headers //
#include "GDelKernels.h"
#include "Geometry.h"
//////////////////////////////////////////////////// Exclusive-Inclusive Scan //
#define LOG2_WARP_SIZE 5U
#define WARP_SIZE (1U << LOG2_WARP_SIZE)
inline __device__ int warpScan... | the_stack |
#include <type_traits> //std::remove_cv
namespace xlib {
#define Store_MACRO(CACHE_MOD, ptx_modifier) \
\
template<> \
__device__ __forc... | the_stack |
namespace nv {
template<typename value_type>
struct ReplaceOp {
constexpr static value_type IDENTITY{0};
__host__ __device__ value_type operator()(value_type new_value, value_type old_value)
{
return new_value;
}
};
template<typename Table>
__global__ void insert_kernel(Table* table,
... | the_stack |
#pragma once
#include <gunrock/util/track_utils.cuh>
#include <gunrock/app/problem_base.cuh>
#include <gunrock/oprtr/1D_oprtr/for_all.cuh>
namespace gunrock {
namespace app {
namespace pr {
/**
* @brief Speciflying parameters for PR Problem
* @param parameters The util::Parameter<...> structure holding all param... | the_stack |
namespace tensorflow
{
using GPUDevice = Eigen::GpuDevice;
using namespace effectivetransformer;
namespace functor
{
template <typename T>
struct BertTransformerOpFunctor<GPUDevice, T>
{
typedef typename TransformerTFTraits<T>::DataType DataType_;
static Status Compute(OpKernelContext *context,
... | the_stack |
bert::bert (bool BERT_Large,
int num_gpu,
std::string dir,
int max_batchsize,
int max_seq_length) {
checkCudaErrors(cudaSetDevice(num_gpu));
handle = new global_handle(BERT_Large, dir, max_batchsize, max_seq_length);
init_ops();
}
void ber... | the_stack |
#include <cstdio>
#include <utility_kernels.h>
#include <optical_flow_kernels.h>
#define TWO_PI 6.28318530717958623199592694f
#define DC_THR 0.00001f
namespace vision {
// texture references for 2D float Gabor filter outs
// and previous scale optic flow field
texture<float2, 2, cudaReadModeElementType> d_Gabor_text... | the_stack |
std::map<CUdevice, int> nervana_sm_counts_;
std::map<std::string, CUfunction> nervana_kernels_;
std::vector<CUmodule> nervana_modules_;
//for when we need to modify the above data structures
std::mutex nervana_load_kernels_mutex_;
std::mutex nervana_sm_count_mutex_;
extern "C" bool nervana_loadKernels(const char* con... | the_stack |
#include <ATen/cuda/CUDAContext.h>
#include <pybind11/pybind11.h>
#include <torch/extension.h>
#include <unordered_map>
namespace py = pybind11;
namespace minkowski {
namespace detail {
template <typename src_type, typename dst_type>
__global__ void cuda_copy_n(src_type const *src, uint32_t N, dst_type *dst) {
CU... | the_stack |
* \file
* cub::BlockPartitionTiles implements a stateful abstraction of CUDA thread blocks for participating in device-wide list partitioning.
*/
#pragma once
#include <iterator>
#include "scan_tiles_types.cuh"
#include "../../thread/thread_operators.cuh"
#include "../../block/block_load.cuh"
#include "../../block... | the_stack |
template <typename scalar_t>
__device__ scalar_t modulated_deform_conv3d_im2col_trilinear(
const scalar_t *bottom_data, const int data_width,const int data_length,
const int height, const int width, const int length,scalar_t h, scalar_t w,scalar_t l)
{
int h_low = floor(h);
int w_low = floor(w);
int l_low = ... | the_stack |
#include "lite/kernels/cuda/fc_compute.h"
#include <string>
#include "lite/backends/cuda/cuda_utils.h"
#include "lite/core/op_registry.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
template <typename T>
struct FcTypeTraits;
template <>
struct FcTypeTraits<float> {
typedef float4 Typ... | the_stack |
#include <cub/cub.cuh>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <crc/crc.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <nvbio/sufsort/sufsort.h>
#include <nvbio/sufsort/sufsort_utils.h>
#include <nvbio/basic/exceptions.h>
#include <nvbio/basic/timer.h>
#include <nvbio... | the_stack |
#pragma once
#include <gunrock/graph/gp.cuh>
#include <gunrock/oprtr/advance/advance_base.cuh>
#include <gunrock/app/enactor_helper.cuh>
#include <gunrock/app/enactor_kernel.cuh>
namespace gunrock {
namespace app {
using IterationFlag = uint32_t;
enum : IterationFlag {
Use_SubQ = 0x01,
Use_FullQ = 0x02,
Push ... | the_stack |
\brief Command line options for performance test program
*/
#include <algorithm>
#include "cutlass/cutlass.h"
#include "cutlass/version.h"
#include "cutlass/library/util.h"
#include "options.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
na... | the_stack |
__device__ float clamp(float a, float minv, float maxv)
{
return fminf(fmaxf(minv, a),maxv);
}
__device__ float3 clampv3(float3 in, float3 minv, float3 maxv)
{
float xout = clamp(in.x,minv.x,maxv.x);
float yout = clamp(in.y,minv.y,maxv.y);
float zout = clamp(in.z,minv.z,maxv.z);
return make_float3(... | the_stack |
#include <torch/extension.h>
// #include <cuda.h>
// #include <cuda_runtime.h>
// #include <iostream>
// Copied from fast-soft-sort (https://bit.ly/3r0gOav) with the following modifications:
// - replace numpy functions with torch equivalents
// - re-write in CUDA
// - return solution in place
// - added backward... | the_stack |
#include <cuda.h>
#include "Utility.h"
#include "math.h" // CUDA math library
#include "qr.cuh"
#define gone 1065353216                  // bit pattern of 1.0f (0x3F800000)
#define gsine_pi_over_eight 1053028117   // bit pattern of sin(pi/8) ~= 0.3826834f
#define gcosine_pi_over_eight 1064076127 // bit pattern of cos(pi/8) ~= 0.9238795f
#define gone_half 0.5f
#define gsmall_number 1.e-12f
#define gtiny_number 1.e-20f
#define gfour_gamma_squared 5.8284273... | the_stack |
#include <cfloat>
#include "utility.hpp"
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
/**************************** function declaration **************************/
RetCode convertTo(const uchar* src, int rows, int cols, int channels,
int src_stride, float* dst, in... | the_stack |
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "common.cuh"
#include <kat/tuple.hpp>
#include <cuda/api_wrappers.hpp>
#include <type_traits>
#include <cstdint>
#include <vector>
#include <algorithm>
#include <string>
#include <utility>
//#include "EASTLTest.h"
//EA_DISABLE_VC_WARNING(4623 4625 4413 4510)
name... | the_stack |
// This macro controls shared memory usage. If set to 1, the kernel loads the whole feature map
// into shared memory for reuse; if set to 0, the kernel loads data from global memory directly.
// RoI pooling performance is data dependent, so test which value works better for your data.
// If all bboxes are very small, 0... | the_stack |
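A hypothetical sketch of the pattern such a macro selects between (the macro and kernel names here are illustrative, not the original's):

```cuda
#define ROIPOOL_USE_SMEM 1  // illustrative stand-in for the macro described above

__global__ void roiPoolKernel(const float *fmap, int fmapSize, float *out)
{
#if ROIPOOL_USE_SMEM
  extern __shared__ float s_fmap[];   // whole feature map staged once for reuse
  for (int i = threadIdx.x; i < fmapSize; i += blockDim.x)
    s_fmap[i] = fmap[i];
  __syncthreads();
  const float *src = s_fmap;          // subsequent reads hit shared memory
#else
  const float *src = fmap;            // read global memory directly
#endif
  // ... pool each RoI bin from src ...
  (void)src; (void)out;
}
```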
#include "k2/csrc/context.h"
#include "k2/csrc/fsa_utils.h"
namespace k2 {
// field separator within a line for a text form FSA
static constexpr const char *kDelim = " \t";
// Convert a string to an integer. Abort the program on failure.
static int32_t StringToInt(const std::string &s) {
K2_CHECK(!s.empty());
b... | the_stack |
for (int x = 0; x < N && x < valid_examples; ++x) { expr; }
__device__ void
train_N_examples(const float * input,
const int *labels,
const float * example_weights,
int valid_examples,
int num_layers,
float * scratch, // shared memo... | the_stack |
#define WARP_SIZE 32
__device__
unsigned int scanwarp(unsigned int val, volatile unsigned int* sData, const int maxlevel)
{
// The following is the same as 2 * RadixSort::WARP_SIZE * warpId + threadInWarp =
// 64*(threadIdx.x >> 5) + (threadIdx.x & (RadixSort::WARP_SIZE - 1))
int localId = threadIdx.x;
int idx... | the_stack |
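A hedged completion of `scanwarp` in the classic CUDPP style the comment describes: each warp owns 64 shared-memory slots, the first 32 of which are zeroed padding so `sData[idx - offset]` is always a valid read and no `lane == 0` branch is needed. Note this relies on pre-Volta implicit warp-synchronous execution:

```cuda
__device__ unsigned int scanwarp(unsigned int val, volatile unsigned int *sData,
                                 const int maxlevel)
{
  int localId = threadIdx.x;
  int idx = 2 * WARP_SIZE * (localId >> 5) + (localId & (WARP_SIZE - 1));
  sData[idx] = 0;                 // each lane zeroes one padding slot
  idx += WARP_SIZE;               // move into the warp's data half
  unsigned int t = sData[idx] = val;
  for (int i = 0; i <= maxlevel; ++i)
    sData[idx] = t = t + sData[idx - (1 << i)];  // reads from padding yield 0
  return t - val;                 // convert inclusive scan to exclusive
}
```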
#include "ImageMatch.h"
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Macro: FAST_RUN
// When this switch is on, the Kernel does not search the neighborhood of the
// current point; this speeds up execution, but matching accuracy is lower.
#define FAST_RUN
#ifdef FAST_RUN
// Device data: _tpl1x1Gpu
// To speed up the algorithm, this data is used in place of _tpl3x3Gpu
static __device__ int _tpl1x1Gpu[] = ... | the_stack |