#include "flowfilter/gpu/util.h"
#include "flowfilter/gpu/error.h"
#include "flowfilter/gpu/propagation.h"
#include "flowfilter/gpu/device/propagation_k.h"
#include "flowfilter/gpu/device/misc_k.h"
namespace flowfilter {
namespace gpu {
FlowPropagator::FlowPropagator() :
Stage() {
__configured = false;
_... | the_stack |
#include <assert.h>
#include <cuda_fp16.h>
#include <cfloat>
#include <limits>
#include <stdint.h>
#include <c10/macros/Macros.h>
namespace {
int log2_ceil(int value) {
int log2_value = 0;
while ((1 << log2_value) < value) ++log2_value;
return log2_value;
}
template<typename T>
str... | the_stack |
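In dispatchers like the one this row comes from, log2_ceil typically rounds a run length up to the next power of two so a fixed set of template instantiations can cover every input size. A small illustrative usage, with assumed names, building on the log2_ceil defined above:

// Round the row length up to a power of two (illustrative helper, not from
// the truncated source); e.g. 1000 -> 1024.
inline int nextPowerOfTwo(int softmax_elements) {
    int log2_elements = log2_ceil(softmax_elements);
    return 1 << log2_elements;
}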
#include "../Observers/ColorScaleObserverSingle.cu"
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
extern "C"
{
// PHYSICS PART ----------------------------
__global__ void SetForcesToZeroKernel(
float *force,... | the_stack |
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include <builtin_types.h>
#include <vector_functions.h>
#include <float.h>
#include "SegmentDefs.cu"
#include "VisionMath.cu"
extern "C"
{
// neighborhood index offsets for the enforce-connectivity step
const int dx4[4] = {-1, 0, 1, 0};
co... | the_stack |
#include <df/camera/poly3.h>
#include <df/surface/marchingCubesTables.h>
#include <df/util/cudaHelpers.h>
#include <df/util/eigenHelpers.h>
#include <df/voxel/color.h>
#include <df/voxel/probability.h>
#include <df/voxel/compositeVoxel.h>
#include <df/voxel/tsdf.h>
#include <df/transform/rigid.h>
#include <thrust/devi... | the_stack |
#include <cuda_runtime_api.h>
#include <visionaray/cuda/cast.h>
#include <visionaray/math/unorm.h>
#include <visionaray/math/vector.h>
#include <gtest/gtest.h>
using namespace visionaray;
//-------------------------------------------------------------------------------------------------
// Test casts between CUDA ... | the_stack |
* \file dnn/src/cuda/conv_bias/quint4x4x32_wmma/wmma_conv_integer_u4_fhxfw.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the L... | the_stack |
#include "cudnnUtils.h"
#include <ops/declarable/helpers/convolutions.h>
namespace sd {
namespace ops {
namespace platforms {
//////////////////////////////////////////////////////////////////////////
void checkConv2dCUDNNPadAsymmetric(NDArray* &input, NDArray* &gradI,
... | the_stack |
* \brief This file wraps CUFFT functionality into the Bifrost C++ API.
*/
/*
TODO: Implicitly padded/cropped transforms using load callback
*/
#include <bifrost/fft.h>
#include "assert.hpp"
#include "utils.hpp"
#include "cuda.hpp"
#include "trace.hpp"
#include "fft_kernels.h"
#include "ShapeIndexer.cuh"
#include ... | the_stack |
enum class ScaleType
{
SINGLE_SCALE,
PER_WEIGHT_CHANNEL,
PER_ACTIVATION_CHANNEL
};
ScaleType get_scale_type(const at::Tensor& input, const at::Tensor& input_low, const at::Tensor& input_range)
{
TORCH_CHECK(input_low.dim() == input_range.dim(), "input_low and input_range have different dimensionality"... | the_stack |
Parallel reduction kernels
*/
#ifndef _REDUCE_KERNEL_H_
#define _REDUCE_KERNEL_H_
#include <cuda_runtime_api.h>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n/2 threads
- only works f... | the_stack |
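The comment above summarizes the classic tree-based reduction. A minimal sketch of the technique it describes — shared-memory pairwise summation with the active thread count halving each step — might look like the following; the kernel name and launch shape are illustrative, not from the truncated source.

#include <cuda_runtime.h>

// Each block sums blockDim.x * 2 elements; as the note above says, this form
// only works for power-of-2 block sizes.
__global__ void reduceSum(const float *in, float *out, int n) {
    extern __shared__ float sdata[];   // launch with blockDim.x * sizeof(float)
    unsigned tid = threadIdx.x;
    unsigned i = blockIdx.x * (blockDim.x * 2) + tid;
    // Each thread loads and adds two elements, so only n/2 threads are needed.
    sdata[tid] = (i < n ? in[i] : 0.f) +
                 (i + blockDim.x < n ? in[i + blockDim.x] : 0.f);
    __syncthreads();
    // log2(blockDim.x) halving steps.
    for (unsigned s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = sdata[0];   // one partial sum per block
}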
* @author Istvan Reguly
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#ifdef KNL
#include <hbwmalloc.h>
#else
#define hbw_malloc malloc
#define hbw_free free
#endif
struct full_data
{
int sizex;
int sizey;
int Nmats;
double * __restrict__ rho;
double *... | the_stack |
template<int SHIFT>
__forceinline__ __device__ int ACCS(const int i)
{
return (i & ((LMEM_STACK_SIZE << SHIFT) - 1))*blockDim.x + threadIdx.x;
}
#define BTEST(x) (-(int)(x))
texture<float4, 1, cudaReadModeElementType> texNodeSize;
texture<float4, 1, cudaReadModeElementType> texNodeCenter;
texture<float4, 1, cudaRe... | the_stack |
namespace xlib {
namespace detail {
template<int WARP_SZ, typename T>
struct WarpSegReduceHelper;
template<int WARP_SZ>
struct WarpSegReduceHelper<WARP_SZ, int> {
static const unsigned member_mask = xlib::member_mask<WARP_SZ>();
__device__ __forceinline__
static void add(int& value, int max_lane) {
... | the_stack |
#include <cuda.h>
#include <cuda_runtime.h>
#include <helpers/DebugHelper.h>
#include <helpers/TAD.h>
#include <helpers/shape.h>
#include <loops/summarystatsreduce.h>
#include <ops/specials_cuda.h>
#include <system/Environment.h>
#include <system/op_boilerplate.h>
#include <types/float16.h>
#include <types/types.h>
us... | the_stack |
* COMPILATION TIP
* nvcc main_draft1.cu ../grid2d/grid2d.cu ../grid2d/sysparam.cu ../dynam/XORMRGgens.cu ../dynam/metropolis.cu ../common/gridsetup.cu -o main
*
* */
#include "../grid2d/grid2d.h" // Spins2d (struct)
#include "../grid2d/sysparam.h" // Sysparam, Avg, TransProb, Sysparam_ptr, Avg_ptr, TransProb_pt... | the_stack |
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <mex.h>
#include "kernels.cx"
#define UseCudaOnDoubles USE_DOUBLE_PRECISION
///////////////////////////////////////
///// CONV ////////////////////////////
///////////////////////////////////////
// thread kernel: computation of gammai = sum_j k(xi,y... | the_stack |
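The truncated comment names a kernel-sum computation, gamma_i = sum_j k(x_i, y_j) * beta_j. A hedged one-dimensional sketch of that computation with a Gaussian kernel (all names hypothetical; a production version would tile y and beta through shared memory):

__global__ void gaussConvKernel(const float *x, const float *y,
                                const float *beta, float *gamma,
                                float ooSigma2, int nx, int ny) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= nx) return;
    float acc = 0.f, xi = x[i];
    for (int j = 0; j < ny; ++j) {
        float d = xi - y[j];                       // 1-D points for brevity
        acc += expf(-d * d * ooSigma2) * beta[j];  // k(xi, yj) * betaj
    }
    gamma[i] = acc;
}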
#include <stdio.h>
#define COEFF_L 0.16666666f
#define COEFF_C 0.66666666f
#define COEFF_B 0.83333333f
__device__ __constant__ int c_VoxelNumber;
__device__ __constant__ int3 c_ImageSize;
// Bins: Need 4 values for max 4 channels.
__device__ __constant__ int c_firstTargetBin;
__device__ __constant__ int c_secondTarg... | the_stack |
#include <common/types.h>
#include <cub/cub.cuh>
#include <cuda/cub_iterator.cuh>
#include <cuda/cudafuncs.h>
#include <map>
namespace sqaod_cuda {
namespace sq = sqaod;
template<class V, class InIt, class OutIt, class OffIt, int vecLen>
struct DeviceSegmentedSumType : sq::NullBase {
typedef DeviceSegmentedSumTy... | the_stack |
#include "cupoch/geometry/trianglemesh.h"
#include "cupoch/io/class_io/trianglemesh_io.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/filesystem.h"
#define TINYOBJLOADER_IMPLEMENTATION
#include "tiny_obj_loader.h"
namespace cupoch {
namespace io {
bool ReadTriangleMeshFromOBJ(const std::string& file... | the_stack |
#pragma once
#include <math/vector.h>
#include <math/matrix.h>
#include <meta_utils.h>
#include <utils.h>
#include <cstdint>
#include "config.h"
struct BlockRasterizerId
{
__device__
static int rasterizer()
{
return blockIdx.x;
}
};
template <int NUM_BLOCKS, int x_max, int y_max, int bin_si... | the_stack |
#define NUM_THREADS 64
// #define RADIUS 32
__global__ void se3_build_forward_kernel(
const torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> attention,
const torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> transforms,
const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrT... | the_stack |
#pragma once
#include <gunrock/util/test_utils.cuh>
#include <gunrock/util/multithread_utils.cuh>
namespace gunrock {
namespace util {
namespace scan {
template <typename _SizeT, int Block_N>
__device__ __forceinline__ void ScanLoop(_SizeT* s_Buffer, _SizeT* Sum,
_SizeT Sum_O... | the_stack |
This example shows how to run convolution kernels using functions and data structures
provided by CUTLASS using tensor cores, which we run on an NVIDIA Turing GPU.
Writing a single high-performance convolution kernel is hard but doable, whereas writing
high-performance kernels at scale that work for multiple problem... | the_stack |
using namespace std;
typedef uint8_t uint8;
typedef unsigned int uint32;
typedef unsigned long long int uint64;
#define STREAM_BLOCK 16
#define BLOCK_SIZE 32
#define BLOCK_D_SIZE 64
#define INTEGRAL_BLOCK_SIZE 8
#define XDIM_MAX_THREADS 1024
#define XDIM_H_THREADS 512
#define XDIM_Q_THREADS 256
#define SHARED_MEMORY 49... | the_stack |
* bisection.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <assert.h>
// includes, project
#include <helper_functions.h>
#include <helper_cuda.h>
#include "config.h"
#include "structs.h"
#include "matlab.h"
#include "util.h"
#include "... | the_stack |
#include "include/common.h"
using Vec3ida = Eigen::Matrix<int, 3, 1, Eigen::DontAlign>;
namespace kinectfusion {
namespace internal {
namespace cuda {
__device__ __forceinline__
// Trilinear interpolation
float interpolate_trilinearly(
const Vec3fda& point, ... | the_stack |
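The row above truncates the kinectfusion interpolation routine. A generic sketch of trilinear interpolation over a dense W x H x D volume, independent of the kinectfusion types (assumes in-bounds sample coordinates):

#include <cuda_runtime.h>

__device__ float trilinear(const float *vol, int W, int H, int D,
                           float x, float y, float z) {
    int x0 = (int)floorf(x), y0 = (int)floorf(y), z0 = (int)floorf(z);
    float fx = x - x0, fy = y - y0, fz = z - z0;
    int x1 = min(x0 + 1, W - 1), y1 = min(y0 + 1, H - 1), z1 = min(z0 + 1, D - 1);
    #define V(i, j, k) vol[((k) * H + (j)) * W + (i)]
    // Interpolate along x, then y, then z.
    float c00 = V(x0, y0, z0) * (1 - fx) + V(x1, y0, z0) * fx;
    float c10 = V(x0, y1, z0) * (1 - fx) + V(x1, y1, z0) * fx;
    float c01 = V(x0, y0, z1) * (1 - fx) + V(x1, y0, z1) * fx;
    float c11 = V(x0, y1, z1) * (1 - fx) + V(x1, y1, z1) * fx;
    #undef V
    float c0 = c00 * (1 - fy) + c10 * fy;
    float c1 = c01 * (1 - fy) + c11 * fy;
    return c0 * (1 - fz) + c1 * fz;
}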
namespace matrix
{
using namespace h2o4gpu;
void max_index_per_column(Matrix<float>& A, std::vector<int>& result_array, device::DeviceContext& context){
int result;
for (int i=0; i<A.columns(); i++) {
safe_cublas(cublasIsamax(context.cublas_handle, A.rows(), A.data() + i*A.rows(), 1, &result));
result_arr... | the_stack |
#include "br2cu.h"
#include "mcx_core.h"
#include "tictoc.h"
#include "mcx_const.h"
#ifdef USE_MT_RAND
#include "mt_rand_s.cu" // use Mersenne Twister RNG (MT)
#else
#include "logistic_rand.cu" // use Logistic Lattice ring 5 RNG (LL5)
#endif
// optical properties saved in the constant memory
// {x}:mua,{y}:mus,{z... | the_stack |
#include <data/Spin_System.hpp>
#include <engine/Hamiltonian_Heisenberg.hpp>
#include <engine/Vectormath.hpp>
#include <engine/Neighbours.hpp>
#include <engine/FFT.hpp>
#include <engine/Backend_par.hpp>
#include <utility/Constants.hpp>
#include <Eigen/Dense>
#include <Eigen/Core>
#include <complex>
using namespace D... | the_stack |
using at::Tensor;
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename scalar_t>
__device__ scalar_t bilinear_interpolate(const scalar_t* bottom_data, const int height, ... | the_stack |
#include "raytracer.h"
#include "cudautils.h"
namespace RayTracer {
/* Implements the M-T ray-triangle intersection algorithm. */
static __device__ bool intersects(
const float3& vertex1,
const float3& vertex2,
const float3& vertex3,
const float3& startPos, cons... | the_stack |
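The comment above names the Möller-Trumbore (M-T) ray-triangle test, whose body is truncated in this row. A standalone sketch of that algorithm with illustrative names (the original signature continues past the cut):

__device__ bool mtIntersect(float3 v0, float3 v1, float3 v2,
                            float3 orig, float3 dir, float *tOut) {
    const float EPS = 1e-7f;
    float3 e1 = make_float3(v1.x - v0.x, v1.y - v0.y, v1.z - v0.z);
    float3 e2 = make_float3(v2.x - v0.x, v2.y - v0.y, v2.z - v0.z);
    float3 p  = make_float3(dir.y * e2.z - dir.z * e2.y,      // dir x e2
                            dir.z * e2.x - dir.x * e2.z,
                            dir.x * e2.y - dir.y * e2.x);
    float det = e1.x * p.x + e1.y * p.y + e1.z * p.z;
    if (fabsf(det) < EPS) return false;                        // ray parallel to triangle
    float inv = 1.f / det;
    float3 s  = make_float3(orig.x - v0.x, orig.y - v0.y, orig.z - v0.z);
    float u = (s.x * p.x + s.y * p.y + s.z * p.z) * inv;
    if (u < 0.f || u > 1.f) return false;
    float3 q = make_float3(s.y * e1.z - s.z * e1.y,            // s x e1
                           s.z * e1.x - s.x * e1.z,
                           s.x * e1.y - s.y * e1.x);
    float v = (dir.x * q.x + dir.y * q.y + dir.z * q.z) * inv;
    if (v < 0.f || u + v > 1.f) return false;
    *tOut = (e2.x * q.x + e2.y * q.y + e2.z * q.z) * inv;      // hit distance
    return *tOut > EPS;
}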
#include "caffe/layer.hpp"
#include "caffe/layers/l1_loss_layer.hpp"
#include "caffe/solver.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
//#include "caffe/layers/custom_data_layer... | the_stack |
#include <cusp/array1d.h>
#include <cusp/array2d.h>
#include <cusp/complex.h>
#include <cusp/blas/blas.h>
template <class MemorySpace>
void TestAmax(void)
{
typedef typename cusp::array1d<float, MemorySpace> Array;
typedef typename cusp::array1d<float, MemorySpace>::view View;
Array x(6);
View v... | the_stack |
#include "kernels.h"
using namespace cub;
/**
@brief: transform_0213
Split the attention heads and reshape input
during the backward pass of encoder self-attention
@thread
gridDim.x = batch_size
gridDim.y = seq_len
blockDim.x = min(hidden_dim, MAX_THREADS)
@param
input: [batch_size, seq_len, hidden_d... | the_stack |
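Given the launch shape documented above (gridDim.x = batch_size, gridDim.y = seq_len), a sketch of the (0,2,1,3) permutation the kernel name suggests: input [batch, seq_len, nhead, head_dim] flattened as [batch, seq_len, hidden] is rewritten to [batch, nhead, seq_len, head_dim]. Parameter names are illustrative; the original body is truncated.

__global__ void transform0213(const float *in, float *out,
                              int nhead, int head_dim) {
    int b = blockIdx.x;                 // batch index
    int s = blockIdx.y;                 // sequence position
    int seq_len = gridDim.y;            // per the documented launch shape
    int hidden = nhead * head_dim;
    for (int i = threadIdx.x; i < hidden; i += blockDim.x) {
        int h = i / head_dim;           // attention head
        int d = i % head_dim;           // element within the head
        out[((b * nhead + h) * seq_len + s) * head_dim + d] =
            in[(b * seq_len + s) * hidden + i];
    }
}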
struct LSTMInitParams {
DnnHandle handle;
int batchSize, inputSize, outputSize;
};
LSTMTensors RnnModel::add_lstm_node(Tensor x, Tensor hx, Tensor cx,
ParallelConfig pc, SharedVariable params)
{
assert(x.numDim == 3);
assert(hx.numDim == 2);
assert(cx.numDim == 2);
asser... | the_stack |
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_functions.h>
#include <device_launch_parameters.h>
#include <thrust/random.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sequence.h>
#include <thrust/functional.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include <th... | the_stack |
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under... | the_stack |
#include <doctest.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cuda/cudaflow.hpp>
#include <taskflow/cuda/algorithm/transform.hpp>
#include <taskflow/cuda/algorithm/for_each.hpp>
#include <taskflow/cuda/algorithm/reduce.hpp>
#include <taskflow/cuda/algorithm/scan.hpp>
#include <taskflow/cuda/algorithm/find.h... | the_stack |
#define EMUSYNC __syncthreads();
#else
#define EMUSYNC
#endif
#include "support_kernels.cu"
//Reduce function to get the minimum timestep
__device__ void get_TnextD(const int n_bodies,
double2 *time,
double *tnext, volatile double *sdata) {
//float2 time : x i... | the_stack |
* \test Tests routines for matrix-vector operations (BLAS level 2) using floating point arithmetic.
**/
//
// *** System
//
#include <iostream>
#include <vector>
//
// *** ViennaCL
//
//#define VIENNACL_DEBUG_ALL
#include "viennacl/scalar.hpp"
#include "viennacl/matrix.hpp"
#include "viennacl/vector.hpp"
#include "v... | the_stack |
#define PI 3.141592653
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
inline int GET_BLOCKS(const int N) {
int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREAD... | the_stack |
#include <cassert>
#include <cusolverSp.h>
#include <cusolverSp_LOWLEVEL_PREVIEW.h>
// geometry processing and shape analysis framework
namespace gproshan {
struct cu_spAxb
{
int * A_col_ptrs, * A_row_indices;
real_t * A_values, * x, * b;
cu_spAxb(const int m, const int nnz, const real_t * hA_values, const in... | the_stack |
#include <vector>
// header file to use mshadow
#include "mshadow/tensor.h"
// helper function to load mnist dataset
#include "util.h"
// this namespace contains all data structures, functions
using namespace mshadow;
// this namespace contains all operator overloads
using namespace mshadow::expr;
// define operations... | the_stack |
#include <stdexcept>
#if __CUDA_ARCH__ >= 530
#define CUDA_SUPPORTS_FP16
#endif
//TODO maybe tune this number, it varies by GPU
static const int targetNumThreads = 512;
void splitThreadsAcrossDim01(int dim0Size, int dim1Size, int& threads0, int& blocks0, int& threads1, int& blocks1) {
if(dim0Size > targetNumThread... | the_stack |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_atomic_functions.h"
#include <iostream>
#include <thread>
#include <chrono>
#include <cassert>
#include "cudarad.h"
#include "bsp.h"
#include "bsp_shared.h"
#include "cudabsp.h"
#include "cudamatrix.h"
#include "raytracer.h"
#include ... | the_stack |
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ":... | the_stack |
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/roi_align.hpp>
#include <nbla/cuda/utils/atomic_add.cuh>
#include <nbla/variable.hpp>
namespace nbla {
namespace {
template <typename T> struct Box { T batch_index, x1, y1, x2, y2; };
template <typename T>
__forceinline__ __device... | the_stack |
#include <iostream>
#include <algorithm>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
#include "Common.cuh"
//////////////////////////////
// forward
//////////////////////////////
#if 0
template<typename T=float>
__global__ void kernal_Rea... | the_stack |
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N) {
return std::min(... | the_stack |
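How the pair above is typically used together: GET_BLOCKS presumably caps the grid at kMaxGridNum (the visible std::min suggests as much), and the grid-stride loop in CUDA_KERNEL_LOOP lets the capped grid still cover all N elements. An illustrative kernel, self-contained with the stride written out:

#include <cuda_runtime.h>

__global__ void scaleKernel(float *x, float alpha, int n) {
    // Same stride pattern as CUDA_KERNEL_LOOP above.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
        x[i] *= alpha;
}

// Launch: scaleKernel<<<GET_BLOCKS(N), CUDA_NUM_THREADS>>>(d_x, 2.f, N);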
#include "allocators.cuh"
#include "broadcast_kernel.cuh"
#include "math_functions.cuh"
namespace minkowski {
namespace detail {
template <class T> struct IsIntType { static const bool value = false; };
template <> struct IsIntType<int> { static const bool value = true; };
template <typename Dtype>
__device__ void... | the_stack |
#include "cudpp_radixsort.h"
#include <cudpp_globals.h>
#include "sharedmem.h"
#include "cta/radixsort_cta.cuh"
/**
* @file
* radixsort_kernel.cu
*
* @brief CUDPP kernel-level radix sorting routines
*/
/** \addtogroup cudpp_kernel
* @{
*/
/** @name RadixSort Functions
* @{
*/
typedef unsigned int uin... | the_stack |
* \test Tests sparse-matrix-dense-matrix products.
**/
//
// include necessary system headers
//
#include <iostream>
#include <cmath>
#include <vector>
#include <map>
//
// ViennaCL includes
//
#include "viennacl/scalar.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/matrix.hpp"
#include "viennacl/linalg/di... | the_stack |
#include "nvblox/core/hash.h"
#include "nvblox/core/indexing.h"
#include "nvblox/core/types.h"
#include "nvblox/core/unified_vector.h"
#include "nvblox/integrators/ray_caster.h"
#include "nvblox/utils/timing.h"
namespace nvblox {
FrustumCalculator::FrustumCalculator() { cudaStreamCreate(&cuda_stream_); }
FrustumCalcu... | the_stack |
int unir(int *res, int rows, int tipo, int **ret, int final)
{
thrust::device_ptr<int> pt, re;
thrust::device_ptr<s2> pt2, re2;
thrust::device_ptr<s3> pt3, re3;
thrust::device_ptr<s4> pt4, re4;
thrust::device_ptr<s5> pt5, re5;
thrust::device_ptr<s6> pt6, re6;
thrust::device_ptr<s7> pt7, re7;
thrust::device_ptr<... | the_stack |
// COMMON
const float AMBIENT = .1;
const uint BLOCK = 128;
int RES;
float FPS;
__constant__ float FPS_;
__constant__ float AGENT_RADIUS;
__constant__ float HALF_SCREEN_WIDTH;
__host__ void initialize(float agent_radius, int res, float fov, float fps) {
RES = res;
cudaMemcpyToSymbol(AGENT_RADIUS, &agent_radi... | the_stack |
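The row truncates mid-upload; presumably the remaining __constant__ symbols are filled the same way. A sketch of the pattern (the half-screen-width derivation is purely illustrative, not from the source):

__host__ void uploadConstants(float agent_radius, float fov, float fps) {
    // cudaMemcpyToSymbol copies host values into __constant__ memory
    // before any kernel reads them.
    cudaMemcpyToSymbol(AGENT_RADIUS, &agent_radius, sizeof(float));
    cudaMemcpyToSymbol(FPS_, &fps, sizeof(float));
    float half_width = tanf(fov * 0.5f);   // illustrative derivation only
    cudaMemcpyToSymbol(HALF_SCREEN_WIDTH, &half_width, sizeof(float));
}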
#include "../config.cuh"
#include "../util_namespace.cuh"
#include "dispatch/dispatch_adjacent_difference.cuh"
#include <thrust/detail/integer_traits.h>
#include <thrust/detail/cstdint.h>
CUB_NAMESPACE_BEGIN
/**
* @brief DeviceAdjacentDifference provides device-wide, parallel operations for
* computing th... | the_stack |
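A hedged usage sketch for the facility documented above, assuming a CUB version that ships DeviceAdjacentDifference (CUB 1.14+): SubtractLeftCopy writes out[i] = in[i] - in[i-1] (with out[0] = in[0]) using CUB's usual two-phase temp-storage protocol. Buffer names are illustrative.

#include <cub/cub.cuh>
#include <cuda_runtime.h>

void adjacentDifference(const int *d_in, int *d_out, int num_items) {
    void *d_temp = nullptr;
    size_t temp_bytes = 0;
    // First call only sizes the temporary storage.
    cub::DeviceAdjacentDifference::SubtractLeftCopy(d_temp, temp_bytes,
                                                    d_in, d_out, num_items);
    cudaMalloc(&d_temp, temp_bytes);
    // Second call performs the computation.
    cub::DeviceAdjacentDifference::SubtractLeftCopy(d_temp, temp_bytes,
                                                    d_in, d_out, num_items);
    cudaFree(d_temp);
}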
#include "test_asserts.cuh"
#include <fmt/format.h>
void test_empty()
{
nvbench::int64_axis axis("Empty");
ASSERT(axis.get_name() == "Empty");
ASSERT(axis.get_type() == nvbench::axis_type::int64);
ASSERT(axis.get_size() == 0);
axis.set_inputs({});
ASSERT(axis.get_size() == 0);
const auto clone_base ... | the_stack |
#define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#define BLOCKSIZE 16
#define BLOC... | the_stack |
namespace AggMIS {
namespace MIS {
namespace Kernels {
__global__ void GenerateRandoms(int size,
int iterations,
unsigned int *randoms,
unsigned int *seeds) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int z = seeds[idx];
int offset = idx;
... | the_stack |
//------------------------------------------------------------------------
// Common op attribute parser.
static __host__ void interpolateParseOpAttributes(OpKernelConstruction* ctx, InterpolateKernelParams& p, bool enableDA)
{
if (enableDA)
{
OP_REQUIRES_OK(ctx, ctx->GetAttr("diff_attrs_all", &p.diff_... | the_stack |
#include <cutil_inline.h>
#include <cutil_math.h>
#include "GlobalDefines.h"
#include "cuda_SimpleMatrixUtil.h"
#define T_PER_BLOCK 16
#define MINF __int_as_float(0xff800000)
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////... | the_stack |
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24
*/
#include "../../XDevice.h"
#include "../../XTensor.h"
#include "Unsqueeze.h"
#include "Unsqueeze.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
insert a dimension by copying the blocks n times (where n is the si... | the_stack |
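A sketch of the operation the truncated comment describes: treat the tensor as blockNum contiguous blocks of blockSize elements and write each block n times in a row, which inserts a new dimension of size n. Names are illustrative, not from the NiuTrans source.

__global__ void unsqueezeCopy(const float *in, float *out,
                              int blockNum, int blockSize, int n) {
    long long total = (long long)blockNum * n * blockSize;
    for (long long i = blockIdx.x * (long long)blockDim.x + threadIdx.x;
         i < total; i += (long long)blockDim.x * gridDim.x) {
        long long block  = i / ((long long)n * blockSize);  // source block
        long long offset = i % blockSize;                   // position in block
        out[i] = in[block * blockSize + offset];
    }
}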
#include <algorithm>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/complex128.h"
#include "paddle/fluid/platform/complex64.h"
#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namesp... | the_stack |
extern "C" {
#endif
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "rpsroi_pooling_kernel.h"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
__global__ void RPSROIPoolForward(const int ... | the_stack |
#define EPSILON 1e-7
#define THREADS 256
#define ZERO Real_t(0)
#define HALF Real_t(0.5)
#define ONE Real_t(1.0)
#define THREE Real_t(3.0)
#define FOUR Real_t(4.0)
#define C1 Real_t(.1111111e-36)
#define C2 Real_t(.3333333e-18)
#define SEVEN Real_t(7.0)
#define EIGHT Real_t(8.0)
#define C1S Real_t(2.0/3.... | the_stack |
#include "caffe/layers/accuracy_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype, typename MItype, typename MOtype>
void AccuracyLayer<Dtype, MItype, MOtype>::GenerateProgram() {
this->device_program_ = this->device_->CreateProgram();
stringstream ss;
ss << this->d... | the_stack |
#include <cub/block/block_load.cuh>
#include <cub/block/block_run_length_decode.cuh>
#include <cub/block/block_store.cuh>
#include <cub/device/device_scan.cuh>
#include <cub/iterator/counting_input_iterator.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include <cub/util_allocator.cuh>
#include "test_util.... | the_stack |
#include <cstdint>
#include <cstdio>
#include <sstream>
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/io/class_io/pointcloud_io.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/filesystem.h"
#include "cupoch/utility/helper.h"
// References for PCD file IO
// http://pointclouds.org/documentat... | the_stack |
#include "Float16.cuh"
//
// Templated wrappers to express math for different scalar and vector
// types, so kernels can have the same written form but can operate
// over half and float, and on vector types transparently
//
namespace faiss { namespace gpu {
template <typename T>
struct Math {
typedef T ScalarType... | the_stack |
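The idea described above, reduced to one operation: type specializations give every scalar the same written form, so kernels can call the same name whether T is float or half. A minimal sketch, not the full faiss interface:

#include <cuda_fp16.h>

template <typename T>
struct MathOps {
    static __device__ T add(T a, T b) { return a + b; }
    static __device__ T zero() { return (T)0; }
};

template <>
struct MathOps<half> {
    static __device__ half add(half a, half b) {
#if __CUDA_ARCH__ >= 530
        return __hadd(a, b);                              // native fp16 add
#else
        return __float2half(__half2float(a) + __half2float(b));
#endif
    }
    static __device__ half zero() { return __float2half(0.f); }
};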
#include <random>
#include <vector>
#include "paddle/fluid/operators/fused/fused_dropout_test.h"
#include "paddle/fluid/operators/fused/fused_layernorm_residual_dropout_bias.h"
/**
* @brief The unit test of fused_layernorm_residual_dropout_bias
*/
template <typename T>
struct TestFusedLayernormResidualDropoutBias ... | the_stack |
// GaussianSmoothxy.cu
// Implements Gaussian smoothing of a curve
#include "GaussianSmoothxy.h"
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread-block dimensions.
#define DEF_BLOCK_X 32
//#define DEF_BLOCK_Y 8
// Macros defining the coefficients for the five Gaussian smoothing scales
// For Gaussian 3, 5, 7, 9, 11 they are 4, 16, 64, 256, 1024, respectively.
#define GAUSS_THREE 4
#define GAUSS_FIVE 16
#define GAUSS_SEVEN 64
#define... | the_stack |
__forceinline__ __device__
long long int int2_as_longlong (int2 a)
{
long long int res;
asm ("mov.b64 %0, {%1,%2};" : "=l"(res) : "r"(a.x), "r"(a.y));
return res;
}
// __forceinline__ __device__
// void xcaz(int init, int *c, uint64_t a, uint64_t b)
// {
// switch (init) {
// case 0: *c += __pop... | the_stack |
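The inverse of int2_as_longlong above, using the brace form of mov.b64 to split a 64-bit register back into two 32-bit halves — the standard companion PTX idiom, added here as a sketch rather than taken from the truncated source:

__forceinline__ __device__
int2 longlong_as_int2(long long int a)
{
    int2 res;
    asm ("mov.b64 {%0,%1}, %2;" : "=r"(res.x), "=r"(res.y) : "l"(a));
    return res;
}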
namespace arboretum_test {
using arboretum::core::BestSplit;
using arboretum::core::GainFunctionParameters;
using arboretum::core::Histogram;
using arboretum::core::HistTreeGrower;
using arboretum::core::InternalConfiguration;
using arboretum::core::my_atomics;
TEST(HistTreeGrower, CreatePartitioningIndexes) {
const... | the_stack |
#include <iostream>
#include <algorithm>
#include "cuhnsw.hpp"
namespace cuhnsw {
CuHNSW::CuHNSW() {
logger_ = CuHNSWLogger().get_logger();
GetDeviceInfo();
// reference: https://stackoverflow.com/a/32531982
switch (major_){
case 2: // Fermi
if (minor_ == 1)
cores_ = mp_cnt_ * 48;
e... | the_stack |
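The switch above truncates after the Fermi case. A sketch of how such a cores-per-SM lookup typically continues, following the StackOverflow answer the snippet cites (values for architectures newer than the snippet are my addition):

int coresPerSM(int major, int minor) {
    switch (major) {
        case 2: return (minor == 1) ? 48 : 32;   // Fermi
        case 3: return 192;                      // Kepler
        case 5: return 128;                      // Maxwell
        case 6: return (minor == 0) ? 64 : 128;  // Pascal
        case 7: return 64;                       // Volta / Turing
        case 8: return (minor == 0) ? 64 : 128;  // Ampere
        default: return 64;                      // unknown: conservative guess
    }
}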
* \file
* Vector type inference utilities
*/
#pragma once
#include <iostream>
#include "util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilModule
* @{
*/
/******************************************************************************
... | the_stack |
#include "HoughRec.h"
#include <iostream>
#include <fstream>
#include <cmath>
#include <vector>
#include "ErrorCode.h"
#include "CoordiSet.h"
using namespace std;
// Macro: BORDER_COLOR
// Defines the border color
#define BORDER_COLOR 255
// Macro: BK_COLOR
// Defines the background color
#define BK_COLOR 0
// Macro: DEBUG
// Controls whether debug information is printed
//#define DEBUG
// Macro: M_PI
// The value of π. ... | the_stack |
*
* Code and text by Sean Baxter, NVIDIA Research
* See http://nvlabs.github.io/moderngpu for repository and documentation.
*
******************************************************************************/
#pragma once
#include "../mgpudevice.cuh"
#include "deviceutil.cuh"
#include "intrinsics.cuh"
namespace mgp... | the_stack |
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/cuda/UpSample.cuh>
#include <ATen/native/cuda/KernelUtils.cuh>
#include <ATen/cuda/detail/Ke... | the_stack |
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
constexpr int CAFFE_CUDA_NUM_THREADS = 512;
constexpr int CAFFE_MAXIMUM_NUM_BLOCKS = 4096;
inline int CAFFE_GET_BLOCKS(const int N) {
return s... | the_stack |
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <boost/numeric/odeint.hpp>
#include <boost/numeric/odeint/external/thrust/thrust.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_real.hpp>
#include <boost/random/variate_generator.hpp... | the_stack |
#pragma once
#include <gunrock/app/enactor_base.cuh>
#include <gunrock/app/enactor_iteration.cuh>
#include <gunrock/app/enactor_loop.cuh>
#include <gunrock/oprtr/oprtr.cuh>
#include <gunrock/app/rw/rw_problem.cuh>
#include <math.h>
#include <curand.h>
#include <curand_kernel.h>
namespace gunrock {
namespace app {
na... | the_stack |
namespace AggMIS {
namespace Aggregation {
namespace Kernels {
__global__ void allocateNodesKernel(int size,
int *adjIndexes,
int *adjacency,
int *partIn,
int *partOut,
int *aggregated) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size)... | the_stack |
#include <array>
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/base/types.hpp>
#include <ginkgo/core/matrix/csr.hpp>
#include <ginkgo/core/matrix/dense.hpp>
#include "accessor/reduced_row_major.hpp"
#include "core/base/mixed_precision_types.hpp"
#includ... | the_stack |
* \file
* The cub::BlockHistogram class provides [<em>collective</em>](index.html#sec0) methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block.
*/
#pragma once
#include "specializations/block_histogram_sort.cuh"
#include "specializations/block_histogram_atomic.cuh"
#... | the_stack |
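A hedged usage sketch of the collective described above: each of 128 threads contributes 4 samples in [0, 256) and the block cooperatively builds one 256-bin histogram in shared memory. Shapes and names are illustrative, not from the header.

#include <cub/cub.cuh>

__global__ void blockHistKernel(const unsigned char *d_samples,
                                unsigned int *d_hist) {
    typedef cub::BlockHistogram<unsigned char, 128, 4, 256> BlockHist;
    __shared__ typename BlockHist::TempStorage temp_storage;
    __shared__ unsigned int smem_hist[256];

    // Each thread loads its 4 samples.
    unsigned char items[4];
    for (int i = 0; i < 4; ++i)
        items[i] = d_samples[(blockIdx.x * 128 + threadIdx.x) * 4 + i];

    BlockHist(temp_storage).Histogram(items, smem_hist);  // collective call
    __syncthreads();
    // Write the block's histogram out.
    for (int bin = threadIdx.x; bin < 256; bin += 128)
        d_hist[blockIdx.x * 256 + bin] = smem_hist[bin];
}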
#include "CUFLU.h"
#if ( MODEL == HYDRO && defined MHD )
// external functions
#ifdef __CUDACC__
#include "CUFLU_Shared_FluUtility.cu"
#endif
// internal functions
GPU_DEVICE
static real dE_Upwind( const real FC_Ele_L, const real FC_Ele_R, const real FC_Mom, const real D_L, const real D_R,
... | the_stack |
#include "camera_calibration/feature_detection/cuda_refinement_by_symmetry.cuh"
#include <cub/cub.cuh>
#include <libvis/cuda/cholesky_solver.h>
#include <libvis/cuda/cuda_auto_tuner.h>
#include <libvis/cuda/cuda_util.h>
#include <libvis/logging.h>
#include <math_constants.h>
#include "camera_calibration/feature_detec... | the_stack |
#pragma once
#include <gunrock/util/cta_work_distribution.cuh>
#include <gunrock/util/cta_work_progress.cuh>
#include <gunrock/util/kernel_runtime_stats.cuh>
#include <gunrock/priority_queue/near_far_pile.cuh>
#include <gunrock/priority_queue/kernel_policy.cuh>
#include <gunrock/util/test_utils.cuh>
#include <moder... | the_stack |
// RegionGrow.cu
// Implements region growing on an image: serial algorithm regionGrow_serial, parallel algorithm regionGrow_parallel
#include "RegionGrow.h"
#include <iostream>
#include <fstream>
#include <cmath>
using namespace std;
#include "ErrorCode.h"
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread-block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Macro: REGIONGROW_... | the_stack |
#include <numeric>
#include "cupoch/geometry/intersection_test.h"
#include "cupoch/geometry/occupancygrid.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/geometry/trianglemesh.h"
#include "cupoch/geometry/voxelgrid.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/helper.h"
#include "cupoch/u... | the_stack |
namespace anakin{
namespace saber{
template <typename dtype, int thread_number>
__global__ void group_normalize_kernel(const dtype* in_data, const dtype* scale,
const dtype* bias, int n, int c, int h, int w, int group,
int group_size, float eps, dtype* out_data, dtype* out_... | the_stack |
#include <algorithm>
#include <cub/cub.cuh>
#include <iostream>
#include <utility>
#include <vector>
#include "HugeCTR/include/common.hpp"
#include "HugeCTR/include/data_simulator.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/data.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/infrequent_embed... | the_stack |
#define USE_HEIGHTS
#include "deviceCode.h"
#include "owl/common/math/random.h"
namespace cdf {
extern "C" __constant__ LaunchParams optixLaunchParams;
typedef owl::common::LCG<4> Random;
inline __device__
vec3f backGroundColor()
{
const vec2i pixelID = owl::getLaunchIndex();
const float t = pi... | the_stack |
#pragma once
#include <thrust/sequence.h>
#include <cub/cub.cuh>
#include "open3d/ml/impl/misc/MemoryAllocation.h"
#include "open3d/utility/Helper.h"
#include "open3d/utility/MiniVec.h"
namespace open3d {
namespace ml {
namespace impl {
namespace {
using namespace open3d::utility;
template <class T, bool LARGE_A... | the_stack |
#include "Device/DataMovement/Indexing.cuh"
#include "Device/Util/DeviceProperties.cuh"
#include "Device/Util/Basic.cuh"
#include "Host/Algorithm.hpp"
namespace xlib {
namespace detail {
template<unsigned PARTITION_SIZE, typename T>
__global__ void blockPartition(const T* __restrict__ d_prefixsum,
... | the_stack |
#include "core/providers/cuda/cu_inc/common.cuh"
#include "layer_norm_impl.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
using namespace onnxruntime::cuda;
template <typename U, bool simplified>
__device__ void cuWelfordOnlineSum(
const U curr,
U& mu,
U& sigma2,
U& count) {
count... | the_stack |
#include "constitutive_models.cuh"
#include "particle_buffer.cuh"
#include "settings.h"
#include "utility_funcs.hpp"
#include <MnBase/Algorithm/MappingKernels.cuh>
#include <MnBase/Math/Matrix/MatrixUtils.h>
#include <MnSystem/Cuda/DeviceUtils.cuh>
namespace mn {
using namespace config;
using namespace placeholder;
... | the_stack |
# include "QuEST.h"
# include "QuEST_precision.h"
# include "QuEST_internal.h" // purely to resolve getQuESTDefaultSeedKey
# include "mt19937ar.h"
# include <stdlib.h>
# include <stdio.h>
# include <math.h>
# define REDUCE_SHARED_SIZE 512
# define DEBUG 0
/*
* struct types for concisely passing unitaries to ke... | the_stack |
using namespace tvgutil;
#ifdef _MSC_VER
// Suppress some VC++ warnings that are produced when including the Thrust headers.
#pragma warning(disable:4244 4267)
#endif
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#ifdef _MSC_VER
// Reenable the suppressed warnings for the rest of the translation unit... | the_stack |
#include "crys_kernel.cu"
#define CUDA_ERRCK { cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \
exit (-1); }}
uint4* d_Block_Work;
uint2* d_FinalReduce;
float *d_Output, *d_ReductionSum;
cudaArray *d_Coors, ... | the_stack |