| text (string, lengths 2.5k to 6.39M) | kind (string, 3 classes) |
|---|---|
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <cublas_v2.h>
#include <cusparse.h>
#ifdef SOLVER
#include <cusolverDn.h>
#endif
#include "helper_cuda.h"
#include "kernel_impl.cuh"
#include "thrust/device_ptr.h"
#include "thrust/device_vector.h"
#include "thrust/for_each.h"
#include "thrust/iterator... | the_stack |
#include "SharedMemory.cuh"
// INTEGER BASED
#include "i_Sum_i.cuh"
#include "i_MinIdx_2i.cuh"
#include "i_MaxIdx_2i.cuh"
#include "i_MinIdxMaxIdx_4i.cuh"
// SINGLE BASED
#include "f_Sum_f.cuh"
#include "f_MinMax_2f.cuh"
#include "f_MinIdx_fi.cuh"
#include "f_MinIdx_ff.cuh"
#include "f_MaxIdx_fi.cuh"
#include "f_MaxI... | the_stack |
#include <fstream>
#include <ios>
#include <iostream>
#include <map>
#include <iterator>
#include <algorithm>
#include <amgx_types/util.h>
#include <amgx_types/io.h>
namespace amgx
{
template <typename T>
void LoadValueFromStream(std::ifstream &fin, T &val);
template <>
void LoadValueFromStream(std::ifstream &fin, ... | the_stack |
#define DI __device__
//! Thread-local Matrix-Vector multiplication.
template <int n>
DI void Mv_l(const double* A, const double* v, double* out)
{
for (int i = 0; i < n; i++) {
double sum = 0.0;
for (int j = 0; j < n; j++) {
sum += A[i + j * n] * v[j];
}
out[i] = sum;
}
}
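// Added usage sketch (hypothetical helper, not part of the original file):
// Mv_l treats A as an n-by-n column-major matrix, so a 3x3 caller looks like:
DI void Mv_l_demo3(const double A[9], const double v[3], double out[3])
{
    Mv_l<3>(A, v, out); // out = A * v, with A indexed as A[row + col * 3]
}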
template <int n>
D... | the_stack |
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
// CHECK: #include <hip/hip_runtime.h>
#include <cuda_runtime.h>
// CHECK: #include <hipblas.h>
#include <cublas_v2.h>
// CHECK: #include "hipsparse.h"
#include "cusparse.h"
void printMatrix(int m, int n, const double*A, int lda, const char* name)
{
for(in... | the_stack |
#define debug_aml(a...)
//#define debug_aml(a...) {printf("%s:%d ", __FILE__, __LINE__); printf(a); \
printf("\n");}
#pragma once
#ifdef BOOST_FOUND
// Boost includes for CPU Push Relabel Max Flow reference algorithms
#include <boost/config.hpp>
#include <iostream>
#include <string>
#include <boost/graph/edmonds_... | the_stack |
//typedef unsigned char BitSequence;
#include "cuda_helper.h"
#include "cuda_vector.h"
static __constant__ uint32_t d_T512[4096/4] = {
0xef0b0270, 0x3afd0000, 0x5dae0000,
0x69490000, 0x9b0f3c06, 0x4405b5f9,
0x66140a51, 0x924f5d0a, 0xc96b0030,
0xe7250000, 0x2f840000, 0x264f0000,
0x08695bf9, 0x6dfcf137, 0x509f698... | the_stack |
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/transform.h>
#include <catch2/catch.hpp>
#include <cuco/static_map.cuh>
namespace {
namespace cg = cooperative_groups;
// User-defined logical algorithms to reduce compilation time
template <typename Iterator, typename Predicate>
bool a... | the_stack |
namespace fastertransformer{
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
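// Added usage sketch (hypothetical kernel, not part of the original file):
// each lane contributes one value and the XOR butterfly above leaves the
// warp-wide sum in every lane. Assumes FINAL_MASK is 0xffffffff (all lanes
// participating), as defined elsewhere in FasterTransformer.
__global__ void warpSumDemo(const float* in, float* out)
{
    float v = warpReduceSum<float>(in[threadIdx.x]);
    if (threadIdx.x == 0) *out = v; // lane 0 writes the sum of 32 inputs
}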
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
... | the_stack |
// RobustEdgeDetection.cu
// Implements edge detection algorithms for images.
#include "RobustEdgeDetection.h"
#include <stdio.h>
// Macros: SA_DEF_BLOCK_X, SA_DEF_BLOCK_Y, and SA_DEF_BLOCK_Z
// Define the default thread block dimensions for the simple averaging method.
#define SA_DEF_BLOCK_X 32
#define SA_DEF_BLOCK_Y 2
#define SA_DEF_BLOCK_Z 4
// Macros: FV_DEF_BLOCK_X, FV_DEF_BLOCK_Y, and FV_DEF_BLOCK_Z
// Define the def... | the_stack |
#include <thrust/count.h> //count
#include <thrust/sort.h> //sort
#include <thrust/unique.h> //unique
#include <cusp/detail/format_utils.h> //offsets_to_indices
namespace amgx
{
namespace aggregation
{
namespace size4_selector
{
// include common routines for all selectors
#include <aggregation/selectors/common_selec... | the_stack |
#ifndef INCLUDE_GGNN_CUDA_KNN_GGNN_GPU_INSTANCE_CUH_
#define INCLUDE_GGNN_CUDA_KNN_GGNN_GPU_INSTANCE_CUH_
#include <array>
#include <limits>
#include <string>
#include <thread>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <cub/cub.cuh>
#inclu... | the_stack |
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/communicator/data_parallel_communicator.hpp>
#include <algorithm>
#include <cstdlib>
#include <memory>
namespace nbla {
using std::make_shared;
template <typename T>
__global__ void kernel_divide_inplace(const int size, co... | the_stack |
#include <iostream>
#include "LinearFilter.h"
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Static variable: _defTpl
// Default template used when the user has not supplied a valid one; it is 3x3 with all template values set to 1.
static TemplateCuda *_defTpl = NULL;
// Host function: _initDefTemplate (initializes the default template pointer)
// Initializes the default template pointer _defTpl; if the template is already non-NULL it returns immediately, other... | the_stack |
//#define SOLVE_ZERO_INI_GUESS
#define DEBUG
namespace amgx
{
// parameter is used as test name
DECLARE_UNITTEST_BEGIN(ProfileTest);
struct TestCase
{
std::string file_name;
std::string config_string;
bool extract_diagonal;
bool insert_diagonal;
bool use_pre_setup;
bool use_replace;
TestC... | the_stack |
step of the computation. */
#ifndef _BISECT_KERNEL_LARGE_H_
#define _BISECT_KERNEL_LARGE_H_
// includes, project
#include "config.h"
#include "util.h"
// additional kernel
#include "bisect_util.cu"
// declaration, forward
////////////////////////////////////////////////////////////////////////////////
//! Write da... | the_stack |
template <class T>
__device__
void plus_prescan( T *a, T *b) {
T av = *a;
T bv = *b;
*a = bv;
*b = bv + av;
}
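// Added check (hypothetical helper, not part of the original file): the swap
// above is the classic two-element step of a Blelloch exclusive-scan
// down-sweep -- the left slot receives the right value, the right slot the
// running total.
__device__ void plus_prescan_demo()
{
    int x = 3, y = 4;
    plus_prescan(&x, &y); // now x == 4 (old y) and y == 7 (old x + old y)
}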
/// bitonic_sort: sort 2*LOCAL_THREADCOUNT elements
template <class T>
__device__
void bitonic_sort( T* sh_data, const uint localid)
{
for (uint ulevel = 1; ulevel < LQSORT_LOCAL_WOR... | the_stack |
//#define DEBUG
//#define DEBUGX
namespace amgx
{
// parameter is used as test name
DECLARE_UNITTEST_BEGIN(AmgLevelsReuse);
struct TestCase
{
std::string config_string;
bool insert_diagonal;
bool use_pre_setup;
TestCase(): use_pre_setup(true), insert_diagonal(false){}
};
std::vector<double> test_mai... | the_stack |
//variable definition
#define F 100
#define TILE_SIZE (F/10) // parenthesized so the division survives macro expansion
#define SCAN_BATCH 30
#define THETA_BATCH 3
#define X_BATCH 240
#define ITERS 10
#define M 50082603
#define N 39780
#define NNZ 3101144313
#define NNZ_TEST 344573330
//0.05 when use both "full" kernels
#define LAMBDA 0.048
//hardware specific
#define GPU_COUN... | the_stack |
typedef enum test_enum {
test_set_1, test_swap_1, test_add_1, test_negate_1, test_sub_1,
test_mul_1, test_mul_high_1, test_sqr_1, test_sqr_high_1, test_div_1, test_rem_1,
test_div_rem_1, test_sqrt_1, test_sqrt_rem_1, test_equals_1, test_equals_2, test_equals_3, test_compare_1, test_compare_2,
test_compare_3, te... | the_stack |
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/io/coded_stream.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/util_img.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/proto/c... | the_stack |
// workaround issue between gcc >= 4.7 and cuda 5.5
#if (defined __GNUC__) && (__GNUC__>4 || __GNUC_MINOR__>=7)
#undef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_USE_INT128
#endif
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#include "main.h"
#include "gpu_common.h"
// Check that den... | the_stack |
#pragma once
#include <cuda_runtime.h>
#include <libvis/cuda/cuda_buffer.cuh>
#include <libvis/libvis.h>
#include <libvis/opengl.h>
#include "badslam/cost_function.cuh"
#include "badslam/cuda_matrix.cuh"
#include "badslam/cuda_util.cuh"
#include "badslam/surfel_projection.cuh"
#include "badslam/util.cuh"
#include "b... | the_stack |
#include "./physlib/R2grid.h" // Grid2d
#include "./physlib/dev_R2grid.h" // dev_Grid2d
#include "./commonlib/surfObj2d.h" // SurfObj2d
#include "./commonlib/checkerror.h" // checkCudaErrors
constexpr const int L_X { 128 } ; // WIDTH
constexpr const int L_Y { 64 } ; // HEIGHT
// Simple copy kernel
/* http://docs... | the_stack |
#include <ops/declarable/helpers/random.h>
//#include <NativeOps.h>
#include <vector>
#include <memory>
#include <graph/Context.h>
#include <helpers/RandomLauncher.h>
#include <helpers/ShapeUtils.h>
#include <array/NDArrayFactory.h>
#include <exceptions/cuda_exception.h>
#include <helpers/ConstantTadHelper.h>
#include ... | the_stack |
#include <nvbio/basic/numbers.h>
#include <nvbio/basic/algorithms.h>
#include <nvbio/basic/timer.h>
#include <nvbio/basic/transform_iterator.h>
#include <nvbio/basic/vector_view.h>
#include <nvbio/basic/primitives.h>
#include <nvbio/alignment/alignment.h>
#include <nvbio/alignment/batched.h>
#include <thrust/iterator/c... | the_stack |
#include "HugeCTR/include/embeddings/hybrid_embedding/hybrid_indices.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/utils.cuh"
#include "HugeCTR/include/utils.cuh"
namespace indices_kernels {
template <typename dtype>
__global__ void fused_cache_masks(const dtype* __restrict__ samples,
... | the_stack |
#include <vector_types.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <stdio.h>
#include <limits>
namespace pcl
{
namespace cuda
{
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
SampleConsensusModelPlane<Storage... | the_stack |
__device__ __forceinline__ float
getMinTime (const float3& volume_max, const float3& origin, const float3& dir)
{
float txmin = ( (dir.x > 0 ? 0.f : volume_max.x) - origin.x) / dir.x;
float tymin = ( (dir.y > 0 ? 0.f : volume_max.y) - origin.y) / dir.y;
float tzmin = ( (dir.z > 0 ? 0.f : volume_max.z) - origin.z)... | the_stack |
extern "C" {
// CUDA version of the components in
// "ai_economist.foundation.components.covid19_components.py"
__global__ void CudaControlUSStateOpenCloseStatusStep(
int * stringency_level,
const int kActionCooldownPeriod,
int * action_in_cooldown_until,
const int * kDefault... | the_stack |
#include <glog/logging.h>
#include <cmath>
#include <memory>
#include <vector>
namespace dietgpu {
template <FloatType FT, int Threads>
struct SplitFloatNonAligned {
static __device__ void split(
const typename FloatTypeInfo<FT>::WordT* in,
uint32_t size,
typename FloatTypeInfo<FT>::CompT* compOut... | the_stack |
#include <curand.h>
#include <curand_kernel.h>
#define TPB 32
namespace {
// FORWARD KERNELS
extern "C" __global__ void logsum_kernel0( float* __restrict__ A, float* __restrict__ B, float* __restrict__ C) {
float M[64];
__shared__ float A_shared[4096];
__shared__ float B_shared[4096];
float A_shared_lo... | the_stack |
#include "TorusSegmentation.h"
// 宏:DEF_BLOCK_1D
// 定义了默认的 1D 线程块的尺寸。
#define DEF_BLOCK_1D 256
// 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y
// 定义了二维结构的并行线程块默认的尺寸。
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// 宏:DEF_BLACK 和 DEF_WHITE
// 定义了黑色和白色的像素值。
#define DEF_BLACK 0
#define DEF_WHITE 255
// 核函数:_initLblM... | the_stack |
* \file
* The cub::BlockExchange class provides [<em>collective</em>](index.html#sec0) methods for rearranging data partitioned across a CUDA thread block.
*/
#pragma once
#include "../util_ptx.cuh"
#include "../util_arch.cuh"
#include "../util_macro.cuh"
#include "../util_type.cuh"
#include "../util_name... | the_stack |
#include <nvbio/basic/numbers.h>
#include <nvbio/basic/algorithms.h>
#include <nvbio/basic/priority_queue.h>
#include <nvbio/basic/timer.h>
#include <nvbio/basic/transform_iterator.h>
#include <nvbio/basic/vector_view.h>
#include <nvbio/basic/primitives.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust... | the_stack |
#include <taskflow/cuda/cudaflow.hpp>
#include <iomanip>
#include <cfloat>
#include <climits>
#define L2(x1, y1, x2, y2) ((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2))
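// Added note (not part of the original file): L2 expands to the squared
// Euclidean distance, which preserves nearest-neighbor ordering while
// avoiding the sqrt, e.g.:
//   float d2 = L2(px, py, cx, cy); // distance^2 from point to centroid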
// ----------------------------------------------------------------------------
// CPU (sequential) implementation
// ------------------------------------------... | the_stack |
namespace CUDAKernel{
Norm Norm::mean_std(const float mean[3], const float std[3], float alpha, ChannelType channel_type){
Norm out;
out.type = NormType::MeanStd;
out.alpha = alpha;
out.channel_type = channel_type;
memcpy(out.mean, mean, sizeof(out.mean));
memcpy(out.std, std, sizeof(out.std));
retu... | the_stack |
#include "octnet/gpu/pool.h"
#include "octnet/gpu/gpu.h"
#include <cstdlib>
__global__ void kernel_gridpool2x2x2_struct(octree out, int n_blocks, ot_size_t feature_size, const octree in) {
CUDA_KERNEL_LOOP(out_grid_idx, n_blocks) {
ot_tree_t* otree = octree_get_tree(&out, out_grid_idx);
int gn,ogd,ogh,ogw;... | the_stack |
#include "fringe/cuda/cudaUtils.h"
#include "fringe/cuda/ulongmask.h"
#include "KS2sample_cuda.h"
#include <math.h>
#include <iostream>
#define THRD_PER_BLOCK 96
//Constant memory for constant input values
//Done this way in topozero. Need to understand
//why this cannot be made part of the struct.
__constant__ doub... | the_stack |
#pragma once
#include <gunrock/app/problem_base.cuh>
namespace gunrock {
namespace app {
namespace bfs {
enum Direction {
FORWARD = 0,
BACKWARD = 1,
UNDECIDED = 2,
};
/**
* @brief Specifying parameters for BFS Problem
* @param parameters The util::Parameter<...> structure holding all parameter
* info \... | the_stack |
namespace cgbn {
#if 1
template<class env>
__device__ __forceinline__ void core_t<env>::mont_mul(uint32_t r[LIMBS], const uint32_t a[LIMBS], const uint32_t b[LIMBS], const uint32_t n[LIMBS], const uint32_t np0) {
uint32_t sync=sync_mask(), group_thread=threadIdx.x & TPI-1;
uint32_t t, t0, t1, q, r1, ra[LIMBS+2], r... | the_stack |
namespace cg = cooperative_groups;
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////////////
// A structure of 2D points (structure of arrays).
////////////////////////////////////////////////////////////////////////////////
class Points {
float *m_x;
float *m_y;
p... | the_stack |
// DownSampleImage.cu
// Implements image down-sampling.
#include <iostream>
using namespace std;
#include "DownSampleImage.h"
#include "ErrorCode.h"
#include "stdio.h"
#include "time.h"
#include "stdlib.h"
#include "curand.h"
#include "curand_kernel.h"
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread block dimensions.
#define DEF_BLOCK_X 32
#define DEF... | the_stack |
#include <opencv2/core.hpp>
#include "labeling_algorithms.h"
#include "labels_solver.h"
#include "memory_tester.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <cstdio>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <opencv2/core.hp... | the_stack |
#ifdef __INTELLISENSE__
//#define __CUDA_ARCH__ 210
#define __CUDACC__
#include <cuda_helper.h>
#include <cuda_texture_types.h>
#define __byte_perm(a,b,c) (a)
#define tex1Dfetch(t, n) (n)
#endif
#define USE_SHARED 1
static unsigned int *d_textures[MAX_GPUS][8];
#define PC32up(j, r) ((uint32_t)((j) + (r)))
#define ... | the_stack |
#include "base_strategy.cuh"
#include <cuco/static_map.cuh>
// this is needed by cuco as key, value must be bitwise comparable.
// compilers don't declare float/double as bitwise comparable
// but that is too strict
// for example, the following is true (or 0):
// float a = 5;
// float b = 5;
// memcmp(&a, &b, sizeof... | the_stack |
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/detail/column_utilities.hpp>
#include <jit/type.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
#include <thrust/distance.h>
#incl... | the_stack |
#include "common.cuh"
#include <kat/on_device/sequence_ops/grid.cuh>
#include <kat/on_device/sequence_ops/block.cuh>
#include <kat/on_device/sequence_ops/warp.cuh>
#include <cuda/api_wrappers.hpp>
#include <limits>
#include <algorithm>
#include <numeric>
#include <iostream>
#include <iomanip>
using std::size_t;
usi... | the_stack |
#pragma once
#include <gunrock/util/device_intrinsics.cuh>
#include <gunrock/util/track_utils.cuh>
#include <gunrock/util/sort_device.cuh>
#include <gunrock/app/enactor_base.cuh>
#include <gunrock/app/enactor_iteration.cuh>
#include <gunrock/app/enactor_loop.cuh>
#include <gunrock/app/pr/pr_problem.cuh>
#include <gunr... | the_stack |
#include "Template.h"
#include <iostream>
#include <fstream>
using namespace std;
#include "ErrorCode.h"
// Host static method: newTemplate (creates a template)
__host__ int TemplateBasicOp::newTemplate(Template **outtpl)
{
// Check whether the pointer meant to hold the new template is NULL.
if (outtpl == NULL)
return NULL_POINTER;
// Allocate a new TemplateCuda object; this method... | the_stack |
void test_pp_stringize()
{
ASSERT_EQUAL(
std::string(THRUST_PP_STRINGIZE(int))
, "int"
);
ASSERT_EQUAL(
std::string(THRUST_PP_STRINGIZE(hello world))
, "hello world"
);
ASSERT_EQUAL(
std::string(THRUST_PP_STRINGIZE(hello world))
, "hello world"
);
ASSERT_EQUAL(
std::string(THRUST... | the_stack |
using namespace std;
namespace std {
template <typename _CharT, typename _Traits>
inline basic_ostream<_CharT, _Traits> &
tab(basic_ostream<_CharT, _Traits> &__os) {
return __os.put(__os.widen('\t'));
}
}
std::string stringPadding(std::string original, size_t charCount)
{
original.resize(charCount, ' '... | the_stack |
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/reverse.h>
#include <thrust/sequence.h>
template <typename InputT,
typename OutputT,
int LogicalWarpThreads,
int ItemsPerThread,
int BlockThreads,
typename ActionT>
__global__ void kerne... | the_stack |
#include <doctest.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cuda/cudaflow.hpp>
#include <taskflow/cuda/algorithm/for_each.hpp>
#include <taskflow/cuda/algorithm/transform.hpp>
#include <taskflow/cuda/algorithm/reduce.hpp>
#include <taskflow/cuda/algorithm/sort.hpp>
#include <taskflow/cuda/algorithm/find.hp... | the_stack |
#include <ATen/cuda/CUDAApplyUtils.cuh>
#define CHECK_CUDA(x) \
TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) \
TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
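// Added usage sketch (hypothetical function, not part of the original file):
// the guards above are typically invoked at the top of an extension entry
// point before touching the tensor's data pointer.
void check_input_demo(const at::Tensor& x)
{
  CHECK_INPUT(x); // throws unless x is a contiguous CUDA tensor
}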
namespace {
int const thread... | the_stack |
#include <cassert>
#include <cuda_runtime.h>
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
#include <cudf/column/column_device_view.cuh>
#include <cudf/datetime.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/typ... | the_stack |
static inline void THNN_(VolumetricFullDilatedConvolution_shapeCheck)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *weight,
THCTensor *bias,
int dT, int dW, int dH,
int padT, int padW, int pa... | the_stack |
namespace anakin {
namespace saber {
const int TRANS_BLOCK_SIZE = 16;
template <typename Dtype>
__global__ void ker_permute_fwd(Dtype * out_data, const int num_axes,\
const int count, const int * permute_order,\
const int * new_steps, const int * old_steps,\
... | the_stack |
#pragma once
#include <Cuda/Common/Palatte.h>
#include <math_constants.h>
#include "ScalableTSDFVolumeCudaDevice.cuh"
namespace open3d {
namespace cuda {
__global__ void CreateKernel(ScalableTSDFVolumeCudaDevice server) {
const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= server.value_ca... | the_stack |
#define NUM_THREADS 64
#define AE_DIM 32
__device__ __forceinline__ float sigmoid(float x) {
  // Rewritten from exp(x) / (exp(x) + 1.0): expf(-x) cannot overflow to
  // inf/inf for large positive x, and the float intrinsic avoids double
  // promotion on the device.
  return 1.0f / (1.0f + expf(-x));
}
__device__ __forceinline__ void
se3_transform_point_inplace(const float T[7], float X[3]) {
const float tx=T[0], ty=T[1], tz=T[2];
const float qx=T[3], qy=T[4], qz=T[5], qw=T[6];
... | the_stack |
#include <vector>
#include <iostream>
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Parallel.h>
// #include "utils.cuh"
#include <Eigen/Sparse>
#include <Eigen/SparseCore>
#include <Eigen/SparseCholesky>
typedef Eigen::SparseMatrix<double> SpMat;
typedef Eigen::Triplet<double> T;
typedef s... | the_stack |
#pragma once
#include <ptx_primitives.cuh>
namespace IndexQueueAccessControl
{
struct AtomicCheckedAbortOnOverflow
{
template<unsigned int SIZE, class T>
__device__
static int enqueue(const T& element, int& count, unsigned int& back, T* indices, T UNUSED)
{
int fill = atomicAdd(&count, 1);
... | the_stack |
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
#include <cufft.h>
extern int nblock_size;
extern int maxgsx;
static cudaError_t crc;
static cufftResult cfrc = CUFFT_SUCCESS;
static cufftHandle planrx = 0, planxr = 0, planrxn = 0, planxrn = 0;
static cufftHandle plany = 0, planyn = 0;
__global__ void gpupp... | the_stack |
#include <iostream>
//headers in local files
#include "lidar_point_pillars/common.h"
#include "lidar_point_pillars/preprocess_points_cuda.h"
__global__ void make_pillar_histo_kernel(
const float* dev_points,
float* dev_pillar_x_in_coors,
... | the_stack |
using namespace mgpu;
using namespace thrust::placeholders;
vector<void*> alloced_mem;
template<typename T>
struct distinct : public binary_function<T,T,T>
{
__host__ __device__ T operator()(const T &lhs, const T &rhs) const {
return lhs != rhs;
}
};
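// Added usage sketch (hypothetical, not part of the original file): applied
// pairwise to neighboring elements, distinct<T> flags positions where a run
// of equal values ends -- the usual first step of a group-by on sorted keys:
//   thrust::transform(keys.begin() + 1, keys.end(), keys.begin(),
//                     flags.begin() + 1, distinct<int>());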
struct gpu_getyear
{
const int_type *source;... | the_stack |
__constant__ size_t CUDA_minChunkY;
__constant__ size_t CUDA_lookupSize;
__device__ Vec3* CUDA_vertices;
__device__ Vec2* CUDA_texCoords;
__device__ Triangle* CUDA_triangles;
__device__ color* CUDA_lookupColors;
__device__ uint16_t* CUDA_lookupIndices;
namespace CUDA {
void setMinChunkY(size_t chunkY) {
checkCUD... | the_stack |
#include "bsp.h"
#include "cudabsp.h"
#include "cudarad.h"
#include "cudautils.h"
static __device__ inline float luma_from_rgb(float3 rgb) {
//return sqrt(dot(rgb / 255.0, make_float3(0.299, 0.587, 0.114)));
return sqrt(dot(rgb / 255.0f, make_float3(1.0f)));
}
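// Added note (not part of the original file): the active line computes
// sqrt((r + g + b) / 255), a uniform-weight luma, whereas the commented-out
// line used the Rec.601 weights (0.299, 0.587, 0.114).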
static __device__ inline float clamp(float x, ... | the_stack |
// Make sure we don't allow dynamic initialization for device
// variables, but accept empty constructors allowed by CUDA.
// RUN: %clang_cc1 -verify %s -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 %s
#ifdef __clang__
#include "Inputs/cuda.h"
#endif
// Use the types we share with CodeGen tests.
#include "... | the_stack |
#include "dcn_v2_psroi_pooling_cuda_double.h"
#include <cstdio>
#include <algorithm>
#include <cstring>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
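// Added usage sketch (hypothetical kernel, not part of the original file):
// CUDA_KERNEL_LOOP is the standard grid-stride loop, so one launch of any
// size covers all n elements.
__global__ void scale_inplace(double* data, int n, double s)
{
    CUDA_KERNEL_LOOP(i, n)
    {
        data[i] *= s; // thread i then strides by blockDim.x * gridDim.x
    }
}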
const int ... | the_stack |
__global__ void copy_to_fft_input(volatile float *__restrict__ fft_input,
const float *w_coefficients_device,
const int n_fft_coeffs,
const int n_fft_coeffs_half,
const int n_terms)
{
... | the_stack |
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
int nblock_size = 64;
int ngrid_size = 1;
int maxgsx = 65535;
int mmcc = 0;
static int devid;
static cudaError_t crc;
__global__ void emptyKernel() {}
/*--------------------------------------------------------------------*/
extern "C" void gpu_setgbsize(int n... | the_stack |
#include <cub/cub.cuh>
#include <limits>
#include <raft/cuda_utils.cuh>
#include <raft/distance/detail/pairwise_distance_base.cuh>
#include <raft/linalg/contractions.cuh>
#include <stdint.h>
namespace raft {
namespace distance {
namespace detail {
#if (ENABLE_MEMCPY_ASYNC == 1)
#include <cuda_pipeline.h>
using namesp... | the_stack |
#include <NvInfer.h>
#include <cassert>
#include <cstring>
#include <vector>
#include <cub/cub.cuh>
#include "trt_engine/trt_network_crt/plugins/common/plugin_util.h"
#include "trt_engine/trt_network_crt/plugins/layer_norm_plugin/layer_norm_plugin.h"
using namespace nvinfer1;
FWD_TRT_NAMESPACE_BEGIN
template <typ... | the_stack |
// ConvexHull.cu
// Implementation of the convex hull algorithm.
#include "ConvexHull.h"
#include <stdio.h>
// Macro: CH_DEBUG_KERNEL_PRINT (kernel debug print switch)
// When this switch is on, the kernel prints diagnostic information at run time
// to aid debugging; if the macro is commented out, the kernel prints nothing,
// which helps the program run faster.
//#define CH_DEBUG_KERNEL_PRINT
// Macro: CH_DEBUG_CPU_PRINT (CPU-version debug print switch)
// When this switch is on, the CPU version prints diagnostic information at run
// time to aid debugging; if this
// mac... | the_stack |
#ifdef USE_CUDA
#include "thrust/device_vector.h"
#endif
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype, typename MItype, typename MOtype>
void SoftmaxLayer<Dtype, MItype, MOtype>::GenerateProgram() {
this->device_program_ = this->device... | the_stack |
int main()
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int inputs = 1000;
int size = 1000;
ai::TensorCUDA_float weights(size * inputs);
ai::TensorCUDA_float deltas(size * inputs);
weights.fill(1);
ai::TensorCUDA_float bias(size);
ai::TensorCUDA_float errors(size);
//errors.... | the_stack |
// FillUp.cu
// Implements processing of the input image's pixels.
#include <iostream>
using namespace std;
#include "FillUp.h"
#include "ErrorCode.h"
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Static variable: _defTpl
// Default template used when the user has not supplied a valid one; defaults to 3 x 3.
static Template *_defTpl = NULL;
// Kernel function: _fil... | the_stack |
#include <map>
struct GPUScene
{
Primitive* primitives;
int numPrimitives;
Primitive* lights;
int numLights;
Sky sky;
BVH bvh;
};
#define kBsdfSamples 1.0f
#define kProbeSamples 1.0f
#define kRayEpsilon 0.0001f
#define LAUNCH_BOUNDS __launch_bounds__(256, 4)
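// Added note (not part of the original file): __launch_bounds__(256, 4)
// promises at most 256 threads per block and requests at least 4 resident
// blocks per SM, which caps register usage per thread accordingly.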
__device__ inline int getGlobalIndex()
{
int bl... | the_stack |
#include <cudf/io/types.hpp>
#include <cudf/utilities/span.hpp>
#include <io/utilities/trie.cuh>
#include "column_type_histogram.hpp"
#include <rmm/device_uvector.hpp>
#include <thrust/execution_policy.h>
#include <thrust/iterator/reverse_iterator.h>
#include <optional>
using cudf::device_span;
namespace cudf {
n... | the_stack |
#include "Static/TriangleCounting/triangle.cuh"
using namespace hornets_nest;
namespace hornets_nest {
__device__ __forceinline__
void initialize(degree_t diag_id,
degree_t u_len,
degree_t v_len,
vid_t* __restrict__ u_min,
vid_t* __restrict__ u_max,
... | the_stack |
namespace at { namespace native {
namespace {
template<typename T, template<class> class Op>
struct BinaryOpScalarFunctor_ {
__device__ void operator() (
int chunk_size,
TensorListMetadata<1>& tl,
T scalar) {
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chun... | the_stack |
#include <thrust/extrema.h> // for thrust::max_element
namespace amgx
{
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int threads_per_block, int warps_per_block, bool diag>
__global__
void getLambdaEstimate(const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *value... | the_stack |
#include "include/common.h"
#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 32
using Matf31da = Eigen::Matrix<float, 3, 1, Eigen::DontAlign>;
namespace kinectfusion {
namespace internal {
namespace cuda {
template<int SIZE>
static __device__ __forceinline__
void reduce(v... | the_stack |
namespace faiss { namespace gpu {
// Number of warps that the kernel is instantiated with
constexpr int kWarps = 8;
constexpr int kLanes = kWarpSize;
constexpr int kMaxDistance = std::numeric_limits<int>::max();
// Performs a binary matrix multiplication, returning the lowest k results in
// `vecs` for each `query` ... | the_stack |
Implements the Romein convolutional algorithm on a GPU using CUDA.
*/
#include <iostream>
#include <bifrost/romein.h>
#include "romein_kernels.cuh"
#include "assert.hpp"
#include "trace.hpp"
#include "utils.hpp"
#include "cuda.hpp"
#include "cuda/stream.hpp"
#include "Complex.hpp"
struct __attribute__((aligned(1... | the_stack |
extern "c" __sync_lock_test_and_set(...);
using namespace std;
/* TODO:
- Allow it to run with max_width > 512 (maximum thread block width)
- tanh function that gives bit-for-bit equivalent results as on the
host
- Remove learning rate from the update (apply it when updating the weights)
and use a... | the_stack |
* \file
* The cub::BlockRadixSort class provides [<em>collective</em>](index.html#sec0) methods for radix sorting of items partitioned across a CUDA thread block.
*/
#pragma once
#include "block_exchange.cuh"
#include "block_radix_rank.cuh"
#include "../util_ptx.cuh"
#include "../util_arch.cuh"
#include "../util_t... | the_stack |
namespace cg = cooperative_groups;
// Overload CUDA atomicAdd for other 64-bit unsigned/signed integer types
__forceinline__
__device__ long atomicAdd(long* address, long val)
{
return (long)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
__forceinline__
__device__ long long atomicAdd(long long* addr... | the_stack |
namespace kernels {
template <typename I>
__global__ void try_out_integral_math_functions(I* results, I* __restrict expected)
{
size_t i { 0 };
bool print_first_indices_for_each_function { false };
auto maybe_print = [&](const char* section_title) {
if (print_first_indices_for_each_function) {
printf("%-30s t... | the_stack |
__device__ ulonglong8to16 *state2;
uint32_t *d_YNonce[MAX_GPUS];
__constant__ uint32_t pTarget[8];
__constant__ uint32_t c_data[32];
__constant__ uint16 shapad;
static uint32_t *d_hash[MAX_GPUS];
static uint8* d_hash2[MAX_GPUS];
static uint32* d_hash3[MAX_GPUS];
static uint32* d_hash4[MAX_GPUS];
#define xo... | the_stack |
#include "common.h"
#include "bn.h"
/*
* Device functions and data structures
*/
struct Float2 {
float v1, v2;
__device__ Float2() {}
__device__ Float2(float _v1, float _v2) : v1(_v1), v2(_v2) {}
__device__ Float2(float v) : v1(v), v2(v) {}
__device__ Float2(int v) : v1(v), v2(v) {}
__device__ Float2 &op... | the_stack |
#include "CUFLU.h"
#if ( MODEL == HYDRO && ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) )
// external functions
#ifdef __CUDACC__
#include "CUFLU_Shared_FluUtility.cu"
#else
void Hydro_Rotate3D( real InOut[], const int XYZ, const bool Forward, const int Mag_Offset );
void Hydro_Con2Pri(... | the_stack |
* \file
* Random-access iterator types
*/
#pragma once
#include "thread/thread_load.cuh"
#include "util_device.cuh"
#include "util_debug.cuh"
#include "util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/***********************************************************... | the_stack |
#include "caffe/layers/lrn_layer.hpp"
#include "caffe/util/benchmark.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
#ifdef TODO_REFACTOR
#ifdef USE_CUDA
template<typename Dtype, typename MItype, typename MOtype>
__global__ void LRNFillScale(const int_tp nthreads, const Dtype* const in,
... | the_stack |
#include <cooperative_groups.h>
#if ( __CUDACC_VER_MAJOR__ > 10 )
#include <cooperative_groups/reduce.h>
#endif
namespace cg = cooperative_groups;
// Check if C++17 is being used
#if __cplusplus >= 201703L
#include <thrust/complex.h>
///////////////////////////////////////////////////////////////////////////////
// ... | the_stack |
extern "C" {
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
}
#include <miner.h>
#include <cuda_helper.h>
#include <cuda_vector_uint2x4.h> // todo
#include "wildkeccak.h"
extern char *device_config[MAX_GPUS]; // -l
extern uint64_t* pscratchpad_buff;
static uint64_t* d_input[MAX_GPUS];
static uint32_t*... | the_stack |
__device__
inline size_t pre_fftshift(size_t offset,
CallbackData* cb) {
// For inverse transforms with apply_fftshift=true, we cyclically shift
// the input data here by modifying the read offset.
if( cb->do_fftshift && cb->inverse ) {
for( int d=0; d<cb->ndim; ++d ) {
// Co... | the_stack |
#include <vector>
#include "caffe/layers/detectnet_transform_layer.hpp"
#include "caffe/util/detectnet_coverage.hpp"
#include "caffe/util/gpu_memory.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Calculate the location in the image from the loop index
__device__ void get_pixel_indices(const int l... | the_stack |
#include "test.hpp"
class ActiveElement
{
public:
__host__
__device__
ActiveElement()
{
val += 100000;
}
__host__
__device__
~ActiveElement()
{
val += 1000000;
}
inline bool operator==(ActiveElement other) const
{
return val == other.val;
}
... | the_stack |
#include <stdio.h>
#include <assert.h>
#include <algorithm>
#include <cuda_runtime_api.h>
#include <device_atomic_functions.h>
#include <device_launch_parameters.h>
#define W 32
#define G 1024
#define B 256
__forceinline__ __device__ static int idx2(int n, int u, int U1) {
return n * U1 + u;
}
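// Added note (not part of the original file): idx2 flattens (n, u) into a
// row-major buffer with row length U1; e.g. for U1 == 8, element (2, 5)
// lands at index 21.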
__forceinline__ __... | the_stack |