| text (string, lengths 2.5k–6.39M) | kind (string, 3 classes) |
|---|---|
import * as Motorcycle from '@motorcycle/run'
import * as assert from 'assert'
import * as most from 'most'
import { div, h2, h3, h4, input, makeDomComponent } from '../../src'
import { createRenderTarget } from '../helpers/createRenderTarget'
describe('DOMSource.events()', function() {
it('should catch a basic cl... | the_stack |
import * as _ from 'lodash';
import {
Component,
ElementRef,
EventEmitter,
Injector,
Input,
OnDestroy,
OnInit,
Output,
ViewChild
} from '@angular/core';
import {AbstractComponent} from '@common/component/abstract.component';
import {PopupService} from '@common/service/popup.service';
import {Subscribe... | the_stack |
import * as fc from "fast-check";
import { array } from "fp-ts/lib/Array";
import { left, right } from "fp-ts/lib/Either";
import { FunctionN, identity } from "fp-ts/lib/function";
import { pipe } from "fp-ts/lib/pipeable";
import {abort, done, interrupt, raise } from "../src/exit";
import { Wave } from "../src/wave";
... | the_stack |
import Base from '~/src/command/generate/base'
import { TypeWeiboUserInfo, TypeMblog, TypeWeiboEpub, TypeWeiboListByDay } from '~/src/type/namespace/weibo'
import TypeTaskConfig from '~/src/type/namespace/task_config'
import PathConfig from '~/src/config/path'
import MMblog from '~/src/model/mblog'
import MMblogUser fr... | the_stack |
import { Blob } from "./Blob";
import { GDIContext } from "./GDIContext";
import { EMFJSError, Helper } from "./Helper";
import { PointL, PointS, RectL, SizeL } from "./Primitives";
import { Region } from "./Region";
import { Brush, ColorRef, Pen } from "./Style";
class EmfHeader {
private size: number;
privat... | the_stack |
import { CodeBuild } from 'aws-sdk';
import * as setup from './hotswap-test-setup';
let hotswapMockSdkProvider: setup.HotswapMockSdkProvider;
let mockUpdateProject: (params: CodeBuild.UpdateProjectInput) => CodeBuild.UpdateProjectOutput;
beforeEach(() => {
hotswapMockSdkProvider = setup.setupHotswapTests();
mockU... | the_stack |
enum SegmentStyle {
//% block="blank"
Blank = 0,
//% block="thin"
Thin = 1,
//% block="narrow"
Narrow = 2,
//% block="medium"
Medium = 3,
//% block="thick"
Thick = 4
}
enum DigitRadix {
//% block="decimal"
Decimal = 10,
//% block="hex"
Hex = 16,
//% block="oc... | the_stack |
import { AccessLevelList } from "../shared/access-level";
import { PolicyStatement, Operator } from "../shared";
/**
* Statement provider for service [dax](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazondynamodbacceleratordax.html).
*
* @param sid [SID](https://docs.aws.amazon.com/IAM... | the_stack |
import * as React from "react";
import { Panel } from "azure-devops-ui/Panel";
import "./DependencyPanel.scss";
import {
ITimelineItem,
LoadingStatus,
ProgressTrackingCriteria,
IWorkItemIcon,
IProject,
IProjectConfiguration
} from "../../Contracts";
import { PortfolioPlanningDataServi... | the_stack |
import '@lib/crash/reporter'
import '@lib/logger/main'
import '@lib/tracker/main'
import Fs from 'fs'
import Path from 'path'
import { isEmpty, identity, pickBy } from 'lodash'
import { app, BrowserWindow, dialog, ipcMain } from 'electron'
import {
applicationMenu,
Menu as ContextMenu,
ProjectMenu,
Rep... | the_stack |
import {BindItem, ContainerContextType, ContainerNodeOptions} from '../../types';
import {IContainerState} from '../Container/reducer';
import {isObjectLike, each, isPlainObject, get} from 'lodash';
import {RunTimeContextCollection} from '../context';
import {
compileExpressionString,
evalInContext,
isExpre... | the_stack |
import { Construct } from 'constructs';
import { CfnHook } from './cfn-hook';
import { FromCloudFormationOptions } from './cfn-parse';
/**
* The possible types of traffic shifting for the blue-green deployment configuration.
*
* The type of the {@link CfnTrafficRoutingConfig.type} property.
*
* @stability stable
... | the_stack |
import {
SushiTokenInstance,
MockERC20Instance,
MasterChefInstance,
UniswapV2FactoryInstance,
UniswapV2Router02Instance,
UniswapV2PairInstance,
StrategyAllETHOnlyInstance,
StrategyLiquidateInstance,
BankInstance,
SimpleBankConfigInstance,
SushiswapGoblinInstance,
WETHInstance,
} from '../typecha... | the_stack |
module dragonBones {
/**
* @class dragonBones.Slot
* @classdesc
* A Slot instance is a socket on a bone and serves as a container for display images.
* A Bone can carry multiple Slots; each Slot displays one image at a time, and images in different Slots can be shown simultaneously.
* Each Slot can hold several images; different images within the same Slot cannot be shown at once, but they can be swapped while an animation plays, which is how frame animation is implemented.
* @extends dragonBones.DBObject
* @see dragonBones.Armatu... | the_stack |
import assert = require('assert');
import { Context } from 'egg';
import { app, mock } from 'egg-mock/bootstrap';
import dayjs from '../../../../app/common/dayjs';
import { TestUtil } from 'test/TestUtil';
describe('test/port/controller/DownloadController/showPackageDownloads.test.ts', () => {
let ctx: Context;
le... | the_stack |
import { Equatable } from "@siteimprove/alfa-equatable";
import { Hash } from "@siteimprove/alfa-hash";
import { Serializable } from "@siteimprove/alfa-json";
import { Mapper } from "@siteimprove/alfa-mapper";
import { Parser } from "@siteimprove/alfa-parser";
import { Slice } from "@siteimprove/alfa-slice";
import { R... | the_stack |
import { IDocumentStore } from "../../../src";
import { disposeTestDocumentStore, testContext } from "../../Utils/TestUtil";
import { Company, User } from "../../Assets/Entities";
import { assertThat } from "../../Utils/AssertExtensions";
describe("RavenDB_15080", function () {
let store: IDocumentStore;
bef... | the_stack |
import * as Debug from 'debug';
import * as Models from '../models';
import { buildDeviceCapabilities } from './deviceCapability';
import * as validation from './validation';
const debug = Debug('neeo:device:DeviceBuilder');
const MAXIMAL_STRING_LENGTH = validation.MAXIMAL_STRING_LENGTH;
const DEFAULT_MANUFACTURER = ... | the_stack |
// Copyright (c) 2015 Vadim Macagon
// MIT License, see LICENSE file for full terms.
import { TargetStopReason, IFrameInfo, IBreakpointInfo } from './types';
import { extractBreakpointInfo } from './extractors';
/**
* Emitted when a thread group is added by the debugger; it's possible the thread group
* hasn't y... | the_stack |
const knownHTMLAttribs = [
"accept",
"accept-charset",
"accesskey",
"action",
"align",
"alt",
"async",
"autocomplete",
"autofocus",
"autoplay",
"bgcolor",
"border",
"cellpadding",
"cellspacing",
"charset",
"checked",
"cite",
"class",
"color",
"cols",
"colspan",
"content",
"... | the_stack |
import { BackingObjectMap, FieldMapping, StepMapping } from '../../views/landing/wizard/shared/field-mapping/FieldMapping';
import { PersistentStore } from '../../views/landing/wizard/shared/PersistentStore';
export interface UserDataEntry {
display: string, // what the user should see if this is displayed on a... | the_stack |
import { autobind } from 'core-decorators';
import FocusTrap from 'focus-trap-react';
import { List, Set } from 'immutable';
import keyboardJS from 'keyboardjs';
import qs from 'query-string';
import React from 'react';
import { RouteComponentProps, withRouter } from 'react-router';
import { Link } from 'react-router-d... | the_stack |
import es from '../../db/elasticsearch';
import { publishMainStream, publishNotesStream } from '../stream';
import { deliver } from '../../queue';
import renderNote from '../../remote/activitypub/renderer/note';
import renderCreate from '../../remote/activitypub/renderer/create';
import renderAnnounce from '../../remot... | the_stack |
import { computed, observable } from "../../node_modules/mobx/lib/mobx.module.js"
import { ChronoGraph } from "../../src/chrono/Graph.js"
import { CalculatedValueGen, CalculatedValueSync, Identifier } from "../../src/chrono/Identifier.js"
import { Base } from "../../src/class/Base.js"
import { AnyConstructor, ClassUnio... | the_stack |
import { CameraRecordingConfiguration, H264Level, H264Profile } from "homebridge";
import { ProtectCamera, RtspEntry } from "./protect-camera";
import { FfmpegProcess } from "./protect-ffmpeg";
import { ProtectCameraConfig } from "unifi-protect";
import events from "events";
// FFmpeg HomeKit Streaming Video recording... | the_stack |
import * as fs from 'fs';
import * as path from 'path';
import * as Doppio from '../doppiojvm';
import JVMThread = Doppio.VM.Threading.JVMThread;
import ReferenceClassData = Doppio.VM.ClassFile.ReferenceClassData;
import logging = Doppio.Debug.Logging;
import util = Doppio.VM.Util;
import ThreadStatus = Doppio.VM.Enums... | the_stack |
import { Column, ColumnBody, SearchableColumnHeader } from '.';
import { boolean, number } from '@storybook/addon-knobs';
import React from 'react';
import { action } from '@storybook/addon-actions';
const obj = {
title: 'ColumnMapper',
component: Column,
includeStories: [], // or don't load this file at all
};... | the_stack |
import * as _ from 'underscore';
import { Posts } from '../lib/collections/posts/collection';
import { Sequences } from '../lib/collections/sequences/collection';
import { Collections } from '../lib/collections/collections/collection';
import { ensureIndex } from '../lib/collectionUtils';
import { accessFilterSingle, a... | the_stack |
import fs from "fs";
import { sodium } from "../../src/Crypto";
import { numToUint8Array, numFromUint8Array, getPadding, toBase64 } from "../../src/Helpers";
/**
* Buzhash implements a cyclic polynomial rolling hash function.
* It is a custom-developed keyed variant with protections against plain text
* recovery fro... | the_stack |
import type { CeramicApi, StreamMetadata } from '@ceramicnetwork/common'
import { CommitID, StreamID, StreamRef } from '@ceramicnetwork/streamid'
import { TileDocument } from '@ceramicnetwork/stream-tile'
import { CIP11_DEFINITION_SCHEMA_URL } from '@glazed/constants'
import type { Definition } from '@glazed/did-datast... | the_stack |
import PlayerComponent from '../interfaces/component';
import EventsList from '../interfaces/events-list';
import Player from '../player';
import { EVENT_OPTIONS, IS_ANDROID, IS_IOS } from '../utils/constants';
import {
hasClass, isAudio, offset, removeElement
} from '../utils/general';
import { formatTime } from '... | the_stack |
import { BuildData, ObserveForStatus } from "@adpt/core";
import { DockerSplitRegistryInfo } from "../docker";
import { DaemonSetSpec } from "./DaemonSet";
import { DeploymentSpec } from "./Deployment";
import { PodSpec } from "./Pod";
import { ServiceSpec } from "./Service";
/**
* Kubernetes Kind
*
* @public
*/
e... | the_stack |
import * as E from 'fp-ts/Either'
import * as M from 'fp-ts/Monoid'
import * as O from 'fp-ts/Option'
import * as RA from 'fp-ts/ReadonlyArray'
import * as RTE from 'fp-ts/ReaderTaskEither'
import * as TE from 'fp-ts/TaskEither'
import { constVoid, Endomorphism, flow, pipe } from 'fp-ts/function'
import * as TD from 'i... | the_stack |
import { transfer } from 'comlink';
import {
RemoteReadableStream,
RemoteWritableStream,
} from '@transcend-io/remote-web-streams';
import streamSaver from 'streamsaver';
import mime from 'mime-types';
import {
ReadableStream,
WritableStreamIsNative,
WritableStreamPonyfill,
} from './streams';
// Local
impor... | the_stack |
// Add Functor into Kernel Call (done)
/**
* @file
* kernel.cuh
*
* @brief Forward Edge Map Kernel Entrypoint
*/
#pragma once
#include <gunrock/util/cta_work_distribution.cuh>
#include <gunrock/util/cta_work_progress.cuh>
#include <gunrock/util/kernel_runtime_stats.cuh>
#include <gunrock/oprtr/TWC_advance/kerne... | the_stack |
static inline void THNN_(VolumetricConvolution_shapeCheck)
(THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *weight,
THCTensor *gradWeight,
THCTensor *... | the_stack |
#define BSZ 16
/**
* \namespace kernels
* \brief Contains all the custom-written CUDA kernels.
*/
namespace kernels
{
/**
* \brief Calculates drag using a control-volume approach (left-right).
*
* Evaluate the contribution from the left and right parts of the control surface.
*
* \param FxX raw pointer to th... | the_stack |
using namespace cuHE;
using namespace cuHE_Utils;
///////////////////////////////////////////////////////////////////////////////
// @class CuDHS
///////////////////////////////////////////////////////////////////////////////
//// Constructor //////////////////////////////////////////
CuDHS::CuDHS(int d, int p, int w,... | the_stack |
#include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
// The algorithm itself performs well, but memory allocation is a problem.
// I will try... | the_stack |
extern "C" {
#include <ccv.h>
#include <ccv_internal.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>
#include <nnc/ccv_nnc_internal.h>
}
#include <nnc/gpu/ccv_nnc_compat.h>
static int _ccv_nnc_data_transfer(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const in... | the_stack |
#include <ATen/ATen.h>
#include "grid_sampler_cuda.cuh"
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <c10/macros/Macros.h>
namespace mmdetection {
usin... | the_stack |
#define LO16(x) ((x) & 0x0000FFFF)
#define HI16(x) ((x) >> 16)
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr... | the_stack |
namespace PyCA {
namespace Splatting{
inline __device__ void atomicSplat(int* d_wd, float mass, float x, float y, float z,
int w, int h, int l)
{
int xInt = int(x);
int yInt = int(y);
int zInt = int(z);
if (x < 0 && x != xInt) --xInt;
if (y < 0 && y != yInt) --yInt;
... | the_stack |
namespace megdnn {
namespace cuda {
namespace relayout_format {
namespace internal {
using namespace memory;
struct LayoutType {
static constexpr uint32_t NCHWx = 0;
static constexpr uint32_t NHWC = 1;
};
template <
typename Type_, int pack_size_, int chan_blk_, int width_, int size_nbits_,
ui... | the_stack |
#include "deviceCode.h"
#include "constants.h"
#include <optix_device.h>
#include <owl/common/math/random.h>
#include <owl/common/math/LinearSpace.h>
__constant__ LaunchParams optixLaunchParams;
typedef owl::common::LCG<4> Random;
typedef owl::RayT<0, 3> RadianceRay;
typedef owl::RayT<1, 3> ShadowRay;
typedef owl:... | the_stack |
#define SEP printf("-----------------------------------------------------------\n")
void gendata(float *ax,float *ay,float *az,
float *gx,float *gy,float *gz,
float *charge,float *size,int natom,int ngrid) {
int i;
printf("Generating Data.. \n");
for (i=0; i<natom; i++) {
ax[i] = ((float) rand... | the_stack |
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <algorithm>
#include <memory>
namespace faiss {
namespace gpu {
template <typename T>
void runAllPairwiseDistance(
bool computeL2,
GpuResources* res,
cudaStream_t ... | the_stack |
static bool THCUNN_checkKeysValues(THCState *state, THCudaLongTensor* keys,
THCTensor* values)
{
return THCudaLongTensor_size(state, keys, 0) == THCTensor_(nElement)(state, values)
&& THCTensor_(nDimension)(state, values) == 1
&& THCudaLongTensor_nDimension(state, ... | the_stack |
//Helper functions
//Reorders data
extern "C" __global__ void dataReorderR4(const int n_particles,
real4 *source,
real4 *destination,
uint *permutation) {
const int bid = blockIdx.y * gridDi... | the_stack |
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/geometry/trianglemesh.h"
#include "cupoch/utility/platform.h"
#include "cupoch/visualization/shader/normal_shader.h"
#include "cupoch/visualization/shader/shader.h"
using namespace cupoch;
using namespace cupoch::visualization;
using namespace cupoch::visualizat... | the_stack |
#include <stdlib.h>
#include <stdio.h>
#include <unordered_map>
#include <vector>
#include <cassert>
#include <cuda_runtime.h>
#include <cutensornet.h>
#include <cutensor.h>
#define HANDLE_ERROR(x) \
{ const auto err = x; \... | the_stack |
#include <cuda_runtime.h>
#include <cuComplex.h>
#include <complex.h>
#include <math.h>
#include <stdio.h>
#include <sys/time.h>
#define SINC_SUB 8192
#define SINC_LEN 8
#define SINC_HALF (SINC_LEN/2)
#define SINC_ONE (SINC_LEN+1)
#define IDX1D(i,j,w) (((i)*(w))+(j))
#define modulo_f(a,b) fmod(fmod(a,b)+(b),(b))
s... | the_stack |
#if(__CUDACC_VER_MAJOR__<9 || (__CUDACC_VER_MAJOR__==9 && __CUDACC_VER_MINOR__<2))
#if __CUDA_ARCH__>=700
#error CGBN requires CUDA version 9.2 or above on Volta
#endif
#endif
/****************************************************************************************************************
* cgbn_context_t imp... | the_stack |
#include "cuda/utils.h"
#include "cuda/helpers.h"
namespace fastertransformer {
#define MAX_BLOCKS_PER_BEAM 8
template <typename T>
void topK_kernelLauncher(const T* log_probs,
int* topk_tmp_id_buf,
T* topk_tmp_val_buf,
int* topk_id... | the_stack |
#include "HostDevice.hpp"
#include <cuda_runtime.h>
#include <limits>
#include <ostream>
namespace std {
/** \addtogroup VectorTypeLimits
* Provides numeric_limits max, min, lowest for most common CUDA vector types.
* In particular, it supports short2, ushort2, short4, ushort4, short2, ushort2,
* short4, ushort... | the_stack |
#include <simtbx/nanoBragg/nanotypes.h>
#include <simtbx/nanoBragg/nanoBraggCUDA.cuh>
using simtbx::nanoBragg::shapetype;
using simtbx::nanoBragg::hklParams;
__global__ void nanoBraggSpotsCUDAKernel(int spixels, int fpixels, int roi_xmin, int roi_xmax,
int roi_ymin, int roi_ymax, int oversample, int point_pixel,
... | the_stack |
namespace poisson
{
//------------------------------------------------------------------------
#define globalThreadIdx (threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * (blockIdx.x + gridDim.x * blockIdx.y)))
#if __CUDA_ARCH__ < 350
template <class T> __device__ __forceinline__ T __ldg (const T* in) { retur... | the_stack |
#include "filterinterpolation_cuda_kernel.cuh"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#define min(a,b) ((a<b)?(a):(b))
#define max(a,b) ((a>b)?(a):(b))
#define DEBUG (0)
#ifndef BLOCKDIMX
#define BLOCKDIMX (32)
#end... | the_stack |
using namespace std;
typedef uint8_t uint8;
typedef unsigned int uint32;
typedef unsigned long long int uint64;
#define STREAM_BLOCK 16
#define BLOCK_SIZE 32
#define BLOCK_D_SIZE 64
#define INTEGRAL_BLOCK_SIZE 8
#define XDIM_MAX_THREADS 1024
#define XDIM_H_THREADS 512
#define XDIM_Q_THREADS 256
#define SHARED_MEMORY 49... | the_stack |
// The dimensions of the thread block
#define BLOCKDIM_X 16
#define BLOCKDIM_Y 16
#define ABS(n) ((n) < 0 ? -(n) : (n))
// Double single functions based on DSFUN90 package:
// http://crd.lbl.gov/~dhbailey/mpdist/index.html
// This function sets the DS number A equal to the double precision floating
// point number B... | the_stack |
#include <array/NDArrayFactory.h>
#include <array/ResultSet.h>
#include <exceptions/cuda_exception.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <ops/declarable/helpers/transforms.h>
#include <numeric>
namespace sd {
na... | the_stack |
#include "correlation_cuda_kernel.h"
#define real float
#define CUDA_NUM_THREADS 1024
#define THREADS_PER_BLOCK 32
__global__ void channels_first(float* input, float* rinput, int channels, int height, int width, int pad_size)
{
// n (batch size), c (num of channels), y (height), x (width)
int n = blockIdx.x;... | the_stack |
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/random_erase.hpp>
#include <nbla/cuda/utils/nd_index.cuh>
#include <nbla/cuda/utils/random.cuh>
#include <nbla/variable.hpp>
#include <curand_kernel.h>
namespace nbla {
namespace random_erase {
template <typename T, bool accum = t... | the_stack |
#include <nvbench/benchmark_base.cuh>
#include <nvbench/device_info.cuh>
#include <nvbench/printer_base.cuh>
#include <nvbench/state.cuh>
#include <nvbench/summary.cuh>
#include <nvbench/detail/ring_buffer.cuh>
#include <nvbench/detail/throw.cuh>
#include <fmt/format.h>
#include <algorithm>
#include <cstdio>
#includ... | the_stack |
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
extern int nblock_size;
extern int maxgsx;
static cudaError_t crc;
extern "C" void gpu_deallocate(void *g_d, int *irc);
extern "C" void gpu_iallocate(int **g_i, int nsize, int *irc);
/*--------------------------------------------------------------------*/
__... | the_stack |
#include <cublas.h>
#define TRAINING_SIZE 5000
#define TEST_SIZE 1000
#define IMAGE_SIZE 784
float ONE=1.0;
float ZERO=0.0;
float *BIASES;
float *WEIGHTS;
float *D__BIASES;
float *D__WEIGHTS;
float *NABLA_B;
float *NABLA_W;
float *D__NABLA_B;
float *D__NABLA_W;
float *DELTA_NABLA_B;
float *DELTA_NABLA_W;
... | the_stack |
#define NUM_RND_BLOCKS 96
#define NUM_RND_THREADS_PER_BLOCK 128
#define NUM_RND_STREAMS (NUM_RND_BLOCKS * NUM_RND_THREADS_PER_BLOCK)
/*
* Defines for getting the values at the lower and upper 32 bits
* of a 64-bit number.
*/
#define LOW_BITS(x) ... | the_stack |
//needed for optionInputStruct
#include "blackScholesAnalyticEngineStructs.cuh"
//needed for the kernel(s) to run on the GPU
#include "blackScholesAnalyticEngineKernels.cu"
#include "blackScholesAnalyticEngineKernelsCpu.cu"
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>
#include <cuda.... | the_stack |
typedef struct {
int hashbitlen;
unsigned long long databitlen;
unsigned long long datasize_in_buffer;
uint64_t x[8][2];
unsigned char buffer[64];
} jhHashState;
__constant__ unsigned char d_JH256_H0[512];
__constant__ unsigned char d_E8_rc[42][32];
const unsigned char h_JH256_H0[128]={0xeb,0x98,0xa3,... | the_stack |
#include "thundersvm/kernel/smo_kernel.h"
#include <thrust/sort.h>
#include <thrust/system/cuda/detail/par.h>
namespace svm_kernel {
template<typename T>
__device__ int get_block_min(const T *values, int *index) {
int tid = threadIdx.x;
index[tid] = tid;
__syncthreads();
//bloc... | the_stack |
#include <iostream>
#include <algorithm>
// CHECK: #include <hip/hip_runtime.h>
#include <cuda.h>
template<typename T>
__global__ void axpy(T a, T *x, T *y) {
y[threadIdx.x] = a * x[threadIdx.x];
}
template<typename T1, typename T2>
__global__ void axpy_2(T1 a, T2 *x, T2 *y) {
y[threadIdx.x] = a * x[threadIdx.x]... | the_stack |
* This sample demonstrates Inter Process Communication
* using one process per GPU for computation.
*/
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <cuda.h>
#define CUDA_DRIVER_API 1
#include "helper_cuda.h"
#include "helper_cuda_drvapi.h"
#include "helper_multiprocess.h"
static const char shmN... | the_stack |
#include <cub/cub.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/MergeNetworkUtils.cuh>
#include <faiss/gpu/utils/PtxUtils.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/gpu/utils/WarpShuffles.cuh>
namespace faiss {
namespace gpu {
template <typename _Key, typename _Value>
st... | the_stack |
// Copyright (c) 2018 Changan Wang
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, di... | the_stack |
* Scan-scatter kernel. The third kernel in a radix-sorting digit-place pass.
******************************************************************************/
#pragma once
#include "radixsort_kernel_common.cu"
namespace b40c {
/**
* Register-saving variable qualifier. Can be used when declaring
* variables that ... | the_stack |
namespace fast_rnnt {
// forward of mutual_information. See """... """ comment of
// `mutual_information_recursion` in
// in k2/python/k2/mutual_information.py for documentation of the
// behavior of this function.
// px: of shape [B, S, T+1] if !modified, else [B, S, T] <-- work out
// `modified` from this.
// py:... | the_stack |
#include <nbla/cuda/function/depthwise_convolution.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/cuda/utils/block_reduce.cuh>
#include <nbla/singleton_manager.hpp>
namespace nbla {
namespace depthwise_convolution_cuda {
template <typename T, int K>
__global__ void forward_kernel_1d(const T *input_data, T *output... | the_stack |
#include "gpu/image/sampling.hpp"
#include "../deviceBuffer.hpp"
#include "../deviceStream.hpp"
#include "../surface.hpp"
#include "../gpuKernelDef.h"
#include "backend/common/vectorOps.hpp"
#include "cuda/util.hpp"
#include "image/kernels/sharedUtils.hpp"
#include "backend/cuda/core1/kernels/samplingKernel.cu"
#incl... | the_stack |
using namespace FW;
__constant__ unsigned char c_constants[sizeof(Constants)];
texture<float4, 2> t_textureAtlas;
//------------------------------------------------------------------------
// Lighting.
//------------------------------------------------------------------------
__device__ Vec3f FW::evalua... | the_stack |
* \test Tests the performance of multiple inner products with a common vector.
**/
//
// *** System
//
#include <iostream>
#include <iomanip>
#include <iterator>
//
// *** ViennaCL
//
#include "viennacl/vector.hpp"
#include "viennacl/vector_proxy.hpp"
#include "viennacl/linalg/inner_prod.hpp"
#include "viennacl/l... | the_stack |
NTL_CLIENT
#define bidx blockIdx.x
#define bidy blockIdx.y
#define bidz blockIdx.z
#define tidx threadIdx.x
#define tidy threadIdx.y
#define tidz threadIdx.z
#define bdimx blockDim.x
#define bdimy blockDim.y
#define bdimz blockDim.z
#define gdimx gridDim.x
#define gdimy gridDim.y
#define gdimz gridDim.z
namespace cuH... | the_stack |
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cuda_util.h"
#include <iostream>
#include "mat.h"
#include <float.h>
#include "pooling_cuda.h"
__global__ void gpu_pooling_cuda_forward_global_max(const float* a_input, const ncnn::CudaMatInfo a_info,
... | the_stack |
// All of the following CUDA kernels and constants are copied from PyTorch (link
// below) and modified/optimized for NNabla.
// https://github.com/pytorch/pytorch/blob/32b37ba2462d9d87337a4fe332f95524a4c49777/aten/src/ATen/native/cuda/Normalization.cuh
#include <nbla/cuda/common.hpp>
namespace nbla {
// The maximum... | the_stack |
#include <cmath>
#include <cub/cub.cuh>
#include <memory>
namespace dietgpu {
template <int Threads>
__device__ void histogramSingle(
const ANSDecodedT* __restrict__ in,
uint32_t size,
uint32_t* __restrict__ out) {
constexpr int kWarps = Threads / kWarpSize;
static_assert(Threads == kNumSymbols, "");
... | the_stack |
#define MULT0(a) {\
tmp = a[7]; \
a[7] = a[6]; \
a[6] = a[5]; \
a[5] = a[4]; \
a[4] = a[3] ^ tmp; \
a[3] = a[2] ^ tmp; \
a[2] = a[1]; \
a[1] = a[0] ^ tmp; \
a[0] = tmp; \
}
#define MULT2(a,j) { \
tmp = a[(j<<3)+7]; \
a[(j*8)+7] = a[(j*8)+6]; \
a[(j*8)+6] = a[(j*8)+5]; \
a[(j*8)+5] = a[(j*8)+4]; \
a[(j*8)... | the_stack |
template <typename scalar_t>
__device__ scalar_t deform_conv2d_im2col_bilinear(
const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h... | the_stack |
#include <cstdio>
#include <unistd.h>
#include <sstream>
#include <vector>
#include <iostream>
#include <cuda.h>
#include <cublas_v2.h>
#define CONVERT_BS 256
#define INVERSE_BS 16
void
checkCUDAError (cudaError_t err, char *funcName)
{
if (err != cudaSuccess)
{
fprintf(stderr, "CUDA error in %s \n", funcNam... | the_stack |
<<<<<<< HEAD:utils/inference.cu
#include "inference.cuh"
template <typename T>
void BERT_Attention (global_manager *handle,
T* &tensor,
size_t num_layer,
int* attention_mask) {
=======
#include "../ops/CrossEntropyLoss.cu"
#include "../ops/elemen... | the_stack |
#include "ew_op_gpu.h"
#include "gpu_hmma.h"
#include <stdio.h>
typedef unsigned long long uint64;
template <uint UNROLL, uint BLOCKS, uint BSIZE, typename T, typename V2, typename MASKT>
__global__ void __launch_bounds__(1024,BLOCKS) bst_masked_softmax(
const uint2* __restrict__ Lut,
const MASKT* __restrict... | the_stack |
#include "Morphology.h"
#include <iostream>
using namespace std;
#include "ErrorCode.h"
// Macro: MOR_USE_INTERMEDIA
// Switch macro. When enabled, the class keeps internal intermediate buffers for the
// opening and closing operations, avoiding the overhead of repeated allocation and
// release; however, because the intermediate images are fairly large, this weakens
// the effect of the cache, so it does not necessarily yield better performance.
// When disabled, an intermediate buffer is allocated temporarily on every call to
// the opening or closing operation.
#define MOR_USE_INTERMEDIA
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread block dimensions.
#d... | the_stack |
namespace sketch {
using hrc = std::chrono::high_resolution_clock;
#ifdef __CUDACC__
using exception::CudaError;
#endif
template<typename T, typename=std::enable_if_t<std::is_arithmetic<T>::value>>
#ifdef __CUDACC__
__host__ __device__
#endif
INLINE uint64_t nchoose2(T x) {
return (uint64_t(x) * uint64_t(x - 1)) ... | the_stack |
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
// parameters for device execution
#define BLOCK_SIZE 64
#define GRID_SIZE 1500
// parameters for LIBOR calculation
#define NN 80
#define NMAT 40
#define L2_SIZE 3280 //NN*(NMAT+1)
#define NOPT 15
#define NPATH 96000
#define __fd... | the_stack |
#ifndef INCLUDE_GGNN_CACHE_CUDA_SIMPLE_KNN_SYM_CACHE_CUH_
#define INCLUDE_GGNN_CACHE_CUDA_SIMPLE_KNN_SYM_CACHE_CUH_
#include <cuda.h>
#include <cuda_runtime.h>
#include <cub/cub.cuh>
#include <limits>
#include "ggnn/utils/cuda_knn_utils.cuh"
template <DistanceMeasure measure,
typename ValueT, typename Key... | the_stack |
#include "src/DeviceTensorUtils.h"
#include "THCTensor.h"
#include "cuda/CudaUtils.cuh"
#include "cuda/DeviceTensor.cuh"
#include "cuda/MemoryAccess.cuh"
#include "cuda/util/CachedDeviceProperties.h"
#define ENABLE_CUDA_DEBUG
#include "cuda/CudaDebugUtils.cuh"
#include <thrust/host_vector.h>
#include <thrust/device_... | the_stack |
* \test Tests routines for matrix-vector operations (BLAS level 2) using integer arithmetic.
**/
//
// *** System
//
#include <iostream>
#include <vector>
//
// *** ViennaCL
//
//#define VIENNACL_DEBUG_ALL
#include "viennacl/scalar.hpp"
#include "viennacl/matrix.hpp"
#include "viennacl/vector.hpp"
#include "viennac... | the_stack |
#include <cassert>
#include <cuda_bf16.h>
#include <cuda_fp16.h>
#include "ln.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
constexpr uint32_t THREADS_PER_WARP = 32;
///////////////////////////////////////////////////////////////////////////////////////////... | the_stack |
#include <thread>
#include <unordered_map>
#include <cuda_runtime.h>
#include <pybind11/numpy.h>
#ifndef NO_FAISS
#include "faiss/gpu/GpuIndexFlat.h"
#include "faiss/gpu/StandardGpuResources.h"
#endif
#include "graph.cuh"
#include "core/solver.h"
#include "model/visualization.h"
#include "gpu/visualization.cuh"
names... | the_stack |
#define debug_aml(a...)
//#define debug_aml(a...) printf(a);
#define debug_aml_VK(a...)
//#define debug_amlVK(a...) printf(a)
#pragma once
#ifdef BOOST_FOUND
// Boost includes for CPU Push Relabel Max Flow reference algorithms
#include <boost/config.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/grap... | the_stack |
* \file
* Utility types for device-wide scan and similar primitives
*/
#pragma once
#include <iterator>
#include "../../thread/thread_load.cuh"
#include "../../thread/thread_store.cuh"
#include "../../warp/warp_reduce.cuh"
#include "../../util_arch.cuh"
#include "../../util_device.cuh"
#include "../../util_namespa... | the_stack |
using namespace thrust;
__constant__ uint __storeStart__;
__constant__ uint __loadInvStart__;
/**
* number of variables of the input program.
*/
__constant__ uint __numVars__;
__constant__ uint* __ptsConstraints__;
__constant__ uint __numPtsConstraints__;
__constant__ uint* __copyConstraints__;
__constant__ uin... | the_stack |