serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
15,001 | __global__ void draw_julia(int w, int h, double zoom, int *data_ptr, double minx, double miny, double posX, double posY)
{
double cRe, cIm;
double newRe, newIm, oldRe, oldIm;
int maxIterations = 880000;
cRe = -0.74543;
cIm = 0.11301;
int r;
int g;
int b;
int y = blockDim.y * blockIdx... |
15,002 | /** Instrucciones
*
* El juego comienza con una configuracion al azar entre celdas vivas y muertas.
*
* Para modificar los valores de la ejecucion simplemente hay que modificar los
* valores de las constantes declaradas mas abajo.
*
* N: Numero de filas que tendra la matriz que almacene el estado del juego.
* ... |
15,003 | #include "includes.h"
__global__ void advNextStep(double *d_prevPoint, double *d_umat, double d_stepDist, int nRxns, double *points, int pointsPerFile, int pointCount, int index){
int newindex= blockIdx.x * blockDim.x + threadIdx.x;
int stride= blockDim.x * gridDim.x;
for(int i=newindex;i<nRxns;i+=stride){
points[poin... |
15,004 | #include "cuda.h"
#include "stdio.h"
#include "iostream"
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
void test1(){
int pts_num=500;//there are 500 points
int a = DIVUP(pts_num,THREADS_PER_BLOCK);// assign 2 block, because per block has a limit of 256
dim3 blocks(a, 3, 1);... |
15,005 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda_runtime.h>
#include"device_launch_parameters.h"
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
#define N (55*1024)
#if defined(__CUDA_ARCH__) ... |
15,006 | /*
CSC691 GPU programming
Project 2: In the Interest of Time
Naive GPU version
Jiajie Xiao
Oct 8, 2017
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
void gen_flattened_array(float *array, int rowSize, int colSize)
{
int i, j;
time_t t;
/* Intializes random number generator */
srand((unsigned)... |
15,007 | #include<cuda.h>
#include<stdio.h>
int main(void) {
void HostMMM(float *, float *, float *, int);
void CudaMMM(float *, float *, float *, int);
const int Width = 5;
float M[Width*Width], N[Width*Width], P[Width*Width];
for(int i = 0; i < (Width*Width) ; i++) {
M[i] = 5;
N[i] = 5;
P[i] = 0;
}
//HostMMM(M,... |
15,008 | #include <stdio.h>
__global__ void compute_primitive_vars_kernel (double *vx, double *vy, double *vz, double *u, int nelt, int nxyz,int ntot,int irpu, int irpv, int irpw, int iret, int irg, int toteq,int if3d,double *scr, double* energy, double *vtrans, int irho, double *phig, int lx1, int ly1, int lz1, int *lglel, do... |
15,009 | /*
* Adding two vectors using CUDA
*/
#include<iostream>
#include<stdio.h>
#include<math.h>
using namespace::std;
__global__
void add(int n, float *x, float *y){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i=index; i< n ; i+=stride){
y[i] = x[i] + y[i]... |
15,010 | #include <stdio.h>
#define TILE_DIM 16
__global__ void multiMatrix (int *a, int *b, int *c, int N) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
int pos = idx + idy * N;
int temp_result ... |
15,011 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <assert.h>
//#include <time.h>
#define N 2 //512
// One-thread-per-element vector addition for a single-block launch:
// c[t] = a[t] + b[t], where t is the thread's index within the block.
__global__ void Asum(int *a, int *b, int *c){
    const int t = threadIdx.x;
    c[t] = a[t] + b[t];
}
|
15,012 |
#include <vector>
#include <algorithm>
#include "MouseMoveCallback.cuh"
#include "MouseMoveListener.cuh"
MouseMoveCallback::MouseMoveCallback()
{
}
void
MouseMoveCallback::invoke(float posX, float posY)
{
for (auto listener = listeners.begin(); listener != listeners.end(); ++listener)
{
(*listener)->onMouseMov... |
15,013 | #include <stdio.h>
#define CSC(call) { \
cudaError err = call; \
if(err != cudaSuccess) { \
fprintf(stderr, "CUDA error in file '%s' in line %i: %s.\n", \
__FILE__, __LINE__, cudaGetErrorString(... |
15,014 | /**
* Programação Concorrente - SSC0143 - 2 Semestre de 2015
* Prof. Dr. Júlio Cezar Estrella
* Trabalho 3 - Smoothing de imagem utilizando CUDA
*
* Alunos:
* Thiago Ledur Lima - 8084214
* Rodrigo Neves Bernardi - 8066395
**/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
/... |
15,015 | #include <stdio.h>
#include <cuda.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sequence.h>
#define STEPS 1024
#define LENGTH 1024
#define U 0.1f
#define H 0.1f
#define TAU 0.2f
#de... |
15,016 |
#include <cstdio>
#include <cstdlib>
#include <random>
#include <sys/time.h>
#define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"CUDA assert: %s %s %d\n", cuda... |
15,017 | /**
* Nearest neighbor search
* マップ内に、店、工場などのゾーンがある確率で配備されている時、
* 住宅ゾーンから直近の店、工場までのマンハッタン距離を計算する。
*
* 各店、工場から周辺に再帰的に距離を更新していくので、O(N)で済む。
* しかも、GPUで並列化することで、さらに計算時間を短縮できる。
*
* shared memoryを使用して高速化できるか?
*/
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <list>
#include <time.h>
#define CELL... |
15,018 | #include<stdio.h>
#define Width 512 // size of Width x Width matrix
#define TILE_WIDTH 16
__global__ void matrixMul(float* A, float* B, float* C, int width)
{
__shared__ float As[TILE_WIDTH] [TILE_WIDTH];
__shared__ float Bs[TILE_WIDTH] [TILE_WIDTH];
int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
... |
15,019 |
#include <cuda_runtime.h>
__global__ void resize_nearest_kernel_2d(int nbatch, float scale, int2 osize, float const* idata, int istride,
int ibatchstride, float* odata, int ostride, int obatchstride)
{
int x0 = threadIdx.x + blockIdx.x * blockDim.x;
int y0 = threadIdx.y + blockIdx.y * blockDim.y;
int ... |
15,020 | /*
* Copyright 2017-2022 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related docum... |
15,021 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#define THREADS_PER_BLOCK 1024
#define THRESHOLD 67108864
// XOR "encryption": c[i] = m[i] ^ k[i] for the first n bytes, one thread
// per byte. The assignment is repeated 100 times per element; the result
// is identical each time, so the loop apparently exists only as extra
// work (e.g. for timing) — preserved as-is.
__global__ void encrypt(int n, char *m, char *k, char *c){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        for (int rep = 1; rep <= 100; ++rep)
            c[i] = m[i] ^ k[i];
    }
}
int main(){
/* ... |
15,022 | #include <cstdlib>
#include <cstdio>
#include <cmath>
#include <cassert>
#define SIZE 32
// Abort the program with a diagnostic when a CUDA API call has failed.
// NOTE(review): __FILE__/__LINE__ expand *here*, inside this helper, so
// every report points at this function's location rather than the call
// site; a wrapper macro around the call would capture the caller's
// file/line instead.
void print_err_msg(cudaError_t err) {
if(err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
__global__ void matrix_add3(float* d_A, float* d_B,... |
15,023 | #define WARP_SIZE 32
#define HALF_WARP_SIZE (WARP_SIZE >> 1)
__global__ void refCounter_kernel(unsigned int * d_counters0,
unsigned int * d_counters1,
unsigned int * d_del0,
unsigned int * d_del1,
... |
15,024 | #include "includes.h"
// For each tree in the forest (one tree per block, trees laid out `step`
// slots apart), set internal node (base + threadIdx.x) to true exactly
// when both of its children — stored heap-style at indices 2*node and
// 2*node + 1 — are true.
__global__ void IndexInteranlNode(bool *forest, int base, int step)
{
    const int node = base + threadIdx.x;   // this thread's internal node
    const int lchild = 2 * node;           // heap layout: left child
    const int tree = blockIdx.x * step;    // offset of this block's tree
    forest[tree + node] = forest[tree + lchild] && forest[tree + lchild + 1];
}
15,025 | #include <iostream>
#include <vector>
#include <set>
#include <map>
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include <cmath>
using namespace std;
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define MAX_THREADS_PER_BLOCK 1024
#define GLOBAL_MAX_E... |
15,026 | #include<stdio.h>
#include<cuda.h>
// KERNEL
// Element-wise square for a single-block launch:
// d_out[i] = d_in[i]^2, one thread per element.
__global__
void Square(float * d_out, float * d_in){
    const int i = threadIdx.x;
    const float v = d_in[i];
    d_out[i] = v * v;
}
int main(int argc, char ** argv){
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// GENERATING INPUT ARRAY IN HOST
... |
15,027 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
void addWithCuda(int *c, const int *a, const int *b, size_t size);
// Vector addition with one element handled per *block*:
// c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x].
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int elem = blockIdx.x;
    c[elem] = a[elem] + b[elem];
}
int maindddd()
{
const int arraySize = 5;
const... |
15,028 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <stdbool.h>
#define min(a,b) ((a) < (b) ? (a) : (b))
#define bufferSize 1024
#define NUMTHREAD 256
#define NUMBLOCK 64
char buffer[bufferSize];
// why deviceNum is 1 for default, it should be zero!!!!
int deviceNum = 1, debug = ... |
15,029 | #include "includes.h"
/* TODO: Your code here */
/* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */
// y = inputs[0], y_ = inputs[1]
// np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True)
__global__ void relu_kernel(const float *input, float *output, int n) {
int index = blockDi... |
15,030 | #include "includes.h"
#define getPos(a,k) (((a)>>(k-1))&1)
extern "C" {
}
__global__ void prefixSum(int * input_T, int * prefix_T, int * prefix_helper_T, int n, int k, int blockPower) {
__shared__ int tmp_T[1024];
for(int i = 0; i<blockPower; i++) {
if(threadIdx.x + 1024*blockIdx.x + i*1024*gridDim.x >= n) return... |
15,031 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
/***********************************************/
/* for debug: check the output */
/***********************************************/
void write_output(float *arr, int size, const char *filename)
{
FILE *fp;
if((fp = fopen(filename, "w... |
15,032 | /**
* 3mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#i... |
15,033 | #include "includes.h"
// Block-wide tree reduction: sums the first blockDim.x elements of
// `array` into sum[0].
//
// Fixes vs. the original:
//  * partialSum was read before ever being written (shared memory is
//    uninitialized) — each thread now loads its element of `array` first;
//  * there was no barrier between the last reduction step and the read
//    of partialSum[0] — a final __syncthreads() is added;
//  * every thread raced to write sum[0] — now only thread 0 writes.
//
// Assumes a single block with blockDim.x a power of two, at most 256
// threads, and `array` holding at least blockDim.x elements.
__global__ void revisedArraySum(float *array, float *sum){
__shared__ float partialSum[256];
int t = threadIdx.x;
partialSum[t] = array[t];   // load phase (was missing entirely)
for(int stride = 1;stride < blockDim.x; stride *= 2){
__syncthreads();
if(t % (2 * stride) == 0){
partialSum[t] += partialSum[t + stride];
}
}
__syncthreads();            // make the final partialSum[0] visible
if(t == 0){                 // single writer — no race on sum[0]
sum[0] = partialSum[0];
}
}
15,034 | #include "includes.h"
__global__ void addOneRowPerThread(double* a, double* b, double* c, int n)
{
// Get the row for current thread
int row = (blockIdx.y * blockDim.y + threadIdx.y);
// Make sure we do not go out of bounds
if (row < n)
{
int idx = row * n;
for (int i = 0; i < n; i++)
{
c[idx + i] = a[idx + i] + b[idx... |
15,035 | /*
HPC ASSIGNMENT 1 : QUESTION 4
Name : Arvind Sai K , Derik Clive
RollNo: 15CO207 , 15CO213
*/
#include<stdio.h>
#include<time.h>
#include<stdlib.h>
#define ARRAY_ROWS 700
#define ARRAY_COLS 700
__global__ void mat_add(int d_a[ARRAY_ROWS][ARRAY_COLS], int d_b[ARRAY_ROWS][ARRAY_COLS], int d_c[ARRAY_ROWS][ARRAY... |
15,036 | #include <cstdio>
#include <stdio.h>
int main(void)
{
int a[100000];
int b[100000];
int *ary1;
for(int i=0;i<100000;i++)
{
a[i] = i;
}
cudaMalloc((void**)&ary1 , 100000*sizeof(int));
cudaMemcpy(ary1, a, 100000*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(b, ary1 ,100000*sizeof(int),cudaMemcpyDeviceToHost... |
15,037 | /*==========================================================
* patch2hank.cu
*
* making block hankel matrix
*
* compile command
* nvcc('patch2hank_single.cu -arch sm_35')
*
* This is a MEX-file for MATLAB.
*
*========================================================*/
/* $created: 11-Mar-2015 $ */
// #define ... |
15,038 | #include "includes.h"
// Repeatedly computes y[idx] = alpha * x[idx] + beta, n_loop times.
// The stored value is identical after every iteration; the loop
// presumably exists to create a repeatable timing/benchmark workload —
// TODO confirm with the caller.
__global__ void iterative_saxpy_kernel(float *y, const float* x, const float alpha, const float beta, int n_loop)
{
// Flat global thread index.
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// NOTE(review): no bounds guard — every launched thread dereferences
// x[idx]/y[idx], so the launch size must not exceed the array length.
for (int i = 0; i < n_loop; i++)
y[idx] = alpha * x[idx] + beta;
}
15,039 | #include "includes.h"
// Initialise a velocity field: the x component is zeroed everywhere;
// the y component is 0.8 / (blockIdx.x + 1) — constant within a block,
// decreasing for higher-numbered blocks.
__global__ void mInitVelocity(float *u_dimX, float *u_dimY) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    u_dimX[gid] = 0.f;
    u_dimY[gid] = 0.8f / (float)(blockIdx.x + 1);
}
15,040 | #include <iostream>
#include <iomanip>
#include <time.h>
using namespace std;
__global__
void sumation (int max, int *sumPtr) {
printf("%d\n", max);
for (int i = 1; i <= max; i++) {
*sumPtr += i;
printf("%d\n", *sumPtr);
}
}
int main() {
clock_t t1, t2;
t1 = clock();
cout << "Program started..." << endl;... |
15,041 | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
typedef struct _RESULT
{
float score;
int match;
} RESULT;
typedef struct _DIM
{
int x;
int y;
int z;
} DIM;
typedef struct _INDEX
{
int i;
int j;
} INDEX;
//#define N_DEBUG
struct PathNode
{
int ... |
15,042 | #include "includes.h"
// Element-wise vector add using one thread per element within a single
// block: c[t] = a[t] + b[t].
__global__ void addByThreads(int *a, int *b, int *c)
{
    const int t = threadIdx.x;
    c[t] = a[t] + b[t];
}
15,043 | #include<stdio.h>
#include<assert.h>
#include<cuda.h>
#include<errno.h>
#include<math.h>
#include<sys/time.h>
#define MAX_VAL 100
#ifndef BLOCK_WIDTH
#define BLOCK_WIDTH 256
#endif
#ifndef MAX_SIZE
#define MAX_SIZE 512 // working on single block
#endif
#define TILE_WIDTH (BLOCK_WIDTH*2)
cudaError_t cuerr;
float* creat... |
15,044 | #include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__device__ int devData;
// Trivial helper compiled for both host and device; always returns 1.
__host__ __device__ int run_on_cpu_or_gpu() {
return 1;
}
// Kernel that calls the shared helper on the device and prints its
// result (device printf output appears at the next synchronizing call).
__global__ void run_on_gpu() {
printf("run_on_cpu_or_gpu GPU: %d\n", run_on_cpu_or_gpu());
}
int main() {
int val = run_on_cpu_... |
15,045 | #include <stdio.h>
#include <stdlib.h>
#include<algorithm>
#include <math.h>
#define BLOCK_SIZE 1024
#define COUNT_NUMBER 101
using namespace std;
//INSERT CODE HERE---------------------------------
__global__ void counting(int* g_A, int* g_C,int counting_size ) {
__shared__ int count_arr[2][COUNT_NUMBER];//edit
... |
15,046 | #define RED 0
#define GREEN 1
#define BLUE 2
void call_kernel(char * cuda_frame_in, int height, int width);
__global__ void kernel_grey(char * frame, int height, int width)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int red, green, blue;
for(; i<width * height; i+= ( gridDim.x * blockDim.x))
{
red = ... |
15,047 | // Checked C extension is not supported for CUDA. Make sure driver
// rejects the flag.
//
// RUN: %clang -fcheckedc-extension -nocudalib -nocudainc -fsyntax-only -c %s 2>&1 | FileCheck %s
// CHECK: warning: Checked C extension not supported with 'CUDA'; ignoring '-fcheckedc-extension'
//
// Have clang compile this f... |
15,048 | #include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string.h>
#include <ctime>
#include <random>
#define K_mer 17
#define sample_num 200
#define loops 1000
#include "functions_serial.cu"
using namespace std;
void constructCollection(char *inpu... |
15,049 | float h_A[]= {
0.9873887992938584, 0.7518116992518353, 0.8598940369927895, 0.7155885551816444, 0.8797425552930407, 0.587458783832594, 0.6267841600435446, 0.8217790907969758, 0.818894981367343, 0.7872291426722382, 0.6357493323386308, 0.5550871670133021, 0.8461501536000029, 0.7977451395282227, 0.7555389775190972, 0.52923... |
15,050 | #include <limits.h>
#include <stdio.h>
#define ALLOC_SIZE 1024
// Writes the constant 42 into the first element of the given buffer.
// NOTE(review): despite the parameter name, this must be *device*
// memory — the caller below passes a cudaMalloc'd pointer; an actual
// host pointer would fault on the device.
__global__ void simple_kernel(int *hostMem) {
hostMem[0] = 42;
}
int main() {
int *hostMem;
cudaMalloc((void**)&hostMem, ALLOC_SIZE*sizeof(int));
simple_kernel<<<1, 1>>>(hostMem);
cudaFree(hostMem);
cudaDeviceReset();
... |
15,051 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <iostream>
int main(){
const int N = 108000000;
const int M = 70000000;
thrust :: device_vector <int> D(N, 0);
thrust :: fill(D.begin(), D.end(), 1);
for(int ... |
15,052 | /*************************************************************************************************************
* FILE: p2.cu
*
* AUTHOR: attiffan Aurora T. Tiffany-Davis
*
* DESCRIPTION: A CUDA program that calculates an approximate value for PI using Monte Carlo methods.
*
* TO RUN: ... |
15,053 |
#include <stdio.h>
#define N 10000
float a[N], b[N], c[N];
// Vector addition c = a + b over n elements using a grid-stride loop,
// so any launch configuration covers the whole array.
__global__ void kernelSumaVectores(float *a, float *b, float *c, int n)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
        c[i] = a[i] + b[i];
}
void sumaVectoresEnDevice(float *a, float *b,... |
15,054 | #include "complex.cuh"
// Construct a complex number from its real and imaginary parts
// (stored in members r and i; the class itself is declared in complex.cuh).
__device__ complex::complex(float real, float img) : r(real), i(img) {}
// Component-wise complex addition.
__device__ complex complex::operator+(complex &k) const {
return {r+k.r, i+k.i};
}
// Complex multiplication: (r + i·j)·(k.r + k.i·j).
__device__ complex complex::operator*(complex &k) const {
return {r*k.r - i*k.i, i*k.r + r*k.i};
}
__device__ float complex::magni... |
15,055 | #include <iostream>
#include <stdio.h>
#define M 16
#define N 16
#define BLOCK_SIZE 16
#define BLUR_SIZE 1
using namespace std;
__global__ void add(float *cudaA, float *kernel, float *cudaResult)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int gid = idy * N... |
15,056 | #include "includes.h"
// Device code for ICP computation
// Currently working only on performing rotation and translation using cuda
#ifndef _ICP_KERNEL_H_
#define _ICP_KERNEL_H_
#define TILE_WIDTH 256
#endif // #ifndef _ICP_KERNEL_H_
__global__ void CalculateBestIndex(double * distance_d, int ... |
15,057 | #include <cuda.h>
#include <stdio.h>
// Vector addition with one element per *block*:
// c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x].
__global__ void add(int *a, int *b, int *c) {
    const int elem = blockIdx.x;
    c[elem] = a[elem] + b[elem];
}
#define N 512
// Fill x[0..size) with pseudo-random integers in [0, 10).
// Uses the C library rand(); seed with srand() beforehand if a
// reproducible sequence is needed.
void random_ints(int* x, int size)
{
    for (int idx = 0; idx < size; ++idx)
        x[idx] = rand() % 10;
}
int main(void) {
int *a, *b, *c; // host copies of a, b, c
int *a... |
15,058 | #include "includes.h"
// Initialise d_permutation with the identity permutation 0..M-1,
// one thread per element; threads past the end do nothing.
__global__ void set_permutation(int *d_permutation, int M)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < M)
        d_permutation[idx] = idx;
}
15,059 | #include "includes.h"
__global__ void timeDomainConvolutionNaive(float* ibuf, float* rbuf, float* obuf, long long oframes, long long rframes, int ch, float gain) {
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
float value = 0;
for (int k = 0; k < rframes; k++) {
value += ibuf[threadID - k] * rbuf[k];
}
obuf[thr... |
15,060 | #include "includes.h"
__global__ void reduceMax(const float* d_in, float* d_out)
{
int abs_x = threadIdx.x + blockIdx.x * blockDim.x;
int thread_x = threadIdx.x;
extern __shared__ float sdata[];
sdata[thread_x] = d_in[abs_x];
__syncthreads();
int last_i = blockDim.x;
for (unsigned int i = blockDim.x / 2; i > 0; i >>... |
15,061 | #include "includes.h"
// SAXPY: y <- scalar * x + y, one element per thread.
// NOTE(review): the bound N is a file-scope macro/constant defined
// outside this snippet; consider passing the length as a kernel
// parameter instead of baking it in at compile time.
__global__ void saxpy(float scalar, float * x, float * y)
{
// Determine our unique global thread ID, so we know which element to process
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < N ) // Make sure we don't do more work than we have data!
y[tid] = scalar * x[tid] + y[tid];
}
15,062 | #include <stdio.h>
int main(int argc, char** argv)
{
cudaDeviceProp dP;
float min_cc = 3.0;
int rc = cudaGetDeviceProperties(&dP, 0);
if (rc != cudaSuccess)
{
cudaError_t error = cudaGetLastError();
printf("CUDA error: %s", cudaGetErrorString(error));
return rc; /* Failure ... |
15,063 | #include "includes.h"
/* This file is copied from https://github.com/jzbonter/mc-cnn */
extern "C" {
}
#define TB 128
#define DISP_MAX 256
__global__ void remove_nonvisible(float *y, int size, int size3)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
int x = id % size3;
if (y[id] >= x) {
y[id] ... |
15,064 | #include "includes.h"
using namespace std;
#define eps 1e-4
//每个thread负责output的一个pixel
__global__ void convolution2d(float *img, float *kernel, float* result, int n, int m, int kw, int kh, int out_n, int out_m, bool padding)
{
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
int x = bx * ... |
15,065 | #pragma once
#include<iostream>
#include<math.h>
__global__ void matMulKernel(
float *a, int a_rows, int a_cols,
float *b, int b_rows, int b_cols,
float *c, int c_rows, int c_cols
)
{
float Cvalue = 0.0;
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
... |
15,066 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <cuda.h>
#define u32 unsigned int
#define BLOCK_SIZE 64
#define CREATE_RAND_ARR(arr, size, min, max) \
do { \
time_t t; \
srand((unsigned)time(&t)); ... |
// Wraps the PTX SIMD-video instruction `vmin4.s32.s32.s32.add`:
// writes into d the value c plus the sum, over the four byte lanes i, of
// min(A.byte[i], B.byte[i]) with each lane treated as signed — TODO
// confirm against the PTX ISA description of vmin4's .add secondary op.
// Below compute capability 3.0 the body compiles away and d is untouched.
// NOTE(review): reference kernel parameters require the referents to live
// in device-visible (e.g. managed) memory — verify at the call site.
__global__ void vmin4(
int32_t &d,
unsigned const &A,
unsigned const &B,
int32_t const &c){
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 300))
asm volatile("vmin4.s32.s32.s32.add %0, %1, %2, %3;"
: "=r"(d)
: "r"(A), "r"(B), "r"(c));
#endif
}
__global__ void vmin4_intrinsic... |
15,068 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <curand.h>
#include <limits.h>
#define DIM_X 10
#define DIM_Y 10
#define DIM_Z 10
// Print the program's command-line usage and terminate with a non-zero
// exit status. argc is accepted but unused.
void usage(int argc,char ** argv)
{
    const char *prog = argv[0];
    printf("%s usage:\n", prog);
    printf(" :%s num\n", prog);
    exit(1);
}
__global__ void inCircle(float * x... |
15,069 | /*********************************************************************
* Copyright © 2011-2014,
* Marwan Abdellah: <abdellah.marwan@gmail.com>
*
* This library (cufftShift) is free software; you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public
* License as published by the F... |
15,070 |
#include <cuda_runtime.h>
#include <iostream>
#include <ctime>
__global__ void matmulDevice(int *A, int *B, int *C, int N)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row < N && col < N)
{
int sum = 0;
for (int i = 0; i < N; ... |
15,071 |
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
// Experiment with 3 part scans.
__global__ void silly_kernel(int n, int* in, int* out) {
if (threadIdx.x == 0){
int acc = 0;
for (int i = 0; i < n; ++i){
acc += in[blockIdx.x * n + i];
out[blockIdx.x * n + i] = acc;
... |
15,072 | /* helloCUDA.cu */
/****************************************************************************/
/* */
/* (C) 2010 Texas Advanced Computing Center. */
/* ... |
15,073 | /*!
* @file YuriConvertCuda.cu
* @author Zdenek Travnicek
* @date 13.8.2010
* @date 16.2.2013
* @copyright Institute of Intermedia, CTU in Prague, 2010 - 2013
* Distributed under modified BSD Licence, details in file doc/LICENSE
*
*/
#include <cuda.h>
//include "yuri/video/YuriConvertor.h"
__devic... |
15,074 | #include "includes.h"
// Cube each of the first 96 elements: d_out[i] = d_in[i]^3.
// Single-block launch; threads with index >= 96 stay idle
// (the 96-element limit is hard-coded).
__global__ void cube(float * d_out, float * d_in){
    const int idx = threadIdx.x;
    if (idx < 96) {
        const float v = d_in[idx];
        d_out[idx] = v * v * v;
    }
}
15,075 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include <cstdlib>
#include <time.h>
#include "cuda_runtime.h"
__global__ void parInterweavedSOE(int * a, int n, int sqrt_N) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0 && threadIdx.y == 0) {
a[0] = 1;
a[... |
15,076 | #include <cstdio>
#include <cstdlib>
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { ... |
15,077 | // tdfc-cuda backend autocompiled body file
// tdfc version 1.160
// Thu May 26 15:56:56 2011
#include <stdio.h>
// Copy the first N floats from cc_x into cc_y, one element per thread;
// threads past the end do nothing.
__global__ void tdfc_copy(float* cc_x,float* cc_y,int N )
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N)
        cc_y[i] = cc_x[i];
}
15,078 | // https://wagonhelm.github.io/articles/2018-03/detecting-cuda-capability-with-cmake
// Justin Francis
#include <stdio.h>
int main(int argc, char **argv){
cudaDeviceProp dP;
float min_cc = 5.2;
int rc = cudaGetDeviceProperties(&dP, 0);
if(rc != cudaSuccess) {
cudaError_t error = cudaGetLastEr... |
15,079 | // Vector Dot Product A.B
// compile with the following command:
//
// (for GTX970)
// nvcc -arch=compute_52 -code=sm_52,sm_52 -O2 -m64 -o vecAdd vecAdd.cu
//
// (for GTX1060)
// nvcc -arch=compute_61 -code=sm_61,sm_61 -O2 -m64 -o vecAdd vecAdd.cu
// Includes
#include <stdio.h>
#include <stdlib.h>
// Variables
float... |
15,080 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <device_functions.h>
#define ARR_LEN 12
#define swap(A, B) \
{ \
int temp = A; \
A = B; \
B = temp; \
}
/*
* Q3. Sort an array of size ARR_LEN using parallel odd-even transposition sort.
*... |
15,081 | // nnIndex: B*N*K;
// nnCount: B*N;
// input: B*M*C;
// output: B*N*C (N>M)
__global__ void mean_interpolate_forward(int B, int N, int M, int C, int K, const int* nnIndex,
const int* nnCount, const float* input, float* output)
{
for(int i=blockIdx.x;i<B;i+=gridDim.x)
... |
15,082 | // Matrix multiplication between square matrices using rows of A shared among threads of the same block.
// The grid of blocks is made by unidimensional blocks of legth MATRIXSIZE.
// Each block contains NUM_THREADS cells of size CELL_SIZE.
// CUDA kernels are asynchronous, so in order to perform time measurements it ... |
15,083 | // $Smake: nvcc -O2 -o %F %f
//
// add-vectors.cu - addition of two arrays on GPU device
//
// This program follows a very standard pattern:
// 1) allocate memory on host
// 2) allocate memory on device
// 3) initialize memory on host
// 4) copy memory from host to device
// 5) execute kernel(s) on device
// 6) c... |
15,084 | #include "thrust_all.cuh"
int main(void)
{
srand(time(NULL));
thrust::host_vector<int> h_vec(1000);
thrust::generate(h_vec.begin(),h_vec.end(),rand);
thrust::device_vector<int> d_vec = h_vec;
long int h_sum = thrust::reduce(h_vec.begin(),h_vec.end());
std::cout << "host : " << h_sum << '\n';
long int d... |
15,085 | #include "includes.h"
__global__ void transform2d(float *points3d_after, float fov_scale)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int w = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
{
int iw = x;
int ih = y + j;
float x = points3d_after[(ih * ... |
15,086 | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
// NOTE(review): despite the name, this does not add — it copies
// ary2[i] into ary1[i], one thread per element (single-block launch).
__global__ void addArray(int *ary1, int *ary2)
{
// This thread's element index within the block.
int indx = threadIdx.x;
ary1[indx] = ary2[indx];
}
int main(int argc,char **argv)
{
int ary[32]{0};
int res[32]{0};
for(int i = 0; i < 32; i++){
ary[i] = 2*i;
}
int *d_... |
15,087 | #include "includes.h"
// Overloads % as the 3-D vector cross product: returns v1 x v2.
__device__ float3 operator % (const float3 & v1, const float3 & v2)
{
    const float cx = v1.y * v2.z - v1.z * v2.y;
    const float cy = v1.z * v2.x - v1.x * v2.z;
    const float cz = v1.x * v2.y - v1.y * v2.x;
    return make_float3(cx, cy, cz);
}
__global__ void CopyOutBack(float4* d_tetT, float* d_vertT, int* d_vertMem, int* d_vertMemOutside, int* d_BlockSizes, int* d_ActiveList,... |
15,088 | #include "includes.h"
__global__ void jacobi_init( const int x_inner, const int y_inner, const int halo_depth, const double* density, const double* energy, const double rx, const double ry, double* kx, double* ky, double* u0, double* u, const int coefficient)
{
const int gid = threadIdx.x+blockIdx.x*blockDim.x;
if(gid ... |
15,089 | #include "includes.h"
__global__ void bs(float *drand, float *dput, float *dcall, int n) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n) {
float c1 = 0.319381530f;
float c2 = -0.356563782f;
float c3 = 1.781477937f;
float c4 = -1.821255978f;
float c5 = 1.330274429f;
float zero = 0.0f;
float one = 1.0f;
fl... |
15,090 | #include <stdio.h>
#include <stdexcept>
#include "thrust/device_vector.h"
#include "cuComplex.h"
#include "cufft.h"
#define XSIZE 7
#define YSIZE 128
#define ZSIZE 48
#define NCHAN_COARSE 336
#define NCHAN_FINE_IN 32
#define NCHAN_FINE_OUT 27
#define NACCUMULATE 256
#define SAMPS_PER_ACCUMULATE 128
#define NPOL 2
#de... |
15,091 | #include <stdio.h>
// Squares, in place, the int that x points to (x arrives as void* and is
// reinterpreted as int*), then returns the fixed sentinel value 7331.
__device__ int Square(void *x)
{
    int *value = (int *) x;
    const int v = *value;
    *value = v * v;
    return 7331;
}
|
15,092 | // Standard C++ includes
#include <algorithm>
#include <chrono>
#include <iostream>
#include <numeric>
#include <random>
#include <stdexcept>
#include <string>
#include <sstream>
#include <tuple>
#include <vector>
// Standard C includes
#include <cassert>
#include <cmath>
// CUDA includes
#include <cooperative_groups... |
15,093 | #include <cuda.h>
#include <cufft.h>
#include <cuda_profiler_api.h>
#include <stdio.h>
// Load helper: on SM 3.5+ the load goes through __ldg (the read-only
// data cache path); on older architectures — and in host-side
// compilation passes, where __CUDA_ARCH__ is undefined — it falls back
// to a plain dereference. T must be a type __ldg supports on the target.
template<typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if __CUDA_ARCH__ >= 350
return __ldg(ptr);
#else
return *ptr;
#endif
}
#ifndef PI_8
#define PI_8 0.39269908169
#endif
extern "C"
__global__
void Thin(
int nz... |
15,094 | #include <iostream>
#include <random>
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <ctime>
#include "cuda_runtime.h"
const unsigned int MAX_THREADS_PER_BLOCK = 1024;
const size_t DATA_COUNTS = 50 * 1024 * 1024;
int generated_random_numbers(int *arr, size_t counts)
{
srand(static_cast<unsigned in... |
15,095 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//#define NUM_DATA = 65535
unsigned NUM_DATA = 2147483647;
//#define MAX_THREAD_IN_SINGLE_BLOCK = 8*8*8
//#define MAX_BLOCK_COUNT_IN_GRID = 1024*1024*1024*4 // Same as unsigned size
//#define BLO... |
15,096 | #include "includes.h"
// Device-to-device copy of total_size floats using a grid-stride loop,
// so any launch geometry covers the whole range.
//
// Changes vs. the original:
//  * removed the trailing __syncthreads() — the kernel uses no shared
//    memory and nothing executes after the barrier, so it only added
//    cost (all threads reached it, so behavior is unchanged);
//  * starting index and stride are computed in size_t to avoid 32-bit
//    overflow when the grid/data are very large.
__global__ void device_only_copy(float* output, float* input, size_t total_size){
    const size_t stride = (size_t)blockDim.x * gridDim.x;
    for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
         i < total_size;
         i += stride) {
        output[i] = input[i];
    }
}
15,097 | #include "includes.h"
__global__ void set_coords_3D(float* coords, size_t z, size_t y, size_t x){
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
size_t id_x = index % x;
size_t id_y = (index / x) % y;
size_t id_z = index / (x * y);
if(index < x * y * z){
coords[index] = id_z - (float)z/2.0;
coords[index + x * y ... |
15,098 | #include "includes.h"
__global__ void kernel_pow_grad_device(int *x, int power, int *grad, int *out, bool grad_is_scalar, unsigned int size) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += stride) {
out[i] = grad[(grad_... |
extern "C"
// Row-sum kernel: each thread reduces one row of the pointer-to-pointer
// array globalInputData (row threadIdx.x, `size` entries) into the
// matching slot of globalOutputData.
__global__ void sampleKernel(float** globalInputData, int size, float* globalOutputData)
{
    const unsigned int row = threadIdx.x;
    float acc = 0.0f;
    for (int i = 0; i < size; ++i)
        acc += globalInputData[row][i];
    globalOutputData[row] = acc;
    __syncthreads();
}
|
15,100 | #include "includes.h"
// Mean of input[0..N): every launched thread redundantly performs the
// full serial sum and stores the same quotient into *output — "naive"
// in that no parallel reduction is used.
__global__ void naiveKernel(int N, float *input, float *output){
    float total = 0.f;
    for (int i = 0; i < N; ++i)
        total += input[i];
    *output = total / N;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.