serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
14,301 | #include "includes.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Multi-GPU sample using OpenMP for threading on the CPU side
* needs a compiler that supports OpenMP 2.0
*/
using namespace std;
// a simple kernel that simply increments each array element by b
// a predicate that checks whether each array element is set to its index plus b
// Kernel: add the scalar b to every element of g_a in place.
// One thread per element; the launch is assumed to cover exactly the array
// length (there is no bounds guard, matching the original sample's contract).
__global__ void kernelAddConstant(int *g_a, const int b)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    g_a[tid] = g_a[tid] + b;
}
14,302 | #include "stdio.h"
#define NUM_BLOCKS 32
#define NUM_THREADS_PER_BLOCK 32
// value is a pointer to a single integer
// Writes 1 through `value`; only thread 0 of block 0 performs the store,
// every other thread is a no-op.
__global__ void testKernel(int* value) {
    const bool isFirst = (blockIdx.x == 0) && (threadIdx.x == 0);
    if (isFirst) {
        *value = 1;
    }
}
// Smoke test: round-trip an int through the GPU and verify the kernel ran.
// Returns 0 on success, -1 if the value did not come back as 1.
int main(int argc, char** argv) {
    int value_cpu = 0;
    int* value_gpu;
    // FIX: the allocation and both copies previously used sizeof(float) for
    // an int buffer. They happen to be the same size on common platforms,
    // but the code is now correct by construction.
    cudaMalloc((void**)&value_gpu, sizeof(int));
    cudaMemcpy(value_gpu, &value_cpu, sizeof(int), cudaMemcpyHostToDevice);
    /* Call the gpu kernel */
    testKernel<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>(value_gpu);
    // This blocking copy also synchronizes with the kernel.
    cudaMemcpy(&value_cpu, value_gpu, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(value_gpu);  // FIX: device allocation was previously leaked
    if (value_cpu == 1) {
        printf("The cuda test passed GPU not corrupted\n");
        return 0;
    } else {
        printf("The cuda test failed the GPU is corrupted\n");
        return -1;
    }
}
|
14,303 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <sys/time.h>
// includes, kernels
#include "trap_kernel.cu"
#define LEFT_ENDPOINT 10
#define RIGHT_ENDPOINT 1005
#define NUM_TRAPEZOIDS 100000000
double compute_on_device(float, float, int, float);
extern "C" double compute_gold(float, float, int, float);
// Driver: computes the integral of f over [a, b] with n trapezoids on the
// CPU (compute_gold) and on the GPU (compute_on_device) and prints both.
// NOTE(review): main falls off the end without an explicit return; C++
// defines this as returning 0 for main, so it is legal.
int
main(void)
{
// Number of trapezoids and integration bounds come from the macros above.
int n = NUM_TRAPEZOIDS;
float a = LEFT_ENDPOINT;
float b = RIGHT_ENDPOINT;
float h = (b-a)/(float)n; // Height of each trapezoid
printf("The height of the trapezoid is %f \n", h);
// CPU reference result.
double reference = compute_gold(a, b, n, h);
printf("Reference solution computed on the CPU = %f \n", reference);
/* Write this function to complete the trapezoidal on the GPU. */
double gpu_result = compute_on_device(a, b, n, h);
printf("Solution computed on the GPU = %f \n", gpu_result);
}
/* Complete this function to perform the trapezoidal rule on the GPU. */
// GPU implementation of the trapezoidal rule over [a, b] with n trapezoids
// of height h.
// TODO: unimplemented stub — always returns 0.0; the assignment comment
// above asks for this to be completed.
double
compute_on_device(float a, float b, int n, float h)
{
return 0.0;
}
|
14,304 | #include <stdio.h>
// Deliberately empty kernel: used below to measure raw launch overhead.
__global__ void EmptyKernel() { }
// Measures the average kernel-launch overhead by timing N launches of an
// empty kernel with CUDA events and printing the mean in milliseconds.
int main() {
    const int N = 100000;
    float time, cumulative_time = 0.f;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Warm-up launch so the first timed iteration does not include one-time
    // driver/context initialization costs.
    EmptyKernel<<<1,1>>>();
    cudaDeviceSynchronize();
    for (int i = 0; i < N; i++) {
        cudaEventRecord(start, 0);
        EmptyKernel<<<1,1>>>();
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time, start, stop);
        cumulative_time = cumulative_time + time;
    }
    printf("Kernel launch overhead time: %3.5f ms \n", cumulative_time / N);
    // FIX: the events were previously never destroyed (resource leak).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
14,305 |
#include <iostream>
#include <chrono>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
int THREADS_PER_BLOCK;
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Print the CUDA error string with its source location and terminate.
// Invoked through the HANDLE_ERROR macro, which supplies __FILE__/__LINE__.
static void HandleError( cudaError_t err,
                         const char *file,
                         int line ) {
    if (err == cudaSuccess)
        return;  // fast path: nothing to report
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
            file, line );
    exit( EXIT_FAILURE );
}
// Evaluate the polynomial with coefficients poly[0..degree] (lowest order
// first) at each entry of `array`, overwriting the entry with the result.
// One thread per element, guarded against running past n.
__global__ void polynomial(float* array, float* poly, int degree, int n) {
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= n)
        return;
    float acc = 0.;    // running value of the polynomial
    float xpow = 1.;   // x^i, updated incrementally
    for (int d = 0; d <= degree; d++) {
        acc += xpow * poly[d];
        xpow *= array[gid];
    }
    array[gid] = acc;
}
/* Host wrapper: evaluates `poly` (degree+1 coefficients) at every entry of
 * `array` on the GPU, overwriting `array` with the results. `poly` is also
 * copied back (unchanged by the kernel), preserving the original behavior. */
void polynomial_expansion (float* poly, int degree,
                           int n, float* array) {
    float *d_poly, *d_array;
    const int bytesArray = n * sizeof(float);
    const int bytesPoly = (degree + 1) * sizeof(float);

    // Device allocations.
    HANDLE_ERROR(cudaMalloc(&d_array, bytesArray));
    HANDLE_ERROR(cudaMalloc(&d_poly, bytesPoly));

    // Host -> device transfers.
    HANDLE_ERROR(cudaMemcpy(d_poly, poly, bytesPoly, cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(d_array, array, bytesArray, cudaMemcpyHostToDevice));

    // Ceil-divide so every element is covered by a thread.
    const int gridSize = (n + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    polynomial<<<gridSize, THREADS_PER_BLOCK>>>(d_array, d_poly, degree, n);

    // Device -> host transfers.
    HANDLE_ERROR(cudaMemcpy(array, d_array, bytesArray, cudaMemcpyDeviceToHost));
    HANDLE_ERROR(cudaMemcpy(poly, d_poly, bytesPoly, cudaMemcpyDeviceToHost));

    // Cleanup.
    HANDLE_ERROR(cudaFree(d_array));
    HANDLE_ERROR(cudaFree(d_poly));
}
// Entry point: parses n, degree and blocksize from argv, runs the GPU
// polynomial expansion once, and prints n, degree and the elapsed time.
int main (int argc, char* argv[]) {
    if (argc < 4) {
        std::cerr<<"usage: "<<argv[0]<<" n degree blocksize"<<std::endl;
        return -1;
    }
    int n = atoi(argv[1]); //TODO: atoi is an unsafe function
    int degree = atoi(argv[2]);
    THREADS_PER_BLOCK = atoi(argv[3]);
    // FIX: reject non-positive sizes. Previously n<=0 or blocksize<=0 led to
    // zero-sized allocations and a division by zero in the grid computation.
    if (n <= 0 || degree < 0 || THREADS_PER_BLOCK <= 0) {
        std::cerr<<"n and blocksize must be positive, degree non-negative"<<std::endl;
        return -1;
    }
    int nbiter = 1;
    float* array = new float[n];
    float* poly = new float[degree+1];
    // All-ones inputs: the expected result is degree+1 at x=1.
    for (int i=0; i<n; ++i)
        array[i] = 1.;
    for (int i=0; i<degree+1; ++i)
        poly[i] = 1.;
    std::chrono::time_point<std::chrono::system_clock> begin, end;
    begin = std::chrono::system_clock::now();
    for (int iter = 0; iter<nbiter; ++iter)
        polynomial_expansion (poly, degree, n, array);
    end = std::chrono::system_clock::now();
    std::chrono::duration<double> totaltime = (end-begin)/nbiter;
    std::cerr<<array[0]<<std::endl;
    std::cout<<n<<" "<<degree<<" "<<totaltime.count()<<std::endl;
    delete[] array;
    delete[] poly;
    return 0;
}
|
14,306 | extern "C" __constant__ int my_constant = 314;
extern "C" __global__ void add(
const float* x,
const float* y,
float* out,
unsigned int n
) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
out[i] = x[i] + y[i];
}
} |
14,307 |
#include <stdio.h>
__global__ void scepec(void);
// Launches the (empty) `scepec` kernel once and reports it on the host.
int main(int argc, const char * argv[]){
    scepec <<<1,1>>>();
    // FIX: kernel launches are asynchronous; wait for completion so the
    // process does not exit while the kernel may still be in flight.
    cudaDeviceSynchronize();
    printf("Pravkar sem poklical in izvedel scepec!");
    return 0;
}
// Intentionally empty kernel ("scepec" is Slovenian for "pinch/snippet");
// exists only so main has something to launch.
__global__ void scepec(void){
}
|
14,308 | #include "includes.h"
// Copies, for each tile of the grid `dst`, the tile's first column (and the
// column immediately to its right, when it exists) into the staging buffer
// `shared_cols`. Each block handles one tile; threadIdx.y walks rows within
// the tile. `shared_rows` is accepted but not used by this kernel.
// NOTE(review): `shared_cols` is laid out as 2*M entries per blockIdx.x
// (current column then next column, M rows each) — indexing below implies
// this; confirm against the consumer of `shared_cols`.
// NOTE(review): only the row bound (< M) is guarded per-thread; the launch
// geometry must match tile_x/tile_y for the column indexing to stay in range.
__global__ void gpu_stencil2D_4pt_hack5_cp_cols(double * dst, double * shared_cols, double *shared_rows,int tile_x,int tile_y, int M, int N){
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.y==0)){
printf("copy cols begin!\n");
}
#endif
// Top-left corner of this block's tile in the global M x N grid
// (row-major with row stride N).
int base_global_row = tile_y * blockIdx.y;
int base_global_col = tile_x * blockIdx.x;
int base_global_idx = N*base_global_row + base_global_col ;
int nextCol = base_global_col+1;
bool legalNextCol = (nextCol<N);  // right-neighbor column exists
int t = threadIdx.y;
// Destination index in shared_cols: 2*M slots per block in x, current
// column first, the neighboring column M entries later.
int idx = 2*M*blockIdx.x + t + base_global_row;
int idx_nextCol = idx + M ;
bool legalCurRow = (base_global_row + t)<M;  // row-bound guard
if(legalCurRow){
// Copy this tile's own column element for row t.
shared_cols[idx] = dst[base_global_idx + t*N];
}
if(legalNextCol && legalCurRow){
// Copy the element one column to the right for row t.
shared_cols[idx_nextCol] = dst[base_global_idx + t*N+1];
}
__syncthreads();
#ifdef CUDA_DARTS_DEBUG
// if(threadIdx.y==0){
// printf("blockDimy = %d\n",blockDim.y);
// }
if(blockIdx.x==1 && t<5){
printf("addr: %d ,%f,\n",idx_nextCol,shared_cols[idx_nextCol]);
}
#endif
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.y==0)){
printf("copy cols finish!\n");
}
#endif
}
14,309 | __constant__ int numRepeats;
extern "C"
__global__ void bytePacketKernel(
int numPackets,
char* inputPackets,
int* packetIndices,
char* numHTTPPackets)
{
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numPackets)
{
int packetStart = packetIndices[tid];
int nextPacket = packetIndices[tid+1];
for(int repeat=0;repeat<numRepeats;repeat++) {
numHTTPPackets[tid]=0;
int state = 0;
for (int i=packetStart; i<nextPacket; i++)
{
switch (state) {
case 0:
if (inputPackets[i] == 'G' && inputPackets[i + 1] == 'E' &&
inputPackets[i + 2] == 'T' && inputPackets[i + 3] == ' ') {
state++;
i+=3;
}
break;
case 1:
if (inputPackets[i] == ' ')
state++;
break;
case 2:
if (inputPackets[i] == 'H' && inputPackets[i + 1] == 'T' &&
inputPackets[i + 2] == 'T' && inputPackets[i + 3] == 'P' &&
inputPackets[i + 4] == '/' && inputPackets[i + 6] == '.')
numHTTPPackets[tid]=1;
i=nextPacket; //EXIT LOOP
break;
}
}
}
}
} |
14,310 | #include <time.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Euclid's algorithm ("MDC" = GCD in Portuguese) run sequentially by a
// single GPU thread. On exit, *x holds the GCD of the original *x and *y.
// NOTE: assumes *y != 0 on entry (first iteration computes *x % *y).
__global__
void mdcSequencialCUDA(int *x, int *y, int *resto)
{
    for (;;)
    {
        *resto = *x % *y;  // remainder of the current pair
        *x = *y;           // shift: (x, y) <- (y, x mod y)
        *y = *resto;
        if (*resto == 0)
            break;         // y reached 0: GCD is now in *x
    }
}
// Host driver: computes gcd(50, 20) on the GPU with a single-thread kernel
// and prints the result and the elapsed wall time in milliseconds.
int main(void)
{
// Start timing the whole run
clock_t t; // variable that stores the start timestamp
t = clock(); // record the start time
// Host-side copies of x, y and the remainder
int x, y, resto;
// Device copies of x, y and the remainder
int *d_x, *d_y, *d_resto;
int size = sizeof(int);
// Allocate device space for the variables
cudaMalloc((void **) &d_x, size);
cudaMalloc((void **) &d_y, size);
cudaMalloc((void **) &d_resto, size);
// Set the input values
x = 50;
y = 20;
resto = 1;
// Copy the inputs to the GPU
cudaMemcpy(d_x, &x, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, &y, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_resto, &resto, size, cudaMemcpyHostToDevice);
// Compute the GCD on the GPU (single block, single thread)
mdcSequencialCUDA<<<1,1>>>(d_x, d_y, d_resto);
// Copy the result back to the host (the kernel leaves the GCD in x);
// this blocking copy also synchronizes with the kernel.
cudaMemcpy(&x, d_x, size, cudaMemcpyDeviceToHost);
//cudaMemcpy(&y, d_y, size, cudaMemcpyDeviceToHost);
//cudaMemcpy(&resto, d_resto, size, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_resto);
//free(x);
//free(y);
//free(resto);
printf("Resultado: %i", x);
// Stop timing
t = clock() - t; // end time minus start time
double tempo_execucao = (double)(((double)t)/(CLOCKS_PER_SEC/1000)); // result in milliseconds
//cout << "O tempo de execucao foi da aplicacao: " << (double)tempo_execucao << " milissegudos.";
//printf("tempo de execucao: %f", tempo_execucao);
printf(" Tempo gasto: %g ms.", tempo_execucao);
return 0;
}
|
14,311 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// Element-wise sum of two vectors: c[i] = a[i] + b[i]. One thread per
// element, guarded against the grid overshooting n.
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n)
        return;
    c[tid] = a[tid] + b[tid];
}
// Element-wise sum of three vectors: d[i] = a[i] + b[i] + c[i].
__global__ void vecAdd3(double *a, double *b, double *c,double *d,int n)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n)
        return;
    d[tid] = a[tid] + b[tid] + c[tid];
}
// Element-wise sum of four vectors: e[i] = a[i] + b[i] + c[i] + d[i].
__global__ void vecAdd4(double *a, double *b, double *c,double *d,double *e,int n)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n)
        return;
    e[tid] = a[tid] + b[tid] + c[tid] + d[tid];
}
// Element-wise sum of five vectors: f[i] = a[i] + b[i] + c[i] + d[i] + e[i].
__global__ void vecAdd5(double *a, double *b, double *c,double *d,double *e,double *f,int n)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n)
        return;
    f[tid] = a[tid] + b[tid] + c[tid] + d[tid] + e[tid];
}
// Benchmark: times an N-ary vector addition (tensor_num inputs of
// tensor_size doubles each) on the GPU, reporting total and compute-only
// times in milliseconds.
// Usage: prog tensor_num tensor_size   (tensor_num in 2..5)
int main( int argc, char* argv[] )
{
    // Size of vectors
    int n = 100000;
    int tensor_num=0;
    int tensor_size=0;
    clock_t start1,start2, end2,end1;
    // Host input vectors
    double *h_a;
    double *h_b;
    //Host output vector
    double *h_c;
    double *h_d;
    double *h_e;
    double *h_f;
    // Device input vectors
    double *d_a;
    double *d_b;
    //Device output vector
    double *d_c;
    double *d_d;
    double *d_e;
    double *d_f;
    // FIX: argv was previously read without checking argc, crashing when
    // arguments were missing.
    if (argc < 3) {
        fprintf(stderr, "usage: %s tensor_num tensor_size\n", argv[0]);
        return 1;
    }
    tensor_num=atoi(argv[1]);
    tensor_size=atoi(argv[2]);
    n=tensor_size;
    //printf("Tensor num: %d, tensor_size: %d\n",tensor_num,tensor_size);
    // Size, in bytes, of each vector
    size_t bytes = n*sizeof(double);
    // Allocate memory for each vector on host
    h_a = (double*)malloc(bytes);
    h_b = (double*)malloc(bytes);
    h_c = (double*)malloc(bytes);
    h_d = (double*)malloc(bytes);
    h_e = (double*)malloc(bytes);
    h_f = (double*)malloc(bytes);
    // Allocate memory for each vector on GPU
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    cudaMalloc(&d_d, bytes);
    cudaMalloc(&d_e, bytes);
    cudaMalloc(&d_f, bytes);
    int i;
    // Initialize vectors on host (sin^2 + cos^2 sums to 1 per element)
    for( i = 0; i < n; i++ ) {
        h_a[i] = sin(i)*sin(i);
        h_b[i] = cos(i)*cos(i);
    }
    start1=clock();
    // Copy host vectors to device.
    // NOTE(review): h_c/h_d/h_e are copied uninitialized for tensor_num>=3;
    // only the transfer time matters here since the sums are never checked.
    cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
    if(tensor_num>=3)
        cudaMemcpy( d_c, h_c, bytes, cudaMemcpyHostToDevice);
    if(tensor_num>=4)
        cudaMemcpy( d_d, h_d, bytes, cudaMemcpyHostToDevice);
    if(tensor_num>=5)
        cudaMemcpy( d_e, h_e, bytes, cudaMemcpyHostToDevice);
    int blockSize, gridSize;
    // Number of threads in each thread block
    blockSize = 1024;
    // Number of thread blocks in grid (ceil-divide)
    gridSize = (int)ceil((float)n/blockSize);
    cudaDeviceSynchronize();
    start2=clock();
    // Execute the kernel matching the requested arity
    if(tensor_num==2)
        vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
    if(tensor_num==3)
        vecAdd3<<<gridSize, blockSize>>>(d_a, d_b, d_c,d_d, n);
    if(tensor_num==4)
        vecAdd4<<<gridSize, blockSize>>>(d_a, d_b, d_c,d_d,d_e, n);
    if(tensor_num==5)
        vecAdd5<<<gridSize, blockSize>>>(d_a, d_b, d_c,d_d,d_e,d_f, n);
    cudaDeviceSynchronize();
    end2=clock();
    // Copy array back to host
    cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
    end1=clock();
    // Sum up vector c and print result divided by n, this should equal 1 within error
    //for(i=0; i<n; i++)
    // sum += h_c[i];
    //printf("final result: %f\n", sum/n);
    float time1 = (float)(end1 - start1) / CLOCKS_PER_SEC;
    float time2 = (float)(end2 - start2) / CLOCKS_PER_SEC;
    printf("[%d, %d]: %f ms, computing: %f ms\n", tensor_num,tensor_size, time1*1000, time2*1000);
    // Release device memory (FIX: d_d was previously never freed)
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_d);
    cudaFree(d_e);
    cudaFree(d_f);
    // Release host memory
    free(h_a);
    free(h_b);
    free(h_c);
    free(h_d);
    free(h_e);
    free(h_f);
    return 0;
}
14,312 | #include <stdio.h>
// Kernel definition
// NOTE: despite its name, this kernel performs no addition — it overwrites
// A with zeros and stores each thread's index into B and C.
__global__ void vecAdd(float* A, float* B, float* C) {
    const int tid = threadIdx.x;
    A[tid] = 0;
    B[tid] = tid;
    C[tid] = tid;
}
int devcheck(int);
#define SIZE 10
// Initializes three host arrays, runs the (misnamed) vecAdd kernel, and
// prints the contents of C copied back from the device.
int main() {
    devcheck(0);
    int N = SIZE;
    float A[SIZE], B[SIZE], C[SIZE];
    // Kernel invocation
    int j;
    for (j=0;j<N;j++)
    {
        A[j]=1;
        B[j]=1;
        C[j]=1;
    }
    float *devPtrA;
    float *devPtrB;
    float *devPtrC;
    int memsize = SIZE * sizeof(float);
    cudaMalloc((void**) &devPtrA, memsize);
    cudaMalloc((void**) &devPtrB, memsize);
    cudaMalloc((void**) &devPtrC, memsize);
    cudaMemcpy(devPtrA, A, memsize, cudaMemcpyHostToDevice);
    cudaMemcpy(devPtrB, B, memsize, cudaMemcpyHostToDevice);
    vecAdd<<<1, N>>>(devPtrA, devPtrB, devPtrC);
    cudaMemcpy(C, devPtrC, memsize, cudaMemcpyDeviceToHost);
    for (int i = 0; i < SIZE; i++)
        printf("C[%d]=%f\n", i, C[i]);
    // FIX: devPtrA was previously freed three times while devPtrB and
    // devPtrC leaked; free each buffer exactly once.
    cudaFree(devPtrA);
    cudaFree(devPtrB);
    cudaFree(devPtrC);
    return 0;
}
// Validates that `gpudevice` is a usable CUDA device, prints its properties,
// and selects it with cudaSetDevice. Exits the process on an invalid index
// or if device emulation is detected. Always returns 1 otherwise.
int devcheck(int gpudevice) {
    int device_count = 0;
    int device; // used with cudaGetDevice() to verify cudaSetDevice()
    // get the number of non-emulation devices detected
    cudaGetDeviceCount(&device_count);
    // FIX: valid indices are 0..device_count-1, so the comparison must be
    // >= (the old `>` let gpudevice == device_count slip through, which the
    // message text itself already described as the intended check).
    if (gpudevice >= device_count) {
        printf("gpudevice >= device_count ... exiting\n");
        exit(1);
    }
    cudaError_t cudareturn;
    cudaDeviceProp deviceProp;
    // cudaGetDeviceProperties() is also demonstrated in the deviceQuery/ example
    // of the sdk projects directory
    cudaGetDeviceProperties(&deviceProp, gpudevice);
    printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n", deviceProp.major,
           deviceProp.minor);
    printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount);
    // NOTE(review): `major` is the compute capability, not a core count;
    // cores-per-SM would need a capability lookup table.
    printf(" Number of cores: %d\n", deviceProp.major );
    // FIX: the size_t fields below were printed with %u, which is undefined
    // behavior on LP64 platforms; use %zu.
    printf(" Total amount of global memory: %zu bytes\n", deviceProp.totalGlobalMem);
    printf(" Total amount of constant memory: %zu bytes\n", deviceProp.totalConstMem);
    printf(" Total amount of shared memory per block: %zu bytes\n", deviceProp.sharedMemPerBlock);
    printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
    printf(" Warp size: %d\n", deviceProp.warpSize);
    printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
    printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
           deviceProp.maxThreadsDim[0],
           deviceProp.maxThreadsDim[1],
           deviceProp.maxThreadsDim[2]);
    printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
           deviceProp.maxGridSize[0],
           deviceProp.maxGridSize[1],
           deviceProp.maxGridSize[2]);
    printf(" Maximum memory pitch: %zu bytes\n", deviceProp.memPitch);
    printf(" Texture alignment: %zu bytes\n", deviceProp.textureAlignment);
    printf(" Clock rate: %.2f GHz\n", deviceProp.clockRate * 1e-6f);
    if (deviceProp.major > 999) {
        printf("warning, CUDA Device Emulation (CPU) detected, exiting\n");
        exit(1);
    }
    // choose a cuda device for kernel execution
    cudareturn = cudaSetDevice(gpudevice);
    if (cudareturn == cudaErrorInvalidDevice) {
        perror("cudaSetDevice returned cudaErrorInvalidDevice");
    } else {
        // double check that device was properly selected
        cudaGetDevice(&device);
        printf("cudaGetDevice()=%d\n", device);
    }
    return 1;
}
|
14,313 | extern "C" __global__ void manual_dot_nn_op_float_m1_k256_n512_kernel0(float* input0, float* input1, float* output0)
{
int warp_id = threadIdx.x >> 5;
int lane_id = threadIdx.x & 31;
int col_id = blockIdx.x * blockDim.x / 4 + lane_id;
if (col_id < 512)
{
float val = 0;
int k_start = warp_id * 64;
int k_end = (warp_id + 1) * 64;
for (int i = k_start; i < k_end; i++)
{
val = fma(input0[i], input1[i * 512 + col_id], val);
}
if (warp_id == 0)
{
output0[col_id]=0;
}
__syncthreads();
atomicAdd(output0 + col_id, val);
}
} |
14,314 | #include <stdio.h>
// Every thread atomically increments the shared 64-bit counter once, so the
// final value equals the total number of launched threads.
__global__ void kernel(unsigned long long int *count) {
    atomicAdd(count, 1ULL);
    //printf("%llu\n", *count);
}
// Counts the total number of launched threads with a device-side atomic
// counter and prints the result.
int main()
{
    unsigned long long int *count;
    unsigned long long int *d_count;
    int size = sizeof(unsigned long long int);
    cudaMalloc((void **)&d_count, size);
    count = (unsigned long long int *) malloc(size);
    // NOTE: despite the names, dimBlock is passed as the GRID dimensions
    // and dimGrid as the per-block THREAD dimensions in the launch below.
    dim3 dimBlock (481, 271); // grid: number of blocks
    dim3 dimGrid (32, 32);    // block: 32*32 = 1024 threads per block
    // FIX: the counter was previously "initialized" with
    // cudaMemcpy(d_count, 0, ...), i.e. a copy from a NULL host pointer;
    // cudaMemset zeroes the device buffer directly.
    cudaMemset(d_count, 0, size);
    kernel <<< dimBlock, dimGrid >>> (d_count);
    cudaMemcpy(count, d_count, size, cudaMemcpyDeviceToHost);
    printf("%llu\n", *count);
    free(count);
    cudaFree(d_count);
    return 0;
}
|
14,315 | #include "includes.h"
// Squares each element of the num_rows x num_cols matrix `feats` (row
// stride ldf) into `feats_sq` (row stride lds). Both dimensions use
// grid-stride loops, so any 2D launch configuration covers the matrix.
__global__ void square_matrix_kernel(int32_t num_rows, int32_t num_cols, const float* feats, int32_t ldf, float* feats_sq, int32_t lds) {
    const int row0 = blockIdx.y * blockDim.y + threadIdx.y;
    const int col0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int rowStride = blockDim.y * gridDim.y;
    const int colStride = blockDim.x * gridDim.x;
    for (int row = row0; row < num_rows; row += rowStride) {
        for (int col = col0; col < num_cols; col += colStride) {
            const float v = feats[row * ldf + col];
            feats_sq[row * lds + col] = v * v;
        }
    }
}
14,316 | // Copyright (c) Phigent Robotics. All rights reserved.
// Reference https://arxiv.org/abs/2211.17111
#include <stdio.h>
#include <stdlib.h>
/*
Function: pillar pooling
Args:
c : number of channels
n_intervals : number of unique points
depth : input depth, FloatTensor[b,n,d,h,w]
feat : input feat, FloatTensor[b,n,h,w,c]
ranks_depth : input index of depth, IntTensor[n]
ranks_feat : input index of feat, IntTensor[n]
ranks_bev : output index, IntTensor[n]
interval_starts : starting position of each pooled point, IntTensor[n_intervals]
interval_lengths : how many points in each pooled point, IntTensor[n_intervals]
out : output features, FloatTensor[b, d, h, w, c]
*/
// BEV pooling forward: one thread per (interval, channel) pair. Thread idx
// maps to interval idx/c and channel idx%c; the thread sums
// depth[ranks_depth[k]] * feat[ranks_feat[k]*c + channel] over the
// interval's points and writes the sum to out[ranks_bev[start]*c + channel].
__global__ void bev_pool_v2_kernel(int c, int n_intervals,
const float *__restrict__ depth,
const float *__restrict__ feat,
const int *__restrict__ ranks_depth,
const int *__restrict__ ranks_feat,
const int *__restrict__ ranks_bev,
const int *__restrict__ interval_starts,
const int *__restrict__ interval_lengths,
float* __restrict__ out) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int index = idx / c;   // which interval (pooled output point)
int cur_c = idx % c;   // which channel within that point
if (index >= n_intervals) return;  // tail guard for the last block
int interval_start = interval_starts[index];
int interval_length = interval_lengths[index];
float psum = 0;
const float* cur_depth;
const float* cur_feat;
// Accumulate depth-weighted features over the interval's points.
for(int i = 0; i < interval_length; i++){
cur_depth = depth + ranks_depth[interval_start+i];
cur_feat = feat + ranks_feat[interval_start+i] * c + cur_c;
psum += *cur_feat * *cur_depth;
}
// All points of an interval share one output cell (ranks_bev at the
// interval start); no atomics needed since each (interval, channel) is
// owned by exactly one thread.
const int* cur_rank = ranks_bev + interval_start;
float* cur_out = out + *cur_rank * c + cur_c;
*cur_out = psum;
}
/*
Function: pillar pooling backward
Args:
c : number of channels
n_intervals : number of unique points
out_grad : gradient of the BEV fmap from top, FloatTensor[b, d, h, w, c]
depth : input depth, FloatTensor[b,n,d,h,w]
feat : input feat, FloatTensor[b,n,h,w,c]
ranks_depth : input index of depth, IntTensor[n]
ranks_feat : input index of feat, IntTensor[n]
ranks_bev : output index, IntTensor[n]
interval_starts : starting position of each pooled point, IntTensor[n_intervals]
interval_lengths : how many points in each pooled point, IntTensor[n_intervals]
depth_grad : gradient of the depth fmap, FloatTensor
feat_grad : gradient of the feature fmap, FloatTensor
*/
// BEV pooling backward: one thread per interval (no per-channel split).
// For each point in the interval it computes the depth gradient as the
// channel-wise dot of out_grad and feat; it then computes the feature
// gradient per channel as the point-wise dot of out_grad and depth.
__global__ void bev_pool_grad_kernel(int c, int n_intervals,
const float *__restrict__ out_grad,
const float *__restrict__ depth,
const float *__restrict__ feat,
const int *__restrict__ ranks_depth,
const int *__restrict__ ranks_feat,
const int *__restrict__ ranks_bev,
const int *__restrict__ interval_starts,
const int *__restrict__ interval_lengths,
float* __restrict__ depth_grad,
float* __restrict__ feat_grad) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_intervals) return;  // one thread per interval
int interval_start = interval_starts[idx];
int interval_length = interval_lengths[idx];
const int* cur_rank;
const float* cur_out_grad;
const float* cur_out_grad_start;
const float* cur_feat;
const float* cur_feat_start;
float* cur_depth_grad;
float grad_sum;
// d(out)/d(depth[k]) = sum over channels of out_grad * feat.
for(int i = 0; i < interval_length; i++){
cur_rank = ranks_bev + interval_start + i;
cur_out_grad_start = out_grad + * cur_rank * c;
cur_feat_start = feat + ranks_feat[interval_start+i] * c;
grad_sum = 0;
for(int cur_c = 0; cur_c < c; cur_c++){
cur_out_grad = cur_out_grad_start + cur_c;
cur_feat = cur_feat_start + cur_c;
grad_sum += *cur_out_grad * *cur_feat;
}
cur_depth_grad = depth_grad + ranks_depth[interval_start+i];
*cur_depth_grad = grad_sum;
}
float* cur_feat_grad;
const float* cur_depth;
// d(out)/d(feat) accumulated per channel over the interval's points.
// NOTE(review): the write below indexes ranks_feat[interval_start] (not
// +i), i.e. it assumes all points of an interval share one feature index —
// consistent with summing grad over i, but verify against the forward pass.
for(int cur_c = 0; cur_c < c; cur_c++){
grad_sum = 0;
for(int i = 0; i < interval_length; i++){
cur_rank = ranks_bev + interval_start + i;
cur_out_grad = out_grad + *cur_rank * c + cur_c;
cur_depth = depth + ranks_depth[interval_start+i];
grad_sum += *cur_out_grad * *cur_depth;
}
cur_feat_grad = feat_grad + ranks_feat[interval_start] * c + cur_c ;
* cur_feat_grad = grad_sum;
}
}
// Host launcher for the forward pass: one thread per (interval, channel),
// 256 threads per block, grid ceil-divided in double to avoid int overflow.
void bev_pool_v2(int c, int n_intervals, const float* depth, const float* feat, const int* ranks_depth,
                 const int* ranks_feat, const int* ranks_bev, const int* interval_starts, const int* interval_lengths, float* out) {
    const int gridSize = (int)ceil((double)n_intervals * c / 256);
    bev_pool_v2_kernel<<<gridSize, 256>>>(
        c, n_intervals, depth, feat, ranks_depth, ranks_feat,
        ranks_bev, interval_starts, interval_lengths, out
    );
}
// Host launcher for the backward pass: one thread per interval, 256 threads
// per block.
void bev_pool_v2_grad(int c, int n_intervals, const float* out_grad,
                      const float* depth, const float* feat, const int* ranks_depth, const int* ranks_feat,
                      const int* ranks_bev, const int* interval_starts, const int* interval_lengths, float* depth_grad, float* feat_grad) {
    const int gridSize = (int)ceil((double)n_intervals / 256);
    bev_pool_grad_kernel<<<gridSize, 256>>>(
        c, n_intervals, out_grad, depth, feat, ranks_depth, ranks_feat,
        ranks_bev, interval_starts, interval_lengths, depth_grad, feat_grad
    );
}
|
14,317 | #ifndef DELTA_CU
#define DELTA_CU
#include <math.h>
// Peskin's 4-point regularized delta function, piecewise over |x|:
// zero for |x| >= 2, one sqrt branch on [1, 2) and another on [0, 1).
// The arithmetic grouping of the original is preserved exactly so the
// floating-point results are bit-identical.
__device__ double peskin_delta(double x) {
    const double abs_x = fabs(x);
    if (abs_x >= 2.0)
        return 0.;           // outside the support
    double root = -4. * x*x; // common leading term of both discriminants
    double phi = -2.* abs_x; // common leading term of both numerators
    if (abs_x >= 1.0) {
        // outer band: (5 - 2|x| - sqrt(-4x^2 + 12|x| - 7)) / 8
        root += 12. * abs_x - 7.;
        phi += 5.;
        phi -= sqrt(root);
    } else {
        // inner band: (3 - 2|x| + sqrt(-4x^2 + 4|x| + 1)) / 8
        root += 4. * abs_x + 1;
        phi += 3.;
        phi += sqrt(root);
    }
    return 0.125 * phi;
}
#endif
|
14,318 | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Singly-linked node for the device-side lock-free queue below.
typedef struct node{
//TODO template for any data type
int data;
struct node * next;
}node, * pnode;
// Queue handle: head points at a dummy sentinel node, tail at the last
// linked node (see init()).
typedef struct queue{
pnode head;
pnode tail;
}queue, *pqueue;
__device__ int enqueue(int mydata,pqueue myqueue);
__device__ int dequeue(pnode mynode, pqueue myqueue);
__device__ pnode myAtomicCAS(pnode * address, pnode compare, pnode val);
__device__ void deleteNode(pnode delnode);
__global__ void app_bfs(pqueue myqueue);
__global__ void init(pqueue myqueue);
__global__ void show(pqueue myqueue);
int isError(cudaError_t cudaStatus, char* error_info);
// Host driver: builds a device-side queue, hammers it with concurrent
// enqueue/dequeue threads (app_bfs), and reports the elapsed time.
// Usage: queue block_num thread_num
// NOTE(review): most cudaStatus assignments from the event calls are never
// checked — only cudaDeviceReset's result is acted upon.
int main(int argc, char * argv[]){
int num_block, thread_per_block;
pqueue d_myqueue;
cudaError_t cudaStatus;
if(argc != 3){// with this if we cant go into cuda debug
printf("Usage: queue block_num thread_num\n");
return -1;
}
num_block = atoi(argv[1]);
thread_per_block = atoi(argv[2]);
// Reset the device so timing starts from a clean context.
cudaStatus = cudaDeviceReset();
if (isError(cudaStatus, "cudaDeviceReset error."))
return -1;
cudaStatus = cudaMalloc((void **)&d_myqueue, sizeof(queue));
cudaEvent_t start, stop;
float elapsedTime;
cudaStatus = cudaEventCreate(&start);
cudaStatus = cudaEventCreate(&stop);
cudaStatus = cudaEventRecord(start, 0);
// Single-thread kernel installs the sentinel node.
init<<<1,1>>>(d_myqueue);
cudaStatus = cudaDeviceSynchronize();
// Concurrent stress: odd threads enqueue, even threads dequeue.
app_bfs<<<num_block,thread_per_block>>>(d_myqueue);
cudaStatus = cudaDeviceSynchronize();
printf("[Info]%s\n",cudaGetErrorString(cudaGetLastError()));
//show<<<1,1>>>(d_myqueue);
//cudaStatus = cudaDeviceSynchronize();
cudaStatus = cudaEventRecord(stop, 0);
cudaStatus = cudaEventSynchronize(stop);
cudaStatus = cudaEventElapsedTime(&elapsedTime, start, stop);
printf("[Info]Block:%d\tThread:%d\tElapsedTime:%fms\n", num_block, thread_per_block, elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(d_myqueue);
//cudaDeviceSynchronize();
//printf("[Info]%s\n",cudaGetErrorString(cudaGetLastError()));
printf("[Info]Complete!\n");
return 0;
}
// Single-thread initializer: allocates the dummy sentinel node and points
// both head and tail at it, so the queue starts logically empty.
__global__ void init(pqueue myqueue){
    pnode sentinel = (pnode)malloc(sizeof(node));
    sentinel->data = -1;   // sentinel carries no real payload
    sentinel->next = NULL;
    myqueue->head = sentinel;
    myqueue->tail = sentinel;
}
// Helper: prints `error_info` when `cudaStatus` indicates failure.
// Returns 1 on error and 0 on success.
int isError(cudaError_t cudaStatus, char* error_info)
{
    if (cudaStatus == cudaSuccess)
        return 0;
    printf("[Error]%s\n", error_info);
    return 1;
}
// Debug kernel: walks the queue from head (including the sentinel) and
// prints each node's data on one tab-separated line.
// Intended for a <<<1,1>>> launch; not safe against concurrent mutation.
__global__ void show(pqueue myqueue){
    for (pnode cur = myqueue->head; cur != NULL; cur = cur->next) {
        printf("%d\t", cur->data);
    }
    printf("\n");
}
// Stress kernel: odd threads enqueue their global id, even threads dequeue
// one element into a scratch node.
// FIX: the scratch node was previously malloc'd by EVERY thread and never
// freed — enqueuing threads leaked it outright. It is now allocated only by
// dequeuing threads and released after use (dequeue only copies into it).
__global__ void app_bfs(pqueue myqueue){
    if(threadIdx.x % 2 == 1){
        //printf("block:%d\tthread:%d\n", blockIdx.x, threadIdx.x);
        enqueue(blockIdx.x * blockDim.x + threadIdx.x, myqueue);
    }
    else{
        pnode newnode = (pnode)malloc(sizeof(node));
        dequeue(newnode, myqueue);
        free(newnode);
    }
}
// Pointer-width compare-and-swap built on the 64-bit integer atomicCAS.
// Compares and swaps the raw pointer bits only, never the pointed-to data.
// Returns the value previously stored at *address (equal to `compare` on
// success, per atomicCAS semantics).
// NOTE(review): the casts assume 64-bit pointers; a 32-bit build would need
// the 32-bit atomicCAS overload — see the author's win32 comment below.
__device__ pnode myAtomicCAS(pnode * address, pnode compare, pnode val){
//compare just the address, not the value.
//sizeof(data *) = 8 int x64 and 4 in win32
return (pnode)atomicCAS((unsigned long long int*)address, (unsigned long long int)compare, (unsigned long long int)val);
}
// Lock-free enqueue: allocates a node holding `newdata` and links it after
// the current tail with CAS, then swings the tail pointer forward.
// Returns 1 once the node has been linked.
// NOTE(review): the initial sizeof() assignments to `flag` are dead stores
// (debug leftovers); the malloc result is not NULL-checked (the check is
// commented out below).
__device__ int enqueue(int newdata,pqueue myqueue){
pnode tail = NULL,next = NULL;
pnode newnode = (pnode)malloc(sizeof(node));
int flag = sizeof(node);
flag = sizeof(pnode);
/*
if (newnode == NULL){// added can avoid the unspecified launch failure!!!
printf("[Error]Malloc failed!\n");
return ;
}
*/
newnode->data = newdata;
newnode->next = NULL;
// Retry loop: snapshot tail, then either link after it or help another
// thread finish swinging the tail.
while(1){
tail = myqueue->tail;
next = tail->next;
if(tail == myqueue->tail){  // snapshot still consistent?
if(next == NULL){
// Tail really is last: try to link our node after it.
if(next == myAtomicCAS(&myqueue->tail->next, next, newnode)){
flag = 1;
break;
}
}
else{
// Tail is lagging: help advance it, then retry.
myAtomicCAS(&myqueue->tail, tail, next);// success or not both ok
}
}
}
// Swing the tail to our node; failure is fine (someone else helped).
myAtomicCAS(&myqueue->tail, tail, newnode); // success or not both ok
return flag;
}
// Lock-free dequeue: copies the front element's data into *mynode and
// advances head past it. Returns 0 on success, -1 when the queue is empty.
// NOTE(review): the removed (old sentinel) node is intentionally NOT freed
// — see the TODO below about memory reclamation (ABA / use-after-free
// hazards with plain free).
__device__ int dequeue(pnode mynode, pqueue myqueue){
pnode tail = NULL;
pnode head = NULL;
pnode next = NULL;
// Retry loop: snapshot head/tail/next and act only on a consistent view.
while(1){
head = myqueue->head;
tail = myqueue->tail;
next = head->next;
//printf("In:dequeue\n");
if(head == myqueue->head){  // snapshot still consistent?
if(head == tail){
//printf("In:head == tail\n");
if(next == NULL){
// Queue holds only the sentinel: empty.
//printf("Block:%d Thread:%d out:NULL\n", blockIdx.x, threadIdx.x);
return -1;
}
else
// Tail is lagging behind an in-flight enqueue: help advance it.
myAtomicCAS(&myqueue->tail, tail, next); // just try to do that...
}
else{
//printf("In:head!=tail\n");
// Read the payload BEFORE the CAS; once head moves another thread
// could reuse the node.
mynode->data = next->data;
if(head == myAtomicCAS(&myqueue->head, head, next)){
break;
}
}
}
}
//TODO first we don't delete node
//deleteNode(head);
return 0;
}
// Releases a queue node's device-heap storage.
// TODO: replace the plain free with a safe memory-reclamation scheme before
// enabling this from dequeue (concurrent readers may still hold the node).
__device__ void deleteNode(pnode delnode){
    free(delnode);
}
|
14,319 | #include <iostream>
/*
* Matrix is represented as 1D array. To access value in n-th row and m-th column
* type `array[n*COLUMNS + m];`
*/
// Row-major matrix stored as a flat float array: element (r, c) lives at
// arr[r * columns + c].
// NOTE(review): this struct owns a raw heap array but defines no
// destructor or copy control (rule of three) — copies share `arr` and
// nothing ever frees it. printMatrix() below takes Matrix by value and
// relies on that shallow copy, so adding a destructor here would require
// adding copy semantics as well.
struct Matrix {
float* arr;     // heap buffer of rows*columns floats, owned (never freed)
int rows;
int columns;
Matrix(int _rows, int _columns):
rows(_rows), columns(_columns), arr(new float[_rows * _columns]) {}
// Size of the backing buffer in bytes (used for cudaMalloc/cudaMemcpy).
int size() {
return sizeof(float) * rows * columns;
}
// Write `value` at (row, col); no bounds checking.
void set(int row, int col, float value) {
arr[row * columns + col] = value;
}
};
/*
* Some basic functions to play out with CUDA
*/
// Prints a greeting from every thread that runs this kernel.
__global__ void cuda_hello_world() {
printf("Hello World from GPU!\n");
}
// Copies the integer pointed to by a into b (single-value assignment).
__global__ void assign (int* a, int* b) {
*b = *a;
}
// Stores the sum of *a and *b into *c (single-value addition).
__global__ void add(int *a, int *b, int *c)
{
*c = *a + *b;
}
/*
* This functions returns transpose of the matrix.
* You should call this function like this:
*
* transpose2D<<< columns, rows >>>(matrix, acc);
*/
// Writes the transpose of `matrix` into `acc`: acc(c, r) = matrix(r, c).
// Launch as transpose2D<<< columns, rows >>>(matrix, acc, COLUMNS).
// NOTE(review): both source and destination are indexed with COLUMNS as the
// row stride, which is only consistent for square matrices — confirm.
__global__ void transpose2D (float* matrix, float* acc, int COLUMNS) {
    const int r = threadIdx.x;  // source row
    const int c = blockIdx.x;   // source column
    acc[c * COLUMNS + r] = matrix[r * COLUMNS + c];
}
/*
* This function returns average of every value in matrix
*
* mean2D<<< rows >>>(matrix, acc);
*/
//__global__ void mean2D (float* matrix, float* acc) {
//int row = threadIdx.x;
//float sum = 0.0;
//for (int i = 0; i < COLUMNS; i++) {
//sum += matrix[row * COLUMNS + i];
//}
//*acc = sum / COLUMNS;
//}
/*
* This function return dot product of two matrices.
*
* matmul2D<<< columns, rows >>>(A, B, Acc);
*/
// Computes one element of the matrix product Acc = A * B; the thread at
// (blockIdx = column, threadIdx = row) accumulates the row-by-column dot
// product. Launch as matmul2D<<< columns, rows >>>(A, B, Acc, COLUMNS).
__global__ void matmul2D (float* A, float* B, float* Acc, int COLUMNS) {
    int row = threadIdx.x;
    int col = blockIdx.x;
    // FIX: the accumulator was previously declared `int`, truncating every
    // fractional product before the final store into a float matrix.
    float sum = 0.0f;
    for(int i = 0; i < COLUMNS; i++) {
        sum += A[row * COLUMNS + i] * B[i * COLUMNS + col];
    }
    Acc[row * COLUMNS + col] = sum;
}
// Pretty-prints the matrix row by row between '|' delimiters.
// FIX: the loop bounds were swapped — the outer loop ran over `columns`
// while indexing it as the row, and the inner over `rows` as the column.
// For non-square matrices this printed garbage / read out of bounds; the
// outer loop now walks rows and the inner walks columns.
void printMatrix(Matrix matrix) {
    for (int i = 0; i < matrix.rows; i++) {
        std::cout << "| ";
        for (int j = 0; j < matrix.columns; j++) {
            std::cout << matrix.arr[i * matrix.columns + j] << " ";
        }
        std::cout << "|\n";
    }
}
// Returns a newly allocated transpose of `matrix`, computed on the GPU.
// The caller owns the returned Matrix (and its arr buffer).
Matrix* transpose(Matrix* matrix) {
    Matrix* result = new Matrix(matrix->columns, matrix->rows);
    float* cudaOriginal;
    float* cudaResult;
    cudaMalloc((void**) &cudaOriginal, matrix->size());
    cudaMalloc((void**) &cudaResult, result->size());
    cudaMemcpy(cudaOriginal, matrix->arr, matrix->size(), cudaMemcpyHostToDevice);
    transpose2D <<< matrix->columns, matrix->rows >>> (cudaOriginal, cudaResult, matrix->columns);
    cudaMemcpy(result->arr, cudaResult, result->size(), cudaMemcpyDeviceToHost);
    // FIX: both device buffers were previously leaked on every call.
    cudaFree(cudaOriginal);
    cudaFree(cudaResult);
    return result;
}
/*
* Return product of two matrices
*/
/*
 * Returns the product of two matrices computed on the GPU; throws
 * std::invalid_argument on an inner-dimension mismatch. Caller owns the
 * returned Matrix.
 * NOTE(review): the kernel and launch use only A->columns as the stride, so
 * results are only meaningful for square operands — confirm intended use.
 */
Matrix* matmul(Matrix* A, Matrix* B) {
    if (A->columns != B->rows) {
        std::cout << "Matrix dim mismatch, got ("
                  << A->rows
                  << ","
                  << A->columns
                  << ") and ("
                  << B->rows
                  << ","
                  << B->columns
                  << ")\n";
        throw std::invalid_argument( "Invalid input" );
    }
    Matrix* result = new Matrix(A->rows, B->columns);
    float* cudaA;
    float* cudaB;
    float* cudaAcc;
    cudaMalloc((void**) &cudaA, A->size());
    cudaMalloc((void**) &cudaB, B->size());
    cudaMalloc((void**) &cudaAcc, result->size());
    cudaMemcpy(cudaA, A->arr, A->size(), cudaMemcpyHostToDevice);
    cudaMemcpy(cudaB, B->arr, B->size(), cudaMemcpyHostToDevice);
    matmul2D <<< A->columns, A->rows >>> (cudaA, cudaB, cudaAcc, A->columns);
    cudaMemcpy(result->arr, cudaAcc, result->size(), cudaMemcpyDeviceToHost);
    // FIX: all three device buffers were previously leaked on every call.
    cudaFree(cudaA);
    cudaFree(cudaB);
    cudaFree(cudaAcc);
    return result;
}
// Reads an n x p matrix from stdin (first the two dimensions, then the
// values row by row). Caller owns the returned Matrix.
// FIX: the inner loop previously ran to j < p+1, so set(i, p, ...) wrote
// one element past each row — corrupting the next row and writing out of
// bounds on the last one. If the input stream really carries p+1 values
// per row (e.g. a trailing label column), the Matrix should instead be
// allocated as (n, p+1) — TODO confirm the data format with the caller.
Matrix* loadData() {
    int n, p;
    float temp;
    std::cin >> n;
    std::cin >> p;
    Matrix* data = new Matrix(n, p);
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < p; j++) {
            std::cin >> temp;
            data->set(i, j, temp);
        }
    }
    return data;
}
// Demo driver: multiplies two fixed 2x2 matrices on the GPU, prints the
// product and its transpose. The commented sections below are earlier
// scratch experiments kept for reference.
int main() {
    Matrix* A = new Matrix(2, 2);
    // FIX: the constructor already allocated arr; free it before replacing
    // the pointer (previously leaked).
    delete[] A->arr;
    A->arr = new float[2*2]{1, 1, 2, 2};
    Matrix* B = new Matrix(2, 2);
    delete[] B->arr;
    B->arr = new float[2*2]{3, 3, 4, 4};
    //Matrix* A = loadData();
    //Matrix* B = loadData();
    Matrix* mul = matmul(A, B);
    printMatrix(*mul);
    Matrix* t = transpose(mul);
    printMatrix(*t);
    //
    // SOME MATRIX
    //float* acc;
    //int size = (ROWS*COLUMNS)*sizeof(float);
    //float* zeroMatrix = (float*)malloc(size);
    //printMatrix(zeroMatrix);
    //
    //float* cudaMatrix;
    //float* cudaAcc;
    //cudaMalloc((void**) &cudaMatrix, size);
    //cudaMalloc((void**) &cudaAcc, sizeof(float));
    //cudaMemcpy(cudaMatrix, zeroMatrix, size, cudaMemcpyHostToDevice);
    //std::cout << "test\n";
    //mean2D <<< 1, ROWS >>> (cudaMatrix, cudaAcc);
    //cudaMemcpy(acc, cudaAcc, sizeof(float), cudaMemcpyDeviceToHost);
    //std::cout << *acc;
    //
    //
    // ASSIGN
    //int a = 5, b;
    //int *cudaA, *cudaB;
    //int size = sizeof(int);
    //cudaMalloc((void**) &cudaA, size);
    //cudaMalloc((void**) &cudaB, size);
    //cudaMemcpy(cudaA, &a, size, cudaMemcpyHostToDevice);
    //assign <<< 1, 1 >>> (cudaA, cudaB);
    //cudaMemcpy(&b, cudaB, size, cudaMemcpyDeviceToHost);
    //std::cout << b;
    //
    //
    // HELLO WORLD
    //cuda_hello_world<<<1,1>>>();
    //
    //
    // ADD
    //int a = 2, b = 3, c;
    //int *cudaA, *cudaB, *cudaC;
    //int size = sizeof(int);
    //cudaMalloc((void **)&cudaA, size);
    //cudaMalloc((void **)&cudaB, size);
    //cudaMalloc((void **)&cudaC, size);
    //
    //cudaMemcpy(cudaA, &a, size, cudaMemcpyHostToDevice);
    //cudaMemcpy(cudaB, &b, size, cudaMemcpyHostToDevice);
    //add<<<1,1>>>(cudaA, cudaB, cudaC);
    //
    //cudaMemcpy(&c, cudaC, size, cudaMemcpyDeviceToHost);
    //
    //std::cout << c;
    // FIX: free the heap objects (Matrix has no destructor, so release the
    // buffers explicitly; everything previously leaked).
    delete[] A->arr; delete A;
    delete[] B->arr; delete B;
    delete[] mul->arr; delete mul;
    delete[] t->arr; delete t;
    return 0;
}
|
14,320 | #include "includes.h"
// Extract, for each of nImages, an outNX x outNY window from the
// corresponding inNX x inNY input image, centered on maxloc[image].
// Pixels whose source falls outside the input are zeroed and flagged
// invalid in imageValid; in-bounds pixels are copied and flagged valid.
// Launch layout: blockIdx.z selects the image; x/y grid covers the window.
__global__ void cuArraysCopyExtractVaryingOffsetCorr(const float *imageIn, const int inNX, const int inNY, float *imageOut, const int outNX, const int outNY, int *imageValid, const int nImages, const int2 *maxloc)
{
int idxImage = blockIdx.z;
int outx = threadIdx.x + blockDim.x*blockIdx.x;
int outy = threadIdx.y + blockDim.y*blockIdx.y;
// source coordinates: window centered at the per-image peak location
int inx = outx + maxloc[idxImage].x - outNX/2;
int iny = outy + maxloc[idxImage].y - outNY/2;
if (outx < outNX && outy < outNY)
{
// row-major (image, x, y) flattening for output and input stacks
int idxOut = ( blockIdx.z * outNX + outx ) * outNY + outy;
int idxIn = ( blockIdx.z * inNX + inx ) * inNY + iny;
if (inx>=0 && iny>=0 && inx<inNX && iny<inNY) {
imageOut[idxOut] = imageIn[idxIn];
imageValid[idxOut] = 1;
}
else {
// out-of-bounds source: zero-fill and mark invalid
imageOut[idxOut] = 0.0f;
imageValid[idxOut] = 0;
}
}
} |
14,321 | #include<stdio.h>
// Demonstrates per-thread local storage: each thread copies the argument
// into a local variable. No observable effect.
__global__ void local(float in){
    float localCopy;
    localCopy = in;
}
// Each thread writes twice its own index into global memory.
__global__ void global(float *a){
    const int idx = threadIdx.x;
    a[idx] = 2.0f * (float)idx;
}
// Demonstrates shared memory: each thread stages one element of `a` into
// block-shared storage and synchronizes.
// NOTE(review): `avg` and `sum` are declared but never computed and nothing
// is written back -- the averaging step this kernel was apparently meant to
// demonstrate is unfinished.
__global__ void shared(float *a){
int i,index=threadIdx.x;
float avg,sum=0.0f;
__shared__ float sh_a[128];
sh_a[index]=a[index];
__syncthreads();
// (original placeholder: print the average here)
}
// Exercises the three kernels above on a 128-element buffer.
// Fix: the final copy-back passed cudaMemcpyHostToDevice while copying
// device -> host (dst = har on the host, src = dar on the device); it now
// uses cudaMemcpyDeviceToHost. The device buffer is also freed.
int main()
{
    local<<<1,128>>>(2.0f);
    float har[128];
    float *dar;
    cudaMalloc((void **)&dar,sizeof(float)*128);
    cudaMemcpy((void *)dar,(void *)har,sizeof(float)*128,cudaMemcpyHostToDevice);
    global<<<1,128>>>(dar);
    cudaMemcpy((void *)har,(void *)dar,sizeof(float)*128,cudaMemcpyDeviceToHost);
    shared<<<1,128>>>(dar);
    cudaMemcpy((void *)har,(void *)dar,sizeof(float)*128,cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaFree(dar);
    return 0;
}
|
14,322 | #include "includes.h"
// Elementwise reciprocal: output[i] = 1/input[i] for strictly positive
// inputs. Launch with blockDim.x * gridDim.x >= size.
// NOTE(review): entries with input[i] <= 0 leave output[i] untouched --
// confirm callers pre-initialize the output buffer.
__global__ void invert(float *output, int* input, const int size)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if(i<size)
{
int d = input[i];
if(d>0)
{
// __fdividef: fast, reduced-accuracy division intrinsic (d promoted to float)
output[i] = __fdividef(1.f, d);
}
}
} |
14,323 | #include <stdio.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define N 1000000
//generar arreglo
//generar arreglo
// Allocate an N-element array of pseudo-random ints in [0, 1000) and hand
// ownership to the caller through *array (caller frees with delete[]).
void GenArray(int **array)
{
    int *buf = new int[N];
    for (int i = 0; i < N; i++) {
        buf[i] = rand() % 1000;
    }
    *array = buf;
}
//procesamiento CPU
//procesamiento CPU
// Linear scan for the maximum of arr[0..N-1]; the result is written to *max.
// (Starts from 0, which is safe because GenArray produces non-negative values.)
void FindMax(int *arr, int *max)
{
    int best = 0;
    for (int i = 0; i < N; i++) {
        if (arr[i] > best)
            best = arr[i];
    }
    *max = best;
}
//Procesamiento GPU
//pregunta 2
// Single-block kernel: each thread scans a contiguous chunk of the array,
// then the block reduces the per-thread maxima in shared memory.
// Assumes a launch of exactly one block with 1024 threads.
// Fix: the chunk size now derives from N and the block size instead of the
// hard-coded 97657 (which matched N = 100000000, not this file's
// N = 1000000, leaving all but ~11 threads idle -- result was still correct,
// but the work was not distributed).
__global__ void FindMax2(int *arr, int *max)
{
    int tid = threadIdx.x;
    const int chunk = (N + 1023) / 1024;
    int local_max = 0;
    for (int i = tid * chunk; i < (tid + 1) * chunk && i < N; i++)
    {
        if (local_max < arr[i])
        {
            local_max = arr[i];
        }
    }
    // Tree reduction over per-thread maxima; idle threads contribute 0,
    // a safe identity for the non-negative inputs from GenArray.
    __shared__ int arrSM[1024];
    arrSM[tid] = local_max;
    int reduccion = 1024 / 2;
    while (reduccion > 0)
    {
        __syncthreads();
        if (tid < reduccion && arrSM[tid] < arrSM[tid + reduccion])
            arrSM[tid] = arrSM[tid + reduccion];
        reduccion /= 2;
    }
    if (tid == 0)
        *max = arrSM[0];
}
//pregunta 3
// Multi-block reduction pass over arr[0..largo_arreglo-1] with 256-thread
// blocks. When more than one block is needed, block b leaves its partial
// maximum in arr[b] for the next pass; otherwise the final answer goes to
// *max. Fixes: every thread now reaches __syncthreads() (the barrier used
// to sit inside the `tid < largo_arreglo` guard -- undefined behaviour for
// a partial last block) and out-of-range shared slots are initialized to 0
// (a safe identity for the non-negative inputs) instead of being read
// uninitialized.
__global__ void FindMax3(int *arr, int *max, int largo_arreglo)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    __shared__ int arrSM[256];
    arrSM[threadIdx.x] = (tid < largo_arreglo) ? arr[tid] : 0;
    //reduccion
    int reduccion = 256 / 2;
    while (reduccion > 0)
    {
        __syncthreads();
        if (threadIdx.x < reduccion && arrSM[threadIdx.x] < arrSM[threadIdx.x + reduccion])
            arrSM[threadIdx.x] = arrSM[threadIdx.x + reduccion];
        reduccion /= 2;
    }
    if (threadIdx.x == 0)
    {
        if (largo_arreglo <= 256)
            *max = arrSM[0];            // single block left: publish result
        else
            arr[blockIdx.x] = arrSM[0]; // partial maximum for the next pass
    }
}
//pregunta 4
// Single-pass variant: each block reduces its slice in shared memory and
// thread 0 merges the block result into *max with atomicMax. *max must be
// zeroed before launch. Same barrier/initialization fixes as FindMax3.
__global__ void FindMax4(int *arr, int *max)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    __shared__ int arrSM[256];
    arrSM[threadIdx.x] = (tid < N) ? arr[tid] : 0;
    //reduccion
    int reduccion = 256 / 2;
    while (reduccion > 0)
    {
        __syncthreads();
        if (threadIdx.x < reduccion && arrSM[threadIdx.x] < arrSM[threadIdx.x + reduccion])
            arrSM[threadIdx.x] = arrSM[threadIdx.x + reduccion];
        reduccion /= 2;
    }
    if (threadIdx.x == 0)
        atomicMax(max, arrSM[0]);
}
/*-------------------------*/
// Benchmarks the CPU scan against three GPU reduction variants and prints
// elapsed time and maximum for each.
int main(int argc, char **argv)
{
    clock_t t1, t2;
    double ms;
    cudaEvent_t ct1, ct2;
    float dt;
    int *Ahost;
    int *A;
    int gs = 1, bs = 1024;
    GenArray(&Ahost);
    //CPU
    int max_total;
    t1 = clock();
    FindMax(Ahost, &max_total);
    t2 = clock();
    ms = 1000.0 * (double)(t2 - t1) / CLOCKS_PER_SEC;
    printf("Tiempo CPU: %f[ms]\n", ms);
    printf("Maximo: %d\n", max_total);
    /*
     * Pregunta 1 - Funcion CPU
     */
    bs = 1024;
    gs = 1;
    max_total = 0;
    int *max;
    cudaMalloc((void **)&max, sizeof(int));
    cudaMalloc((void **)&A, N * sizeof(int));
    cudaMemcpy(A, Ahost, N * sizeof(int), cudaMemcpyHostToDevice);
    /*
     * Pregunta 2 - Funcion GPU un bloque
     */
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);
    FindMax2<<<gs, bs>>>(A, max);
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    cudaMemcpy(&max_total, max, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Tiempo GPU Un Bloque: %f[ms]\n", dt);
    printf("Maximo: %d\n", max_total);
    /*
     * Pregunta 3 - Funcion GPU multiples bloques
     * Fix: the loop condition was `while (largo_arreglo < 256)`, which is
     * false for largo_arreglo = N, so the kernel never launched and the
     * previous answer was re-printed. Reduce repeatedly until one value
     * remains.
     */
    bs = 256;
    int largo_arreglo = N;
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);
    do
    {
        int nblocks = (int)ceil((float)largo_arreglo / bs);
        FindMax3<<<nblocks, bs>>>(A, max, largo_arreglo);
        largo_arreglo = nblocks;
    } while (largo_arreglo > 1);
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    cudaMemcpy(&max_total, max, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Tiempo GPU Multiples Bloques: %f[ms]\n", dt);
    printf("Maximo: %d\n", max_total);
    /*
     * Pregunta 4 - Funcion GPU multiples bloques con op atom
     * Fix: launch enough 256-thread blocks to cover all N elements (gs was
     * still 1) and clear *max first, since FindMax4 accumulates with
     * atomicMax. FindMax3 overwrote a prefix of A with partial maxima, which
     * does not change the global maximum.
     */
    gs = (N + bs - 1) / bs;
    cudaMemset(max, 0, sizeof(int));
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);
    FindMax4<<<gs, bs>>>(A, max);
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    cudaMemcpy(&max_total, max, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Tiempo GPU Multiples Bloques Op. Atom: %f[ms]\n", dt);
    printf("Maximo: %d\n", max_total);
    cudaFree(A);
    cudaFree(max); // fix: result buffer was never freed
    delete[] Ahost;
    return 0;
} |
14,324 | #include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/time.h>
#include <cuda.h>
#define BW 16 // Block Width
#define BH 32 // Block Height
#define COUNT 0
// Kernel Function handles first nested for loop
// Kernel Function handles first nested for loop
// One thread per pixel: replace each channel with the average of its
// in-bounds 4-neighbourhood (up/down/left/right). The divisor is the number
// of neighbours actually present -- 4 interior, 3 edge, 2 corner -- matching
// the original per-case arithmetic.
// Fix: the right-edge case (col == colsize-1) read column col+1, one past
// the end of the row (copy-paste of the left-edge case); neighbours are now
// bounds-checked uniformly.
__global__ void kernelBlur(int *d_Rnew, int *d_Gnew, int *d_Bnew, int *d_R, int *d_G, int *d_B, int rowsize, int colsize) {
    // Set-up
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    if (col < colsize && row < rowsize) {
        int sumR = 0, sumG = 0, sumB = 0, n = 0;
        if (row + 1 < rowsize) {   // neighbour below
            sumR += d_R[(row + 1) * colsize + col];
            sumG += d_G[(row + 1) * colsize + col];
            sumB += d_B[(row + 1) * colsize + col];
            n++;
        }
        if (row > 0) {             // neighbour above
            sumR += d_R[(row - 1) * colsize + col];
            sumG += d_G[(row - 1) * colsize + col];
            sumB += d_B[(row - 1) * colsize + col];
            n++;
        }
        if (col + 1 < colsize) {   // neighbour to the right
            sumR += d_R[row * colsize + (col + 1)];
            sumG += d_G[row * colsize + (col + 1)];
            sumB += d_B[row * colsize + (col + 1)];
            n++;
        }
        if (col > 0) {             // neighbour to the left
            sumR += d_R[row * colsize + (col - 1)];
            sumG += d_G[row * colsize + (col - 1)];
            sumB += d_B[row * colsize + (col - 1)];
            n++;
        }
        if (n > 0) {               // n == 0 only for a 1x1 image
            d_Rnew[row * colsize + col] = sumR / n;
            d_Gnew[row * colsize + col] = sumG / n;
            d_Bnew[row * colsize + col] = sumB / n;
        }
    }
}
// Kernel Function handles second nested for loop updates RGB values to new calculated values
// Kernel Function handles second nested for loop updates RGB values to new calculated values
// One thread per pixel: commit the blurred values back into the source
// buffers so the next iteration reads them.
__global__ void kernelCopy(int *d_Rnew, int *d_Gnew, int *d_Bnew, int *d_R, int *d_G, int *d_B, int rowsize, int colsize) {
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= rowsize || col >= colsize) return;
    const int p = row * colsize + col;
    d_R[p] = d_Rnew[p];
    d_G[p] = d_Gnew[p];
    d_B[p] = d_Bnew[p];
}
// Copies the RGB planes to the GPU, runs `nblurs` blur+copy iterations, and
// copies the result back, printing a timing for each phase.
// Fixes: the caller's nblurs is honoured when positive (it was always
// overwritten with 10 -- kept as the default since main passes 0), and the
// blur timing now waits for the asynchronous kernels to finish instead of
// measuring only launch overhead.
void performBlurs(int *h_R, int *h_G, int *h_B, int *h_Rnew, int *h_Gnew, int *h_Bnew, int rowsize, int colsize, int nblurs) {
    // Step 1: assign memory on GPU
    int k;
    int sizei = sizeof(int)*rowsize*colsize;
    int *d_R, *d_G, *d_B, *d_Rnew, *d_Gnew, *d_Bnew;
    struct timeval tim;
    gettimeofday(&tim, NULL);
    double t1=tim.tv_sec+(tim.tv_usec/1000000.0);
    cudaMalloc((void **)&d_R,sizei);
    cudaMalloc((void **)&d_G,sizei);
    cudaMalloc((void **)&d_B,sizei);
    cudaMalloc((void **)&d_Rnew,sizei);
    cudaMalloc((void **)&d_Gnew,sizei);
    cudaMalloc((void **)&d_Bnew,sizei);
    gettimeofday(&tim, NULL);
    double t2=tim.tv_sec+(tim.tv_usec/1000000.0);
    printf("Assigning Memory to GPU > %.6lf seconds elapsed\n", t2-t1);
    // Step 2: transfer to device
    gettimeofday(&tim, NULL);
    t1=tim.tv_sec+(tim.tv_usec/1000000.0);
    cudaMemcpy(d_R, h_R, sizei, cudaMemcpyHostToDevice);
    cudaMemcpy(d_G, h_G, sizei, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, sizei, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Rnew, h_Rnew, sizei, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Gnew, h_Gnew, sizei, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Bnew, h_Bnew, sizei, cudaMemcpyHostToDevice);
    t2=tim.tv_sec+(tim.tv_usec/1000000.0);
    printf("Transferring from host to device memory > %.6lf seconds elapsed\n", t2-t1);
    // Step 3: set up blocks and run the blur iterations
    dim3 dimGrid(ceil(colsize/(float)BW), ceil(rowsize/(float)BH), 1);
    dim3 dimBlock(BW,BH);
    if (nblurs <= 0) nblurs = 10; // default when the caller does not choose
    gettimeofday(&tim, NULL);
    t1=tim.tv_sec+(tim.tv_usec/1000000.0);
    for (k = 0; k < nblurs; ++k) {
        kernelBlur<<<dimGrid, dimBlock>>>(d_Rnew, d_Gnew, d_Bnew, d_R, d_G, d_B, rowsize, colsize);
        kernelCopy<<<dimGrid, dimBlock>>>(d_Rnew, d_Gnew, d_Bnew, d_R, d_G, d_B, rowsize, colsize);
    }
    // Fix: kernel launches are asynchronous; wait so the timing below
    // measures the blur itself.
    cudaDeviceSynchronize();
    t2=tim.tv_sec+(tim.tv_usec/1000000.0);
    printf("Blurring Operation > %.6lf seconds elapsed\n", t2-t1);
    // Step 4: copy result from GPU to host
    cudaMemcpy(h_R, d_R, sizei, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_G, d_G, sizei, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_B, d_B, sizei, cudaMemcpyDeviceToHost);
    // Step 5: free device memory
    cudaFree(d_R); cudaFree(d_G); cudaFree(d_B); cudaFree(d_Rnew); cudaFree(d_Gnew); cudaFree(d_Bnew);
}
// Reads a 521x428 RGB image stored as hex triplets in a PostScript-like file
// ("sample.ps"), blurs it on the GPU, and writes "sampleBlurCU.ps".
// NOTE(review): `maxlen` is declared but the buffers use the literal 200;
// the host buffers are never freed (reclaimed at process exit); and the
// `while(!feof(fp))` pattern re-processes the last line on a final failed
// fscanf -- confirm the input always ends cleanly.
int main (int argc, const char * argv[]) {
// Assignment of initial Variables
static int const maxlen = 200, rowsize = 521, colsize = 428, linelen = 12;
static char str[200], lines[5][200];
FILE *fp, *fout;
int nlines = 0;
unsigned int h1, h2, h3;
char *sptr;
// Define Host Arrays
int *h_R, *h_G, *h_B;
int *h_Rnew, *h_Gnew, *h_Bnew;
int size = sizeof(int) * rowsize * colsize;
h_R = (int *)malloc(size);
h_G = (int *)malloc(size);
h_B = (int *)malloc(size);
h_Rnew = (int *)malloc(size);
h_Gnew = (int *)malloc(size);
h_Bnew = (int *)malloc(size);
// Allocate Overall Size of ROw
int row = 0, col = 0, nblurs = 0, lineno=0, k;
// Read input file: first 5 lines are kept verbatim as the header, the rest
// is a stream of 6-hex-digit RGB pixels parsed 3 bytes at a time.
struct timeval tim;
gettimeofday(&tim, NULL);
double t1=tim.tv_sec+(tim.tv_usec/1000000.0);
fp = fopen("sample.ps", "r");
while(! feof(fp))
{
fscanf(fp, "\n%[^\n]", str);
if (nlines < 5) {strcpy((char *)lines[nlines++],(char *)str);}
else{
for (sptr=&str[0];*sptr != '\0';sptr+=6){
sscanf(sptr,"%2x",&h1);
sscanf(sptr+2,"%2x",&h2);
sscanf(sptr+4,"%2x",&h3);
if (col==colsize){
col = 0;
row++;
}
if (row < rowsize) {
h_R[row * colsize + col] = h1;
h_G[row * colsize + col] = h2;
h_B[row * colsize + col] = h3;
}
col++;
}
}
}
fclose(fp);
gettimeofday(&tim, NULL);
double t2=tim.tv_sec+(tim.tv_usec/1000000.0);
printf("Reading Input File > %.6lf seconds elapsed\n", t2-t1);
// Run the GPU blur (nblurs == 0 lets performBlurs use its default count)
performBlurs(h_R, h_G, h_B, h_Rnew, h_Gnew, h_Bnew, rowsize, colsize, nblurs);
gettimeofday(&tim, NULL);
t1=tim.tv_sec+(tim.tv_usec/1000000.0);
// Write header lines back, then pixels as hex triplets, wrapping every
// `linelen` pixels.
fout= fopen("sampleBlurCU.ps", "w");
for (k=0;k<nlines;k++) fprintf(fout,"\n%s", lines[k]);
fprintf(fout,"\n");
for(row=0;row<rowsize;row++){
for (col=0;col<colsize;col++){
fprintf(fout,"%02x%02x%02x",h_R[row * colsize + col],h_G[row * colsize + col],h_B[row * colsize + col]);
lineno++;
if (lineno==linelen){
fprintf(fout,"\n");
lineno = 0;
}
}
}
gettimeofday(&tim, NULL);
t2=tim.tv_sec+(tim.tv_usec/1000000.0);
printf("Outputting File > %.6lf seconds elapsed\n", t2-t1);
fclose(fout);
return 0;
}
|
14,325 | /*
number of mathematical operations (only floating point)
operation flo/o total
+-* : 27 1 27
/ : 2 4 8
pow : 1 13 13
sum 48
*/
/*
 * M2M_KERNEL_CORE: kernel body fragment for a fast-multipole-method
 * multipole-to-multipole translation. Three phases, each followed by a
 * block-wide barrier around the shared vecj update:
 *   1) rotate the child expansion by the D^n_{mk} coefficients stored in
 *      lvec at offset (je-1)*mpdnm;
 *   2) shift the rotated expansion along the axis by rh using the A_{nk}
 *      normalisation factors and the Y_{nm} terms (ynmre/ynmim);
 *   3) rotate back with the coefficients at offset (je+nrbm-1)*mpdnm.
 * NOTE(review): relies on vecj/vecd/lvec/ynmre/ynmim, the index tables
 * ng/mg, and scalars (rb, je, mpdnm, nrbm, tx, loop variables) being
 * declared by the enclosing kernel -- confirm against the kernels that
 * expand this macro. The `pow(-1.0, n)` sign factors and factorial loops
 * are double-precision; the code intentionally mixes them with sqrtf/rsqrtf.
 */
#define M2M_KERNEL_CORE \
rh=rb*sqrtf(3.0f)/4;\
jbase=(je-1)*mpdnm;\
n=ng[tx];\
m=mg[tx];\
nms=n*(n+1)/2+m;\
for(i=0;i<2;i++) vecd[i]=0;\
for(k=-n;k<0;k++){\
nks=n*(n+1)/2-k;\
nmk=jbase+(4*n*n*n+6*n*n+5*n)/3+m*(2*n+1)+k;\
dnmre=lvec[2*nmk+0];\
dnmim=lvec[2*nmk+1];\
vecd[0]+=dnmre*vecj[2*nks+0];\
vecd[0]+=dnmim*vecj[2*nks+1];\
vecd[1]-=dnmre*vecj[2*nks+1];\
vecd[1]+=dnmim*vecj[2*nks+0];\
}\
for(k=0;k<=n;k++){\
nks=n*(n+1)/2+k;\
nmk=jbase+(4*n*n*n+6*n*n+5*n)/3+m*(2*n+1)+k;\
dnmre=lvec[2*nmk+0];\
dnmim=lvec[2*nmk+1];\
vecd[0]+=dnmre*vecj[2*nks+0];\
vecd[0]-=dnmim*vecj[2*nks+1];\
vecd[1]+=dnmre*vecj[2*nks+1];\
vecd[1]+=dnmim*vecj[2*nks+0];\
}\
__syncthreads();\
for(i=0;i<2;i++) vecj[2*nms+i]=vecd[i];\
__syncthreads();\
j=ng[tx];\
k=mg[tx];\
jks=j*(j+1)/2+k;\
for(i=0;i<2;i++) vecd[i]=0;\
fnmm=1.0;\
for(i=0;i<j-k;i++) fnmm=fnmm*(i+1);\
fnpm=1.0;\
for(i=0;i<j+k;i++) fnpm=fnpm*(i+1);\
ajk=pow(-1.0,j)*rsqrtf(fnmm*fnpm);\
for(n=0;n<=j-abs(k);n++){\
nks=(j-n)*(j-n+1)/2+k;\
jnk=n*n+n;\
fnmm=1.0;\
for(i=0;i<j-n-k;i++) fnmm=fnmm*(i+1);\
fnpm=1.0;\
for(i=0;i<j-n+k;i++) fnpm=fnpm*(i+1);\
ank=pow(-1.0,j-n)*rsqrtf(fnmm*fnpm);\
fnpm=1.0;\
for(i=0;i<n;i++) fnpm=fnpm*(i+1);\
ajn=pow(-1.0,n)/fnpm;\
sr=pow(-1.0,n)*ank*ajn/ajk;\
cnmre=sr*ynmre[jnk]*pow(rh,n);\
cnmim=sr*ynmim[jnk]*pow(rh,n);\
vecd[0]+=vecj[2*nks+0]*cnmre;\
vecd[0]-=vecj[2*nks+1]*cnmim;\
vecd[1]+=vecj[2*nks+0]*cnmim;\
vecd[1]+=vecj[2*nks+1]*cnmre;\
}\
__syncthreads();\
for(i=0;i<2;i++) vecj[2*jks+i]=vecd[i];\
__syncthreads();\
jbase=(je+nrbm-1)*mpdnm;\
n=ng[tx];\
m=mg[tx];\
nms=n*(n+1)/2+m;\
for(i=0;i<2;i++) vecd[i]=0;\
for(k=-n;k<0;k++){\
nks=n*(n+1)/2-k;\
nmk=jbase+(4*n*n*n+6*n*n+5*n)/3+m*(2*n+1)+k;\
dnmre=lvec[2*nmk+0];\
dnmim=lvec[2*nmk+1];\
vecd[0]+=dnmre*vecj[2*nks+0];\
vecd[0]+=dnmim*vecj[2*nks+1];\
vecd[1]-=dnmre*vecj[2*nks+1];\
vecd[1]+=dnmim*vecj[2*nks+0];\
}\
for(k=0;k<=n;k++){\
nks=n*(n+1)/2+k;\
nmk=jbase+(4*n*n*n+6*n*n+5*n)/3+m*(2*n+1)+k;\
dnmre=lvec[2*nmk+0];\
dnmim=lvec[2*nmk+1];\
vecd[0]+=dnmre*vecj[2*nks+0];\
vecd[0]-=dnmim*vecj[2*nks+1];\
vecd[1]+=dnmre*vecj[2*nks+1];\
vecd[1]+=dnmim*vecj[2*nks+0];\
}
|
14,326 | /*
* Prashant Solanki (Unity: psolank)
* Simple Image convolutions implementation without tiling
* Convolutions mask is stored in constant memory
* Tested with CUDA Toolkit 3.0
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//#include <cuda.h>
#define BUF_SIZE 200000
#define ThreadsPerBlockX 8
#define ThreadsPerBlockY 8
/* Minimal CUDA error check. Fixes: __LINE__ is an int, so print it with %d (was %s, undefined behaviour), and parenthesize X so expression arguments keep the intended comparison. */
#define CUDA_CALL(X) if(cudaSuccess != (X)) printf("Call Failed at %d\n",__LINE__);
int count_cols(char *buff);
void count_rc(char *fname, int *r1, int *c1, int *r2, int *c2);
void print2D(float *arr, int r, int c);
float* alloc2D(int r, int c);
void free2D(float *arr);
void parse2D(FILE *f, float *arr, int r, int c);
void parse2DPadded(FILE *f, float *arr, int r, int c, int px, int py);
void flip_kernel(float * arr, int r, int c);
// Constant cache memory to store convolution mask and its size
__constant__ float dMask[100];
__constant__ int dHalfW;
__constant__ int dHalfH;
// kernel to convolve image with mask
// one thread processes one pixel in input image
// kernel to convolve image with mask
// one thread processes one pixel in input image
// The mask lives in constant memory (dMask, row-major), with half-extents
// dHalfW along x and dHalfH along y; taps falling outside the image
// contribute zero (the padded input makes this the full-convolution result).
__global__ void conv2DKernel(float *in, float *out, int r1, int c1) {
int i,j; int x,y;
int maskIndex = 0;
// computing row and column of pixel for which convolution is to be done
int r = blockIdx.y*blockDim.y + threadIdx.y;
int c = blockIdx.x*blockDim.x + threadIdx.x;
float acc = 0.0f;
// accessing neighbouring pixels and multiplying with mask
for(i = -dHalfH; i <= dHalfH; i++){
for(j = -dHalfW; j <= dHalfW; j++){
x = c + j;
y = r + i;
// condition to check if element is outside the image
if(x >= 0 && x < c1 && y >= 0 && y < r1){
acc = acc + (dMask[maskIndex] * in[ y*c1 + x ]);
}
maskIndex++;
}
}
// condition to check if element is outside image
if(r < r1 && c < c1){
out[ r*c1 + c ] = acc;
}
}
// Reads an image matrix and a convolution mask from the file named in
// argv[1], performs a full 2D convolution on the GPU, and prints the result.
// Fix: the mask half-extents were copied to the wrong constant symbols --
// dHalfW (x direction) received the half ROW count and dHalfH the half
// COLUMN count, mis-shaping the window for non-square masks.
int main(int argc, char **argv) {
    float *hInput;
    float *hMask;
    float *hOutput;
    float *dInput;
    float *dOutput;
    int r1,c1,r2,c2, R, C;
    FILE *fptr;
    if(argc < 2) { printf(" Please specify input filename\n"); return -1;}
    // Finding dimensions of the two input matrices
    count_rc(argv[1],&r1, &c1, &r2, &c2);
    if(r1 == 0) return -1;
    // computing dimensions of output matrix (full convolution)
    R = (r1 + r2) -1;
    C = (c1 + c2) -1;
    // allocating and zeroing the padded input matrix
    hInput = alloc2D(R, C);
    memset(hInput, 0, sizeof(float)*R*C);
    // allocating mask (constant-memory slot is 10x10)
    hMask = alloc2D(10, 10);
    // allocating output matrix
    hOutput = alloc2D(R, C);
    // opening input file
    fptr = fopen(argv[1], "rb");
    // parsing first matrix within the padded region defined as c2/2 and r2/2
    parse2DPadded(fptr, hInput, r1, c1, c2/2, r2/2);
    // parsing mask
    parse2D(fptr, hMask, r2, c2);
    fclose(fptr);
    // flipping kernel vertically and horizontally (convolution, not correlation)
    flip_kernel(hMask, r2, c2);
    // half-extents: r2 = half rows (y), c2 = half columns (x)
    r2 = r2/2;
    c2 = c2/2;
    // allocating gpu memory
    CUDA_CALL(cudaMalloc((void**)&dInput, R*C*sizeof(float)));
    CUDA_CALL(cudaMalloc((void**)&dOutput, R*C*sizeof(float)));
    // Copy memory to the GPU
    CUDA_CALL(cudaMemcpy(dInput, hInput, sizeof(float)*R*C, cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpyToSymbol(dMask, hMask, sizeof(float)*10*10, 0, cudaMemcpyHostToDevice));
    // Fix: dHalfW is the x (column) half-extent and dHalfH the y (row)
    // half-extent; these two copies were previously swapped.
    CUDA_CALL(cudaMemcpyToSymbol(dHalfW, (const int*)&c2, sizeof(int), 0, cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpyToSymbol(dHalfH, (const int*)&r2, sizeof(int), 0, cudaMemcpyHostToDevice));
    // Initialize the grid and block dimensions
    dim3 numThreads(ThreadsPerBlockX,ThreadsPerBlockY,1);
    dim3 numBlocks( ((C-1)/ThreadsPerBlockX)+1, ((R-1)/ThreadsPerBlockY)+1, 1 );
    // Launch the GPU Kernel
    conv2DKernel<<<numBlocks, numThreads>>>(dInput, dOutput, R, C);
    CUDA_CALL(cudaThreadSynchronize());
    // Copy the GPU memory back to the CPU
    CUDA_CALL(cudaMemcpy(hOutput, dOutput, R*C*sizeof(float), cudaMemcpyDeviceToHost));
    // free the GPU memory
    CUDA_CALL(cudaFree(dInput));
    CUDA_CALL(cudaFree(dOutput));
    // printing result
    print2D(hOutput, R, C);
    // free the host memory
    free2D(hInput);
    free2D(hMask);
    free2D(hOutput);
    return 0;
}
// count number of rows and columns for the given input file
// count number of rows and columns for the given input file
// The file holds two whitespace-separated float matrices divided by a blank
// line: (*r1, *c1) describe the first, (*r2, *c2) the second. Column counts
// come from each matrix's first line. All outputs are zero on open failure.
// NOTE(review): the second loop mixes fgets/feof in a way that counts a
// final unterminated line specially -- confirm against files lacking a
// trailing newline.
void count_rc(char *fname, int *r1, int *c1, int *r2, int *c2)
{
*r1 = 0; *c1 = 0; *r2 = 0; *c2 =0;
char *buff = (char*)malloc(BUF_SIZE);
FILE *f = fopen(fname, "rb");
if(f == NULL){ printf("Unable to open file %s\n",fname); free(buff); return; }
fgets(buff, BUF_SIZE, f);
*c1 = count_cols(buff);
while(strlen(buff) > 1){
(*r1)++;
fgets(buff, BUF_SIZE, f);
}
fgets(buff, BUF_SIZE, f);
*c2 = count_cols(buff);
while(strlen(buff) > 1){
(*r2)++;
if(NULL == fgets(buff, BUF_SIZE, f)) break;
if((feof(f)) && (strlen(buff) > 1) ){(*r2)++; break;}
}
free(buff);
fclose(f);
}
// count number of columns in given buffer
// count number of columns in given buffer
// Counts whitespace-separated tokens on one text line: starts at 1 and adds
// one for each space that is followed by a non-space, non-EOL character.
// Fix: the original compared `i < strlen(buff)-1` with int i; for an empty
// buffer strlen(buff)-1 underflows to SIZE_MAX and the loop reads out of
// bounds. The length is now computed once and kept signed.
int count_cols(char *buff)
{
    int n = 1;
    int len = (int)strlen(buff);
    for (int i = 0; i < len - 1; i++)
    {
        if (buff[i] == ' ') {
            if (buff[i+1] != '\n' && buff[i+1] != '\r' && buff[i+1] != ' ') {
                n++;
            }
        }
    }
    return n;
}
// print a 2D matrix
// print a 2D matrix (r rows x c columns, row-major), space-separated,
// one row per line.
// Fix: elements were indexed as arr[i*r + j]; the row stride of a row-major
// r x c matrix is c, so non-square matrices printed wrong values and read
// out of bounds.
void print2D(float *arr, int r, int c)
{
    int i,j;
    for(i=0; i<r; i++){
        for(j=0; j<c; j++){
            if(j>0) printf(" ");
            printf("%f",arr[ i*c + j]);
        }
        printf("\n");
    }
}
// allocate memory for matrix of size rxc
/* Allocate an uninitialized row-major r x c float matrix (free with free2D). */
float* alloc2D(int r, int c)
{
    void *buf = malloc( r*c*sizeof(float) );
    return (float*)buf;
}
// free memory
/* Release a matrix previously obtained from alloc2D. */
void free2D(float *arr)
{
    free(arr);
}
// parsing a matrix of size rxc
/* Read r*c whitespace-separated floats from f into arr, row-major order. */
void parse2D(FILE *f, float *arr, int r, int c)
{
    int idx;
    int total = r * c;
    for (idx = 0; idx < total; idx++) {
        fscanf( f, "%f", &arr[idx] );
    }
}
/* Read an r x c float matrix from f into the interior of a zero-padded
 * buffer with px columns of padding on each side and py rows on top and
 * bottom (row stride c + 2*px). */
void parse2DPadded(FILE *f, float *arr, int r, int c, int px, int py)
{
    int row, col;
    int stride = c + 2*px;
    float *dst = arr + py*stride + px;
    for (row = 0; row < r; row++) {
        for (col = 0; col < c; col++) {
            fscanf( f, "%f", &dst[row*stride + col] );
        }
    }
}
// Rotate the mask 180 degrees in place (flip vertically and horizontally).
// Fix: the original iterated rows with i <= r/2, which swapped each mirrored
// row pair twice for even r (undoing the flip entirely) and left the middle
// row of an odd-r mask un-reversed. Swapping element k with element
// (r*c-1-k) over the first half of the flattened array performs the exact
// 180-degree rotation for any r x c shape.
void flip_kernel(float * arr, int r, int c)
{
    int total = r * c;
    int k;
    for (k = 0; k < total/2; k++) {
        float tmp = arr[k];
        arr[k] = arr[total-1-k];
        arr[total-1-k] = tmp;
    }
}
|
14,327 | /*****************************************************************************
* \file calculateForce.h
* \author Christopher Minar
* \brief Declaration of the kernels to solve the structure equation
* Curvilinear immersed boundary method for simulating fluid structure interaction with complex 3D rigid bodies
Iman Borazjani, Liang Geb, Fotis Sotiropoulos
*/
//not sure if needed
// Stub kernel for the structure (rigid-body motion) solve; the commented
// body sketches one-thread-per-body indexing with a totalPoints bound.
// NOTE(review): intentionally empty -- the "not sure if needed" comment
// above suggests this entry point may be removed.
__global__ \
void solveStructureHost()
{
/*
int bodyIdx = threadIdx.x+blockIdx.x*blockDim.x;
if (bodyIdx >= totalPoints)
return;
*/
}
|
14,328 |
#include "TimerGPU.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
// Pair of CUDA events bracketing a timed interval (created lazily by
// TimerGPU::StartCounter).
struct CUDAEventTimer
{
cudaEvent_t start;
cudaEvent_t stop;
};
// Allocate the event-pair holder; the events themselves are created in
// StartCounter().
TimerGPU::TimerGPU()
{
    this->_timer = new CUDAEventTimer();
}
// Fix: release the CUDAEventTimer allocated in the constructor (was leaked).
// NOTE(review): cudaEventDestroy is never called anywhere in this file;
// events created by StartCounter still leak -- confirm intended lifetime.
TimerGPU::~TimerGPU() { delete _timer; }
// Begin timing on the default stream by recording the start event.
// NOTE(review): a fresh event pair is created on every call and never
// destroyed (no cudaEventDestroy in this file) -- repeated StartCounter
// calls leak CUDA events; confirm intended usage.
void TimerGPU::StartCounter()
{
cudaEventCreate(&((*_timer).start));
cudaEventCreate(&((*_timer).stop));
cudaEventRecord((*_timer).start,0);
}
// Stop timing and return elapsed milliseconds since StartCounter().
// cudaEventSynchronize blocks until the stop event completes on the device,
// so preceding asynchronous GPU work is included in the measurement.
float TimerGPU::GetCounter()
{
float time;
cudaEventRecord((*_timer).stop, 0);
cudaEventSynchronize((*_timer).stop);
cudaEventElapsedTime(&time,(*_timer).start,(*_timer).stop);
return time;
}
|
14,329 | #include "includes.h"
// Threshold filter: for each (batch, class, index) element, keep the input
// value and its index when it meets the per-class threshold, otherwise
// write sentinel values (-1) so a later compaction step can drop it.
// Launch layout: blockIdx.z = batch, blockIdx.y = class, x covers indices.
// NOTE(review): `threadIdx.x & 0x1f` keeps only the low 5 bits (the lane
// id), so any launch with blockDim.x > 32 makes threads alias the same
// index -- confirm this kernel is always launched with warp-sized blocks.
__global__ void cudaSReduceIndex_kernel( const unsigned int inputSize, const unsigned int inputBatchOffset, const unsigned int outputBatchOffset, const float* valueThreshold, const float* inputs, int* outputMap, float* scores)
{
const int batchPos = blockIdx.z;
const int clsPos = blockIdx.y;
const int index = (threadIdx.x & 0x1f) + blockIdx.x*blockDim.x;
const int inputIndex = index
+ inputSize*clsPos
+ batchPos*inputBatchOffset;
const int outputIndex = index
+ inputSize*clsPos
+ batchPos*outputBatchOffset;
if(index < inputSize)
{
float value = inputs[inputIndex];
if(value >= valueThreshold[clsPos])
{
outputMap[outputIndex] = index;
scores[outputIndex] = value;
}
else
{
// sentinel: element rejected by the threshold
outputMap[outputIndex] = -1;
scores[outputIndex] = -1.0;
}
}
} |
14,330 | //
// Created by root on 2020/12/1.
//
#include "stdio.h"
#include "stdlib.h"
#include "cuda_runtime.h"
#define W 32
#define H 32
#define D 32
#define TX 8
#define TY 8
#define TZ 8
// Number of blocks of size b needed to cover a elements (ceiling division;
// callers pass positive extents).
int divUp(int a, int b) {
    const int rounded = a + b - 1;
    return rounded / b;
}
// Euclidean distance from integer grid point (c, r, s) to pos.
__device__ float distance(int c, int r, int s, float3 pos) {
    const float dx = c - pos.x;
    const float dy = r - pos.y;
    const float dz = s - pos.z;
    return sqrtf(dx * dx + dy * dy + dz * dz);
}
// One thread per voxel of a w x h x d volume: write the distance from the
// voxel to pos into d_out (row-major: x fastest, then y, then z).
// Out-of-range threads exit before touching memory.
__global__ void distanceKernel(float *d_out, int w, int h, int d, float3 pos) {
    const int c = blockDim.x * blockIdx.x + threadIdx.x;
    const int r = blockDim.y * blockIdx.y + threadIdx.y;
    const int s = blockDim.z * blockIdx.z + threadIdx.z;
    if (c >= w || r >= h || s >= d) {
        return;
    }
    d_out[c + w * (r + h * s)] = distance(c, r, s, pos);
}
// Fill a W x H x D volume with each voxel's distance to `pos`, copy it back
// to the host, release the buffers and exit.
int main() {
float *out = (float *) malloc(W * H * D * sizeof(float ));
float *d_out;
cudaMalloc(&d_out, W * H * D * sizeof(float ));
float3 pos = {0.0f, 0.0f, 0.0f};
dim3 block(TX, TY, TZ);
// divUp rounds the grid up so partial blocks cover the volume edges
dim3 grid(divUp(W, TX), divUp(H, TY), divUp(D, TZ));
distanceKernel<<<grid, block>>>(d_out, W, H, D, pos);
// cudaMemcpy synchronizes with the kernel before copying
cudaMemcpy(out, d_out, W * H * D * sizeof(float ), cudaMemcpyDeviceToHost);
cudaFree(d_out);
free(out);
return 0;
} |
14,331 | /*
World using CUDA
**
** The string "Hello World!" is mangled then restored using a common CUDA idiom
**
** Byron Galbraith
** 2009-02-18
*/
#include <cuda.h>
#include <stdio.h>
// Prototypes
extern "C" __global__ void helloWorld(char*);
// Host function
// Host function
// Mangles "Hello World!" on the host (subtracting each character's index),
// then launches 12 GPU threads -- 2 blocks of 6, one per character -- to
// restore and print it.
int
main(int argc, char** argv)
{
    // desired output; mangle everything except the terminating '\0'
    char str[] = "Hello World!";
    for (int i = 0; i < 12; i++) {
        str[i] -= i;
    }
    // device-side copy of the mangled string
    char *d_str;
    const size_t size = sizeof(str);
    cudaMalloc((void**)&d_str, size);
    cudaMemcpy(d_str, str, size, cudaMemcpyHostToDevice);
    // one block per word, one thread per character
    dim3 dimGrid(2);
    dim3 dimBlock(6);
    helloWorld<<< dimGrid, dimBlock >>>(d_str);
    // copy the restored string back (cudaMemcpy waits for the kernel)
    cudaMemcpy(str, d_str, size, cudaMemcpyDeviceToHost);
    cudaFree(d_str);
    // everyone's favorite part
    printf("%s\n", str);
    return 0;
}
// Device kernel
// Device kernel
// Each thread restores one character by adding its global index back,
// undoing the host-side mangling.
__global__ void
helloWorld(char* str)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    str[idx] += idx;
}
|
14,332 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
// Course-assignment stub: intended to gather per-edge results from `src`
// into `output` using offsets in `scanResult` (presumably a prefix scan --
// TODO confirm against the assignment spec). Body not yet implemented.
__global__ void collateSegments_gpu(int * src, int * scanResult, int * output, int numEdges) {
/*YOUR CODE HERE*/
}
|
14,333 | //This program uses the host CURAND API.
/*
rm timing_curand_tpb512.csv
for size in 1 10 100 10000 100000 1000000 10000000 100000000; do \
for name in "uniform_float" "uniform_double" "uniform_float_accurate" \
"uniform_double_accurate" "gaussian_float" "gaussian_double" "lognormal_float" \
"bits_int" "uniform_int" ; do \
./test_curand_philox.exe 100 ${size} ${name} >> timing_curand_tpb512.csv; \
done; \
done;
*/
#include <chrono>
#include <iostream>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <cuda.h>
#include <curand.h>
#include <unistd.h>
#define THREADS_PER_BLOCK 256
#define UNIFORM_ARGS_FLOAT -1.0f, 5.0f
#define UNIFORM_ARGS_DOUBLE -1.0, 5.0
#define UNIFORM_ARGS_INT -1, 5
#define GAUSSIAN_ARGS_FLOAT -1.0f, 5.0f
#define GAUSSIAN_ARGS_DOUBLE -1.0, 5.0
#define LOGNORMAL_ARGS_FLOAT -1.0f, 5.0f, 1.0f, 2.0f
#define LOGNORMAL_ARGS_DOUBLE -1.0, 5.0, 1.0, 2.0
#define BERNOULLI_ARGS 0.5f
#define POISSON_ARGS 0.5
// Value to initialize random number generator
#define SEED 123456ULL
#define CUDA_CALL(x) do { \
cudaError_t err = x; \
if (err!=cudaSuccess) { \
printf("Error %d at %s:%d\n",err,__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { \
curandStatus_t err = x; \
if (err !=CURAND_STATUS_SUCCESS) { \
printf("Error %d at %s:%d\n",err,__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
template<typename Type>
__global__ void range_transform(std::uint64_t n, Type* devData, Type a, Type b) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n) {
devData[tid] = devData[tid] * (b - a) + a;
}
}
// Map raw 32-bit uniform samples onto integers in [a, b) via modulo.
// NOTE(review): the modulo introduces bias unless (b - a) evenly divides
// 2^32, and b == a divides by zero -- confirm callers guarantee b > a and
// accept the bias.
template<typename Type>
__global__ void range_transform_int(std::uint64_t n, std::uint32_t* uniformData, Type* devData, Type a, Type b) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n) {
devData[tid] = a + uniformData[tid] % (b - a);
}
}
/*
 * Per-iteration boilerplate shared by every distribution benchmarked in
 * generate(): create+seed the generator, allocate host/device buffers,
 * time the generation phase, copy back, and release everything.
 *   setup    - runs just before the timed region (extra allocations)
 *   run      - the timed work: cuRAND generation (+ optional transform kernel)
 *   teardown - releases anything setup created
 * Each hook returns EXIT_SUCCESS or an error code, which is propagated.
 */
template <typename Type, typename SetupFn, typename RunFn, typename TeardownFn>
static int benchmark_distribution(size_t n_iters, size_t n_points,
                                  std::vector<std::chrono::duration<double> > &times_vec,
                                  std::vector<std::chrono::duration<double> > &kernel_times_vec,
                                  SetupFn &&setup, RunFn &&run, TeardownFn &&teardown) {
    for (size_t it = 0; it < n_iters; ++it) {
        const auto start_tot = std::chrono::high_resolution_clock::now();
        curandGenerator_t gen;
        /* Create the pseudo-random generator selected at compile time. */
#ifdef USE_PHILOX
        CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_PHILOX4_32_10));
#elif defined USE_MRG
        CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MRG32K3A));
#else
        CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MRG32K3A));
#endif
        CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, SEED));
        Type *devData = NULL;
        Type *hostData = (Type *)calloc(n_points, sizeof(Type));
        CUDA_CALL(cudaMalloc((void **)&devData, n_points * sizeof(Type)));
        int rc = setup();
        if (rc != EXIT_SUCCESS) return rc;
        const auto start_kernel = std::chrono::high_resolution_clock::now();
        rc = run(gen, devData);
        if (rc != EXIT_SUCCESS) return rc;
        CUDA_CALL(cudaDeviceSynchronize());
        const auto end_kernel = std::chrono::high_resolution_clock::now();
        kernel_times_vec.push_back(end_kernel - start_kernel);
        /* FIX: the original copied n_points*sizeof(float) for every type,
         * silently truncating the copy-back for the double distributions. */
        CUDA_CALL(cudaMemcpy(hostData, devData, n_points * sizeof(Type),
                             cudaMemcpyDeviceToHost));
        CUDA_CALL(cudaFree(devData));
        free(hostData);
        rc = teardown();
        if (rc != EXIT_SUCCESS) return rc;
        const auto end_tot = std::chrono::high_resolution_clock::now();
        times_vec.push_back(end_tot - start_tot);
        CURAND_CALL(curandDestroyGenerator(gen));
    }
    return EXIT_SUCCESS;
}

/* Population standard deviation of the duration samples around `mean`
 * (seconds). FIX: the original divided by sizeof(vector) — the byte size of
 * the std::vector object itself (~24) — instead of the sample count. */
static double population_stddev(const std::vector<std::chrono::duration<double> > &samples,
                                double mean) {
    if (samples.empty()) return 0.0;
    double var = 0.0;
    for (size_t k = 0; k < samples.size(); ++k) {
        const double d = samples[k].count() - mean;
        var += d * d;
    }
    return std::sqrt(var / samples.size());
}

/*
 * Benchmark n_iters batches of n_points samples of the distribution named by
 * `name` and print one CSV line:
 *   name,n_iters,n_points,total,mean,stddev,kernel_total,kernel_mean,kernel_stddev
 * Unknown names print a diagnostic and return EXIT_SUCCESS (as before);
 * CUDA/cuRAND failures return EXIT_FAILURE.
 */
int generate(size_t n_iters, size_t n_points, std::string name = "") {
    std::vector<std::chrono::duration<double> > times_vec;
    std::vector<std::chrono::duration<double> > kernel_times_vec;
    const unsigned int nblocks = (n_points + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    auto no_hook = []() -> int { return EXIT_SUCCESS; };
    int rc = EXIT_SUCCESS;
    if (name == "uniform_float") {
        using Type = float;
        rc = benchmark_distribution<Type>(n_iters, n_points, times_vec, kernel_times_vec,
            no_hook,
            [&](curandGenerator_t gen, Type *devData) -> int {
                CURAND_CALL(curandGenerateUniform(gen, devData, n_points));
                range_transform<Type><<<nblocks, THREADS_PER_BLOCK>>>(n_points, devData, UNIFORM_ARGS_FLOAT);
                return EXIT_SUCCESS;
            },
            no_hook);
    } else if (name == "uniform_double") {
        using Type = double;
        rc = benchmark_distribution<Type>(n_iters, n_points, times_vec, kernel_times_vec,
            no_hook,
            [&](curandGenerator_t gen, Type *devData) -> int {
                CURAND_CALL(curandGenerateUniformDouble(gen, devData, n_points));
                range_transform<Type><<<nblocks, THREADS_PER_BLOCK>>>(n_points, devData, UNIFORM_ARGS_DOUBLE);
                return EXIT_SUCCESS;
            },
            no_hook);
    } else if (name == "gaussian_float") {
        using Type = float;
        rc = benchmark_distribution<Type>(n_iters, n_points, times_vec, kernel_times_vec,
            no_hook,
            [&](curandGenerator_t gen, Type *devData) -> int {
                /* n_points must be even (Box-Muller pairs); main() guarantees it. */
                CURAND_CALL(curandGenerateNormal(gen, devData, n_points, GAUSSIAN_ARGS_FLOAT));
                return EXIT_SUCCESS;
            },
            no_hook);
    } else if (name == "gaussian_double") {
        using Type = double;
        rc = benchmark_distribution<Type>(n_iters, n_points, times_vec, kernel_times_vec,
            no_hook,
            [&](curandGenerator_t gen, Type *devData) -> int {
                CURAND_CALL(curandGenerateNormalDouble(gen, devData, n_points, GAUSSIAN_ARGS_DOUBLE));
                return EXIT_SUCCESS;
            },
            no_hook);
    } else if (name == "lognormal_float") {
        using Type = float;
        rc = benchmark_distribution<Type>(n_iters, n_points, times_vec, kernel_times_vec,
            no_hook,
            [&](curandGenerator_t gen, Type *devData) -> int {
                CURAND_CALL(curandGenerateLogNormal(gen, devData, n_points, 0.0f, 1.0f));
                range_transform<Type><<<nblocks, THREADS_PER_BLOCK>>>(n_points, devData, -1.0f, 5.0f);
                return EXIT_SUCCESS;
            },
            no_hook);
    } else if (name == "bits_int") {
        using Type = std::uint32_t;
        rc = benchmark_distribution<Type>(n_iters, n_points, times_vec, kernel_times_vec,
            no_hook,
            [&](curandGenerator_t gen, Type *devData) -> int {
                CURAND_CALL(curandGenerate(gen, devData, n_points));
                return EXIT_SUCCESS;
            },
            no_hook);
    } else if (name == "uniform_int") {
        using Type = std::int32_t;
        /* Raw-bits scratch buffer, allocated outside the timed region as in
         * the original code. */
        std::uint32_t *tempData = NULL;
        rc = benchmark_distribution<Type>(n_iters, n_points, times_vec, kernel_times_vec,
            [&]() -> int {
                CUDA_CALL(cudaMalloc((void **)&tempData, n_points * sizeof(std::uint32_t)));
                return EXIT_SUCCESS;
            },
            [&](curandGenerator_t gen, Type *devData) -> int {
                CURAND_CALL(curandGenerate(gen, tempData, n_points));
                CUDA_CALL(cudaDeviceSynchronize());
                range_transform_int<Type><<<nblocks, THREADS_PER_BLOCK>>>(n_points, tempData, devData, -1, 5);
                return EXIT_SUCCESS;
            },
            /* FIX: the original never freed tempData — leaked every iteration. */
            [&]() -> int {
                CUDA_CALL(cudaFree(tempData));
                tempData = NULL;
                return EXIT_SUCCESS;
            });
    } else {
        std::cout << "unknown distr_type\n";
        return EXIT_SUCCESS;
    }
    if (rc != EXIT_SUCCESS) return rc;
    /* Aggregate timings. FIX: the accumulators are now explicitly
     * zero-initialized; a default-constructed std::chrono::duration has an
     * uninitialized tick count, so the original summed garbage. */
    std::chrono::duration<double> tot_time = std::chrono::duration<double>::zero();
    std::chrono::duration<double> kernel_tot_time = std::chrono::duration<double>::zero();
    for (size_t k = 0; k < times_vec.size(); ++k) tot_time += times_vec[k];
    for (size_t k = 0; k < kernel_times_vec.size(); ++k) kernel_tot_time += kernel_times_vec[k];
    const std::chrono::duration<double> mean_time =
        n_iters ? tot_time / double(n_iters) : std::chrono::duration<double>::zero();
    const std::chrono::duration<double> kernel_mean_time =
        n_iters ? kernel_tot_time / double(n_iters) : std::chrono::duration<double>::zero();
    const double tot_stddev = population_stddev(times_vec, mean_time.count());
    const double kernel_stddev = population_stddev(kernel_times_vec, kernel_mean_time.count());
    /* One CSV row on stdout. */
    std::cout << name << ","
              << n_iters << ","
              << n_points << ","
              << tot_time.count() << ","
              << mean_time.count() << ","
              << tot_stddev << ","
              << kernel_tot_time.count() << ","
              << kernel_mean_time.count() << ","
              << kernel_stddev << std::endl;
    return EXIT_SUCCESS;
}
int main(int argc, char *argv[])
{
    /* CLI: <num_batches> <batch_size> <distr_type>. */
    if (argc != 4) {
        /* FIX: "useage" -> "usage" in the help message. */
        std::cout << "usage: test_mkl_rng.exe <num_batches> <batch_size> <distr_type>\n";
        return 0;
    }
    /* FIX: strtoull instead of atoi — atoi silently wrapped negative or
     * garbage input into huge size_t values. */
    size_t n_iters = strtoull(argv[1], NULL, 10);
    size_t n_points = strtoull(argv[2], NULL, 10);
    std::string name = std::string(argv[3]);
    /* Full list of supported distributions, kept for the commented-out
     * sweep below. */
    std::vector<std::string> names = {"uniform_float", "uniform_double",
    "uniform_float_accurate", "uniform_double_accurate",
    "gaussian_float", "gaussian_double", "lognormal_float",
    "bits_int", "uniform_int"};
    // for (auto name : names) {
    //     generate(n_iters, n_points, name);
    // }
    /* curandGenerateNormal/LogNormal require an even sample count
     * (Box-Muller produces pairs), so bump a count of 1 up to 2. */
    if ((name == "gaussian_float" || name == "gaussian_double"
        || name == "lognormal_float") && n_points == 1)
        n_points = 2;
    /* FIX: propagate generate()'s status instead of always succeeding. */
    return generate(n_iters, n_points, name);
}
|
#include<stdio.h>
#include<iostream>
#include<fstream>
#include<string>
#include<sstream>
#include<ctime>
#define MAX_M 500000
using namespace std;
/* Check a CUDA runtime call: on failure print the readable error string with
 * file/line and return EXIT_FAILURE from the enclosing int-returning
 * function. (Note: the message has no trailing newline.) */
#define CUDA_CALL(x) do { cudaError_t err=(x); \
if(err!=cudaSuccess) { \
printf("Error %s at %s: %d",cudaGetErrorString(err),__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
__device__ int getProcessCount(const int nodes){
    /*
     * Work share for the calling thread when the launch may have fewer
     * threads than graph nodes: threads with tid >= nodes get nothing,
     * the first (nodes % total_threads) threads get the ceiling share,
     * and the rest get the floor share.
     */
    const int total_threads = gridDim.x * blockDim.x;
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= nodes)
        return 0;
    const bool takes_extra = tid < (nodes % total_threads);
    return takes_extra ? (nodes + total_threads - 1) / total_threads
                       : nodes / total_threads;
}
__global__ void fixMatching(int* cmatch, int* rmatch,int *nodes)
{
    /*
     * Repair race-condition damage from alternate(): a vertex whose rmatch
     * partner does not point back at it lost a race, so it is reset to
     * unmatched (-1). Races are not prevented up front; each outer
     * iteration still guarantees at least one augmenting path, so the
     * iteration count stays bounded.
     */
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int process_cnt = getProcessCount(*nodes);
    for (int i = 0; i < process_cnt; i++) {
        int col_vertex = i * (gridDim.x * blockDim.x) + tid;
        int matched_row = rmatch[col_vertex];
        /* FIX: the original indexed cmatch[matched_row] even when
         * matched_row was -1 (unmatched) or -2 (free-endpoint marker),
         * reading out of bounds. Checking matched_row < 0 first avoids the
         * bad read while preserving the outcome: stale negative markers are
         * normalized to -1, and mismatched pairs are reset to -1. */
        if (matched_row < 0 || cmatch[matched_row] != col_vertex)
            rmatch[col_vertex] = -1;
    }
}
__global__ void initBfsArray(int* bfs_array, int* amatch,int* nodes){
    /*
     * Seed the BFS frontier: vertices that already have a match
     * (amatch > -1) are marked visited (-1); unmatched vertices
     * (amatch == -1) form level 0.
     */
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = gridDim.x * blockDim.x;
    const int work = getProcessCount(*nodes);
    for (int i = 0; i < work; ++i) {
        const int vertex = i * stride + tid;
        const int match = amatch[vertex];
        if (match >= 0)
            bfs_array[vertex] = -1;
        else if (match == -1)
            bfs_array[vertex] = 0;
    }
}
__global__ void bfs(int* predecessor, int* bfs_level,int* bfs_array,int* xadj,int* adj,int *nodes,
int* rmatch, bool* vertex_inserted, bool* augmenting_path_found)
{
/*
Main kernel. Iterates through all the edges
of a particular node.
Any variable names of row and column are
from an old version where the two parts of the
bipartite graph were called rows and columns

Expands exactly one BFS level per launch: every vertex whose bfs_array
entry equals *bfs_level scans its adjacency list (xadj/adj, CSR form).
A matched neighbour enqueues its partner on level *bfs_level+1; a
neighbour with rmatch == -1 is free, so an augmenting path exists and
its rmatch is tagged -2 to mark the endpoint for alternate().
Concurrent writes to bfs_array/predecessor may race by design; the host
loop together with fixMatching() repairs any resulting inconsistency.
*/
int tid=blockDim.x*blockIdx.x+threadIdx.x;
int process_cnt = getProcessCount(*nodes);
for(int i=0;i<process_cnt;i++)
{
int col_vertex=i*(gridDim.x*blockDim.x)+tid;
if(bfs_array[col_vertex]==*bfs_level)
{
for(int j=xadj[col_vertex];j<=xadj[col_vertex+1]-1;j++)
{
//Iterate through the neighbours of node
int neighbour_row=adj[j];
int col_match=rmatch[neighbour_row];
if(col_match>-1)
{
if(bfs_array[col_match]==-1)
{
//That is, col_match has not been
//considered thus far. So add it
//to the next level
*vertex_inserted=true;
bfs_array[col_match]=*bfs_level+1;
predecessor[neighbour_row]=col_vertex;
}
}
else
{
//col_match is negative: -1 means free, -2 means already claimed
//as an augmenting-path endpoint during this launch.
if(col_match==-1)
{ //Found a free vertex!
rmatch[neighbour_row]=-2;
predecessor[neighbour_row]=col_vertex;
*augmenting_path_found=true;
}
}
}
}
}
}
__global__ void alternate(int* cmatch, int* rmatch, int* nodes,int* predecessor)
{
/*
If an augmenting path ending at the vertex
has been found, iterate through the predecessor
array ie. traverse the augmenting path and alterate
the edges to augment it.

Each thread that owns a row tagged -2 (free endpoint found by bfs())
walks back along predecessor[], flipping matched/unmatched edges.
NOTE(review): the break fires when the current partner of a column was
itself reached through that column (predecessor[matched_row] ==
matched_col), i.e. another path already claimed it — such partially
applied paths are repaired afterwards by fixMatching(); confirm against
the original algorithm description.
*/
int tid = blockIdx.x *blockDim.x+threadIdx.x;
int process_vent=getProcessCount(*nodes);
for(int i=0;i<process_vent;i++)
{
int row_vertex=i*(gridDim.x*blockDim.x)+tid;
if(rmatch[row_vertex]==-2)
{
//Walk the augmenting path back towards its matched start.
while(row_vertex!=-1)
{
int matched_col=predecessor[row_vertex];
int matched_row=cmatch[matched_col];
if (matched_row!=-1)
if(predecessor[matched_row]==matched_col)
break;
//Flip the edge: pair (matched_col, row_vertex), then continue
//from the row that matched_col was previously matched to.
cmatch[matched_col]=row_vertex;
rmatch[row_vertex]=matched_col;
row_vertex=matched_row;
}
}
}
}
/*
 * Read edge list "data3.txt" (numerically sorted, one "u v" pair per line,
 * terminated by a phantom "<nodes+1> <nodes+1>" line) and build the CSR
 * adjacency of the left partition into xadj/adj. Returns the edge count
 * (phantom edge excluded).
 *
 * NOTE(review): because of the phantom node, xadj indices up to the caller's
 * `nodes` value are written — the caller must allocate at least nodes+1 ints
 * for xadj. `nodes` is passed by value, so the final decrement below is
 * local bookkeeping only.
 */
int readFile(int* xadj, int* adj, int nodes){
int edges=0;
int p=0,n=0,i=0;
int index=0;
int flag1=0;
int readnode[2];
//CPT Time for File IO
clock_t cpub = clock();
string line;
ifstream myfile ("data3.txt");
/*
The file should be sorted numerically. use "sort -n"
1 2
3 4
is valid
3 4
1 2
is not valid
*/
nodes = nodes+1;
/*
FILE IO and conversion into compact adjacency list
One workaround done here is the increment of node by 1
for the file io. This is basically adding a phantom node
and removing it after. The data input file should also have
the line
<nodes+1> <nodes+1>
at the end. This was to make the file io and conversion easier.
The issue if this line doesn't exist can be explained with an
example. If nodes is 1000 and nodes 999, 998 do not have edges,
ie they do not turn up in the file, then they will not have
a legal value set for their xadj and adj.
*/
if (myfile.is_open()){
p=0;
index=0;
flag1=0;
while ( getline (myfile,line) ){
edges++;
stringstream S;
S<<line;
i=0;
//Parse the two endpoint numbers on this line.
while( S >> n ) {
readnode[i]=n;
i++;
}
if(!p)
xadj[p]=0;
//Advance xadj for every source vertex up to this edge's source,
//so vertices with no edges still get a valid offset.
while(p!=readnode[0]-1)
{
p++;
xadj[p]=index;
if(p>(nodes-1))
{
flag1=1;
break;
}
}
if(flag1)
break;
//Store the 0-based target vertex.
adj[index]=readnode[1]-1;
index++;
}
myfile.close();
}
clock_t cpue = clock();
double cpu_time = 1000* double(cpue - cpub) / CLOCKS_PER_SEC;
printf("FILE IO Time :%f ms\n",cpu_time);
//Removal of the phantom node and edge we added
nodes = nodes-1;
edges = edges-1;
return edges;
}
int main(){
    /*
     * Host driver for the GPU Hopcroft-Karp-style maximum bipartite
     * matching: repeatedly run level-BFS + path alternation kernels until
     * no augmenting path remains, then count the matching on the host.
     */
    int nodes= 999;
    int edges= 0;
    //Number of nodes a thread should handle (default is one)
    int nops = 1;
    dim3 threads(8);
    dim3 blocks((nodes+threads.x-1)/(threads.x*nops));
    int* d_nodes;
    cudaMalloc(&d_nodes,sizeof(int));
    cudaMemcpy(d_nodes,&nodes,sizeof(int),cudaMemcpyHostToDevice);
    // Timers: overall wall time vs kernel-phase time
    cudaEvent_t start, stop, kernel_start, kernel_stop;
    cudaEventCreate(&kernel_start);
    cudaEventCreate(&kernel_stop);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float all_time = 0, kernel_time = 0;
    cudaEventRecord(start,0);
    // Adjacency list of the first part of the bipartite graph.
    // FIX: xadj needs nodes+1 entries — readFile() writes offsets up to
    // index `nodes` (phantom terminator node) and the cudaMemcpy below
    // copies nodes+1 ints; the old malloc(nodes) overflowed by one int.
    int *xadj=(int*)malloc((nodes+1)*sizeof(int));
    int *adj =(int*)malloc(MAX_M*sizeof(int));
    //FILE IO returns edges and fills xadj/adj
    edges = readFile(xadj,adj,nodes);
    int *amatch =(int*)malloc(nodes*sizeof(int));
    int *bmatch =(int*)malloc(nodes*sizeof(int));
    int *bfs_array =(int*)malloc(nodes*sizeof(int));
    int bfs_level = 0;
    memset(amatch,-1,sizeof(int)*nodes);
    memset(bmatch,-1,sizeof(int)*nodes);
    //Boolean flags shared with the kernels
    bool* d_augmenting_path_found;
    bool* d_vertex_inserted;
    bool* augmenting_path_found = (bool*)malloc(sizeof(bool));
    bool* vertex_inserted = (bool*)malloc(sizeof(bool));
    *augmenting_path_found = true;
    cudaMalloc(&d_augmenting_path_found,sizeof(bool));
    cudaMalloc(&d_vertex_inserted,sizeof(bool));
    int *d_predecessor,*d_bfs_level,*d_bfs_array, *d_xadj, *d_adj, *d_amatch,*d_bmatch;
    cudaMalloc(&d_amatch,sizeof(int)*nodes);
    cudaMalloc(&d_bmatch,sizeof(int)*nodes);
    cudaMalloc(&d_predecessor,sizeof(int)*nodes);
    // FIX: d_bfs_level is a single counter; the old code allocated
    // nodes ints for it.
    cudaMalloc(&d_bfs_level,sizeof(int));
    cudaMalloc(&d_bfs_array,sizeof(int)*nodes);
    //nodes+1 so the CSR terminator offset is available on the device
    cudaMalloc(&d_xadj,sizeof(int)*(nodes+1));
    cudaMalloc(&d_adj,sizeof(int)*edges);
    cudaMemset(d_amatch,-1,sizeof(int)*nodes);
    cudaMemset(d_bmatch,-1,sizeof(int)*nodes);
    cudaMemcpy(d_xadj, xadj, sizeof(int)*(nodes+1),cudaMemcpyHostToDevice);
    cudaMemcpy(d_adj,adj, sizeof(int)*edges,cudaMemcpyHostToDevice);
    cudaEventRecord(kernel_start,0);
    while (*augmenting_path_found){
        /*
        Main loop: one BFS phase (level by level) followed by path
        alternation. bfs() does the heavy lifting; alternate() applies the
        augmenting paths; fixMatching() repairs race-condition damage.
        */
        initBfsArray<<<blocks,threads>>> (d_bfs_array,d_amatch,d_nodes);
        *vertex_inserted= true;
        cudaMemset(d_bfs_level,0,sizeof(int));
        bfs_level = 0;
        while (*vertex_inserted){
            //Reset flags, expand one BFS level, read flags back
            cudaMemset(d_vertex_inserted,false,sizeof(bool));
            cudaMemset(d_augmenting_path_found,false,sizeof(bool));
            bfs<<<blocks,threads>>> (d_predecessor,d_bfs_level,d_bfs_array,d_xadj,
            d_adj,d_nodes,d_bmatch,d_vertex_inserted,d_augmenting_path_found);
            cudaMemcpy(augmenting_path_found,d_augmenting_path_found,sizeof(bool),cudaMemcpyDeviceToHost);
            cudaMemcpy(vertex_inserted,d_vertex_inserted,sizeof(bool),cudaMemcpyDeviceToHost);
            if (*augmenting_path_found){
                break;
            }
            bfs_level+=1;
            cudaMemcpy(d_bfs_level,&bfs_level,sizeof(int),cudaMemcpyHostToDevice);
        }
        alternate<<<blocks,threads>>> (d_amatch,d_bmatch,d_nodes,d_predecessor);
        fixMatching<<<blocks,threads>>> (d_amatch,d_bmatch,d_nodes);
    }
    cudaMemcpy(amatch,d_amatch, sizeof(int)*nodes,cudaMemcpyDeviceToHost);
    cudaMemcpy(bmatch,d_bmatch, sizeof(int)*nodes,cudaMemcpyDeviceToHost);
    cudaEventRecord(kernel_stop,0);
    cudaEventSynchronize(kernel_stop);
    cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
    //Count only mutually consistent pairs (final race-condition check)
    int maxmat=0;
    for(int i =0;i<nodes;++i){
        if(amatch[i]!=-1)
            if(bmatch[amatch[i]]==i)
                maxmat++;
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&all_time, start, stop);
    printf("\nOverall Time : %f ms\nKernel Time :%f ms\n",all_time,kernel_time);
    cout << "Size of maximum matching is "<<maxmat<<endl;
    cout << "Minimum size of fleet required is "<<(nodes-maxmat)<<endl;
    // FIX: release everything — the original leaked all device buffers,
    // several host buffers, and the CUDA events.
    cudaFree(d_nodes);
    cudaFree(d_augmenting_path_found);
    cudaFree(d_vertex_inserted);
    cudaFree(d_amatch);
    cudaFree(d_bmatch);
    cudaFree(d_predecessor);
    cudaFree(d_bfs_level);
    cudaFree(d_bfs_array);
    cudaFree(d_xadj);
    cudaFree(d_adj);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaEventDestroy(kernel_start);
    cudaEventDestroy(kernel_stop);
    free(xadj);
    free(adj);
    free(amatch);
    free(bmatch);
    free(bfs_array);
    free(augmenting_path_found);
    free(vertex_inserted);
    return 0;
}
|
#define LIMIT -999
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#define BLOCK_SIZE 16
#define ROWS 4096
#define COLS 4096
#define PENALTY 10
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
__device__ __host__ int maximum( int a, int b, int c){
    // Three-way maximum, callable from both host and device code.
    const int ab = (a <= b) ? b : a;   // max(a, b)
    return (ab <= c) ? c : ab;         // max(max(a, b), c)
}
__global__ void
needle_cuda_shared_1( int* reference,
int* matrix_cuda,
int cols,
int penalty,
int i,
int block_width)
{
/*
 * Processes the tiles on anti-diagonal `i` of the TOP-LEFT triangle of the
 * Needleman-Wunsch score matrix. One thread block per BLOCK_SIZE x
 * BLOCK_SIZE tile; blockDim.x is expected to equal BLOCK_SIZE (the host
 * launches dim3(BLOCK_SIZE, 1)). The tile plus its one-cell north/west halo
 * is staged in shared memory, swept in two wavefront passes, and written
 * back. `cols` is the padded matrix width (max_cols on the host side).
 */
int bx = blockIdx.x;
int tx = threadIdx.x;
//Tile coordinates along the anti-diagonal.
int b_index_x = bx;
int b_index_y = i - 1 - bx;
//Offsets of this tile's interior, north row, west column and NW corner.
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
//temp: tile scores plus a one-cell north/west halo; ref: substitution
//scores for the tile.
__shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];
__shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];
//Each thread loads one column of the reference tile.
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) {
ref[ty][tx] = reference[index + cols * ty];
}
__syncthreads();
//Stage the halo: NW corner (thread 0), west column (one cell per
//thread), then the north row.
if (tx == 0) {
temp[tx][0] = matrix_cuda[index_nw];
}
temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];
__syncthreads();
temp[0][tx + 1] = matrix_cuda[index_n];
__syncthreads();
//Forward wavefront: anti-diagonals of growing length; cell (y,x) depends
//on its NW, W and N neighbours computed on previous diagonals.
for( int m = 0 ; m < BLOCK_SIZE ; m++){
if ( tx <= m ){
int t_index_x = tx + 1;
int t_index_y = m - tx + 1;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
//Backward wavefront: anti-diagonals of shrinking length.
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m ;
int t_index_y = BLOCK_SIZE - tx;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
//Flush the computed tile back to global memory.
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) {
matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
}
__global__ void
needle_cuda_shared_2( int* reference,
int* matrix_cuda,
int cols,
int penalty,
int i,
int block_width)
{
/*
 * Counterpart of needle_cuda_shared_1 for the BOTTOM-RIGHT triangle of the
 * score matrix: processes the tiles on shrinking anti-diagonal `i`. Only
 * the tile-coordinate computation differs; the shared-memory staging and
 * the two wavefront sweeps are identical.
 */
int bx = blockIdx.x;
int tx = threadIdx.x;
//Tile coordinates on the bottom-right anti-diagonal.
int b_index_x = bx + block_width - i ;
int b_index_y = block_width - bx -1;
//Offsets of this tile's interior, north row, west column and NW corner.
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
//temp: tile scores plus one-cell north/west halo; ref: substitution scores.
__shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];
__shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) {
ref[ty][tx] = reference[index + cols * ty];
}
__syncthreads();
//Stage the halo: NW corner, west column, north row.
if (tx == 0) {
temp[tx][0] = matrix_cuda[index_nw];
}
temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];
__syncthreads();
temp[0][tx + 1] = matrix_cuda[index_n];
__syncthreads();
//Forward wavefront over growing anti-diagonals.
for( int m = 0 ; m < BLOCK_SIZE ; m++) {
if ( tx <= m ){
int t_index_x = tx + 1;
int t_index_y = m - tx + 1;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
//Backward wavefront over shrinking anti-diagonals.
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--) {
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m ;
int t_index_y = BLOCK_SIZE - tx;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
//Flush the computed tile back to global memory.
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) {
matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
}
__global__ void
needle_cuda_noshr_1( int* reference,
int* matrix_cuda,
int cols,
int penalty,
int i,
int block_width)
{
    /*
     * Anti-diagonal sweep over one BLOCK_SIZE x BLOCK_SIZE tile of the
     * top-left triangle of the Needleman-Wunsch score matrix, working
     * directly in global memory (no shared-memory staging). One block per
     * tile on diagonal `i`; blockDim.x is expected to equal BLOCK_SIZE.
     */
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    int b_index_x = bx;
    int b_index_y = i - 1 - bx;
    // Top-left interior cell of the tile (+cols+1 skips the boundary
    // row/column of the DP matrix).
    int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols + 1 );
    // Forward wavefront: anti-diagonals of growing length.
    for( int m = 0 ; m < BLOCK_SIZE ; m++){
        if ( tx <= m ){
            int t_index_x = tx;
            int t_index_y = m - tx;
            int idx = index + t_index_y * cols + t_index_x;
            matrix_cuda[idx] = maximum( matrix_cuda[idx-cols-1] + reference[idx],
            matrix_cuda[idx - 1] - penalty,
            matrix_cuda[idx - cols] - penalty);
        }
        // FIX: each diagonal reads cells written by other threads on the
        // previous step; without a block-wide barrier this is a data race
        // (warp lockstep is not guaranteed on Volta+, and BLOCK_SIZE may
        // exceed a warp). The barrier sits outside the divergent branch so
        // every thread reaches it.
        __syncthreads();
    }
    // Backward wavefront: anti-diagonals of shrinking length.
    for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
        if ( tx <= m){
            int t_index_x = tx + BLOCK_SIZE - m -1;
            int t_index_y = BLOCK_SIZE - tx - 1;
            int idx = index + t_index_y * cols + t_index_x;
            matrix_cuda[idx] = maximum( matrix_cuda[idx-cols-1] + reference[idx],
            matrix_cuda[idx - 1] - penalty,
            matrix_cuda[idx - cols] - penalty);
        }
        __syncthreads();   // FIX: same inter-diagonal dependency as above
    }
}
__global__ void
needle_cuda_noshr_2( int* reference,
int* matrix_cuda,
int cols,
int penalty,
int i,
int block_width)
{
    /*
     * Counterpart of needle_cuda_noshr_1 for the bottom-right triangle of
     * the score matrix; only the tile-coordinate computation differs.
     * Works directly in global memory, one block per tile on shrinking
     * anti-diagonal `i`; blockDim.x is expected to equal BLOCK_SIZE.
     */
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    int b_index_x = bx + block_width - i;
    int b_index_y = block_width - bx -1;
    // Top-left interior cell of the tile.
    int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols + 1 );
    // Forward wavefront: anti-diagonals of growing length.
    for( int m = 0 ; m < BLOCK_SIZE ; m++) {
        if ( tx <= m ){
            int t_index_x = tx;
            int t_index_y = m - tx;
            int idx = index + t_index_y * cols + t_index_x;
            matrix_cuda[idx] = maximum( matrix_cuda[idx-cols-1] + reference[idx],
            matrix_cuda[idx - 1] - penalty,
            matrix_cuda[idx - cols] - penalty);
        }
        // FIX: barrier between dependent anti-diagonal steps — without it
        // threads may read neighbour cells before they are written (data
        // race; warp lockstep is not guaranteed on Volta+). Placed outside
        // the divergent branch so all threads reach it.
        __syncthreads();
    }
    // Backward wavefront: anti-diagonals of shrinking length.
    for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--) {
        if ( tx <= m){
            int t_index_x = tx + BLOCK_SIZE - m -1;
            int t_index_y = BLOCK_SIZE - tx - 1;
            int idx = index + t_index_y * cols + t_index_x;
            matrix_cuda[idx] = maximum( matrix_cuda[idx-cols-1] + reference[idx],
            matrix_cuda[idx - 1] - penalty,
            matrix_cuda[idx - cols] - penalty);
        }
        __syncthreads();   // FIX: same inter-diagonal dependency as above
    }
}
/*
 * BLOSUM62 amino-acid substitution score matrix (24x24, including the
 * ambiguous B/Z/X codes and the stop/gap symbol in the last row/column).
 * In this benchmark it is indexed by the synthetic sequence values 1..10
 * produced in runTest, so only the upper-left portion is actually read.
 */
int blosum62[24][24] = {
{ 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4},
{-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4},
{-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4},
{-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{ 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
{-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4},
{-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4},
{-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4},
{-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4},
{-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4},
{-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4},
{-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4},
{-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4},
{-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
{ 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4},
{ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4},
{-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4},
{-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4},
{ 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4},
{-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4},
{-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1}
};
double gettime() {
    // Current wall-clock time in seconds (microsecond resolution).
    struct timeval now;
    gettimeofday(&now, NULL);
    const double seconds = (double)now.tv_sec;
    const double micros  = (double)now.tv_usec;
    return seconds + micros * 1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    // Thin entry point: all work happens in runTest().
    runTest(argc, argv);
    return 0;
}
void usage(int argc, char **argv)
{
    // Print invocation help to stderr and terminate with failure status.
    fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]);
    const char *details[] = {
        "\t<dimension> - x and y dimensions\n",
        "\t<penalty> - penalty(positive integer)\n",
    };
    for (size_t k = 0; k < sizeof(details) / sizeof(details[0]); ++k)
        fputs(details[k], stderr);
    exit(1);
}
// Run one Needleman-Wunsch alignment pass on the GPU: build the score and
// reference matrices on the host, upload them, sweep the anti-diagonals with
// the wavefront kernels (shared-memory variant unless NOSHR is defined),
// then download the result matrix. argc/argv are currently unused.
void runTest( int argc, char** argv)
{
    int max_rows, max_cols, penalty;
    int *input_itemsets, *output_itemsets, *referrence;
    int *matrix_cuda, *referrence_cuda;
    int size;
    // The sequence lengths must divide evenly by BLOCK_SIZE, and at the
    // current stage max_rows needs to equal max_cols.
    max_rows = ROWS;
    max_cols = COLS;
    penalty = PENALTY;
    if(max_rows%BLOCK_SIZE!=0 || max_cols%BLOCK_SIZE!=0) {
        // Report the actual block size instead of a hard-coded "16".
        fprintf(stderr,"The dimension values must be a multiple of %d\n", BLOCK_SIZE);
        exit(1);
    }
    // One extra row/column holds the boundary gap penalties.
    max_rows = max_rows + 1;
    max_cols = max_cols + 1;
    referrence = (int *)malloc( max_rows * max_cols * sizeof(int));
    input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int));
    output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int));
    // Check every allocation (the old code checked only input_itemsets and
    // then continued regardless).
    if (!referrence || !input_itemsets || !output_itemsets) {
        fprintf(stderr, "error: can not allocate memory\n");
        exit(1);
    }
    srand( 7);
    for (int i = 0 ; i < max_cols; i++){
        for (int j = 0 ; j < max_rows; j++){
            input_itemsets[i*max_cols+j] = 0;
        }
    }
    printf("Start Needleman-Wunsch\n");
    // Random "sequences" along the first column and first row.
    for( int i=1; i< max_rows ; i++){ //please define your own sequence.
        input_itemsets[i*max_cols] = rand() % 10 + 1;
    }
    for( int j=1; j< max_cols ; j++){ //please define your own sequence.
        input_itemsets[j] = rand() % 10 + 1;
    }
    // Substitution scores looked up from the BLOSUM62 table.
    for (int i = 1 ; i < max_rows; i++){
        for (int j = 1 ; j < max_cols; j++){
            referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]];
        }
    }
    // Linear gap penalties along the borders.
    for( int i = 1; i< max_rows ; i++) {
        input_itemsets[i*max_cols] = -i * penalty;
    }
    for( int j = 1; j< max_cols ; j++) {
        input_itemsets[j] = -j * penalty;
    }
    size = max_cols * max_rows;
    cudaMalloc((void**)& referrence_cuda, sizeof(int)*size);
    cudaMalloc((void**)& matrix_cuda, sizeof(int)*size);
    cudaMemcpy(referrence_cuda, referrence, sizeof(int) * size, cudaMemcpyHostToDevice);
    cudaMemcpy(matrix_cuda, input_itemsets, sizeof(int) * size, cudaMemcpyHostToDevice);
    dim3 dimGrid;
    dim3 dimBlock(BLOCK_SIZE, 1);
    int block_width = ( max_cols - 1 )/BLOCK_SIZE;
    struct timeval tv1, tv2;
    gettimeofday( &tv1, NULL);
#ifdef NOSHR /* No shared memory optimization */
    // Set the cache preference once per kernel, passing the kernel's
    // function pointer (the string overload of cudaFuncSetCacheConfig is
    // obsolete) instead of re-issuing the call on every diagonal.
    cudaFuncSetCacheConfig(needle_cuda_noshr_1, cudaFuncCachePreferL1);
    cudaFuncSetCacheConfig(needle_cuda_noshr_2, cudaFuncCachePreferL1);
    printf("Processing top-left matrix\n");
    //process top-left matrix
    for( int i = 1 ; i <= block_width ; i++){
        dimGrid.x = i;
        dimGrid.y = 1;
        needle_cuda_noshr_1<<<dimGrid, dimBlock>>>(referrence_cuda, matrix_cuda, max_cols, penalty, i, block_width);
    }
    printf("Processing bottom-right matrix\n");
    //process bottom-right matrix
    for( int i = block_width - 1 ; i >= 1 ; i--){
        dimGrid.x = i;
        dimGrid.y = 1;
        needle_cuda_noshr_2<<<dimGrid, dimBlock>>>(referrence_cuda, matrix_cuda, max_cols, penalty, i, block_width);
    }
#else
    printf("Processing top-left matrix\n");
    //process top-left matrix
    for( int i = 1 ; i <= block_width ; i++){
        dimGrid.x = i;
        dimGrid.y = 1;
        needle_cuda_shared_1<<<dimGrid, dimBlock>>>(referrence_cuda, matrix_cuda, max_cols, penalty, i, block_width);
    }
    printf("Processing bottom-right matrix\n");
    //process bottom-right matrix
    for( int i = block_width - 1 ; i >= 1 ; i--){
        dimGrid.x = i;
        dimGrid.y = 1;
        needle_cuda_shared_2<<<dimGrid, dimBlock>>>(referrence_cuda, matrix_cuda, max_cols, penalty, i, block_width);
    }
#endif
    // Kernel launches are asynchronous: synchronize before reading the clock
    // so the measured time covers kernel execution, not just launch overhead.
    cudaDeviceSynchronize();
    gettimeofday( &tv2, NULL);
    double runtime = ((tv2.tv_sec + tv2.tv_usec/1000000.0)-(tv1.tv_sec + tv1.tv_usec/1000000.0));
    printf("Runtime(seconds): %f\n", runtime);
    cudaMemcpy(output_itemsets, matrix_cuda, sizeof(int) * size, cudaMemcpyDeviceToHost);
#ifdef OUTPUT
    int i, j;
    for( i = 0; i < max_rows; i++) {
        for( j = 0; j < max_cols; j++) {
            printf("%d ", output_itemsets[i*max_cols+j]);
        }
        printf("\n");
    }
#endif
    cudaFree(referrence_cuda);
    cudaFree(matrix_cuda);
    // Release the host buffers (previously leaked).
    free(referrence);
    free(input_itemsets);
    free(output_itemsets);
}
|
14,336 | #include "includes.h"
__global__ void cudaIaccumulate_kernel(int* x, int* y, unsigned int size)
{
    // Element-wise accumulate x[i] += y[i] over `size` ints, using a
    // grid-stride loop so any launch configuration covers the data.
    const unsigned int start = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step  = blockDim.x * gridDim.x;
    for (unsigned int idx = start; idx < size; idx += step)
        x[idx] += y[idx];
}
14,337 | #include <iostream>
using namespace std;
__global__
void fun(double *a, long N)
{
    // Print a marker, then the final element of the device array.
    printf("inside fun()\n");
    const long last = N - 1;
    printf("a[] = %f\n", a[last]);
}
// Allocate ~16 GB of doubles on host and device, copy them over, and print
// the last element from a kernel. The CUDA calls are checked explicitly:
// a device allocation of this size fails on most GPUs, and the old code
// silently handed the resulting bad pointer to the kernel.
int main()
{
    long i;
    long N = 2000000000;           // 2e9 doubles ~= 16 GB per buffer
    long size = N*sizeof(double);
    double *a = new double[N];
    double *a_d = NULL;
    cout << "hello" << endl;
    for(i=0; i<N; ++i)
        a[i] = 3.14159265358979323;
    cout << "assigned to a[]" << endl;
    cudaError_t err = cudaMalloc(&a_d, size);
    if (err != cudaSuccess) {
        cout << "cudaMalloc failed: " << cudaGetErrorString(err) << endl;
        delete[] a;
        return 1;
    }
    cout << "cudaMalloc(); done" << endl;
    err = cudaMemcpy(a_d, a, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        cout << "cudaMemcpy failed: " << cudaGetErrorString(err) << endl;
        cudaFree(a_d);
        delete[] a;
        return 1;
    }
    cout << "cudaMemcpy(); done" << endl;
    fun<<<1,1>>>(a_d, N);
    // Catch launch-configuration errors; the blocking copy below
    // synchronizes with (and surfaces) any execution error.
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        cout << "kernel launch failed: " << cudaGetErrorString(err) << endl;
    }
    cudaMemcpy(a, a_d, size, cudaMemcpyDeviceToHost);
    delete[] a;
    cudaFree(a_d);
    return 0;
}
|
14,338 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cuda.h>
const int INF = ((1 << 30) - 1);
const int V = 50010;
void input(char* inFileName);
void output(char* outFileName);
void block_FW(int B);
int ceil(int a, int b);
void cal(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height);
int n, m;
static int Dist[V][V];
// Entry point: blocked Floyd-Warshall all-pairs shortest paths.
// Usage: <binary input graph file> <binary output file>.
int main(int argc, char* argv[]) {
    // Guard against missing arguments; argv[1]/argv[2] were previously
    // dereferenced unconditionally.
    if (argc < 3) {
        fprintf(stderr, "usage: %s <input_file> <output_file>\n", argv[0]);
        return 1;
    }
    input(argv[1]);
    int B = 512;            // blocking factor for the blocked FW sweep
    block_FW(B);
    output(argv[2]);
    return 0;
}
// Read the graph: two ints (n = #vertices, m = #edges) followed by m
// (src, dst, weight) int triples. Initializes Dist to 0 on the diagonal
// and INF elsewhere before applying the edge weights.
void input(char* infile) {
    FILE* file = fopen(infile, "rb");
    // fopen/fread were previously unchecked; a bad path read garbage sizes.
    if (!file) {
        fprintf(stderr, "cannot open input file %s\n", infile);
        exit(1);
    }
    if (fread(&n, sizeof(int), 1, file) != 1 ||
        fread(&m, sizeof(int), 1, file) != 1) {
        fprintf(stderr, "truncated header in %s\n", infile);
        exit(1);
    }
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            Dist[i][j] = (i == j) ? 0 : INF;
        }
    }
    int pair[3];
    for (int i = 0; i < m; ++i) {
        if (fread(pair, sizeof(int), 3, file) != 3) {
            fprintf(stderr, "truncated edge list in %s\n", infile);
            exit(1);
        }
        Dist[pair[0]][pair[1]] = pair[2];
    }
    fclose(file);
}
// Write the n x n distance matrix row by row as binary ints, clamping any
// value above INF down to INF first.
void output(char* outFileName) {
    // Open in binary mode ("wb") since fwrite emits raw ints; the old "w"
    // would corrupt the output on platforms with text-mode translation.
    FILE* outfile = fopen(outFileName, "wb");
    if (!outfile) {
        fprintf(stderr, "cannot open output file %s\n", outFileName);
        exit(1);
    }
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            if (Dist[i][j] >= INF) Dist[i][j] = INF;
        }
        fwrite(Dist[i], sizeof(int), n, outfile);
    }
    fclose(outfile);
}
// Integer ceiling of x / y (intended for positive operands).
int ceil(int x, int y) { return (x + y - 1) / y; }
// Blocked Floyd-Warshall driver. For each round r: update the pivot block
// (phase 1), the pivot row and pivot column blocks (phase 2), and every
// remaining block (phase 3).
void block_FW(int B) {
    int rounds = ceil(n, B);
    for (int r = 0; r < rounds; ++r) {
        printf("%d %d\n", r, rounds);
        fflush(stdout);
        /* Phase 1 */
        cal(B, r, r, r, 1, 1);
        /* Phase 2 */
        cal(B, r, r, 0, r, 1);
        cal(B, r, r, r + 1, rounds - r - 1, 1);
        cal(B, r, 0, r, 1, r);
        cal(B, r, r + 1, r, 1, rounds - r - 1);
        /* Phase 3 */
        cal(B, r, 0, 0, r, r);
        cal(B, r, 0, r + 1, rounds - r - 1, r);
        cal(B, r, r + 1, 0, r, rounds - r - 1);
        cal(B, r, r + 1, r + 1, rounds - r - 1, rounds - r - 1);
    }
}
// Relax every block in the rectangle starting at (block_start_x,
// block_start_y) spanning block_height x block_width blocks, using only the
// B pivot indices k belonging to round `Round`. The k loop stays outside
// the element loops, exactly as blocked Floyd-Warshall requires.
void cal(
    int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) {
    const int bi_end = block_start_x + block_height;
    const int bj_end = block_start_y + block_width;
    for (int bi = block_start_x; bi < bi_end; ++bi) {
        for (int bj = block_start_y; bj < bj_end; ++bj) {
            for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) {
                // Element range covered by block (bi, bj), clamped to n.
                int row_lo = bi * B;
                int row_hi = (bi + 1) * B;
                int col_lo = bj * B;
                int col_hi = (bj + 1) * B;
                if (row_hi > n) row_hi = n;
                if (col_hi > n) col_hi = n;
                for (int i = row_lo; i < row_hi; ++i) {
                    for (int j = col_lo; j < col_hi; ++j) {
                        int via_k = Dist[i][k] + Dist[k][j];
                        if (via_k < Dist[i][j]) {
                            Dist[i][j] = via_k;
                        }
                    }
                }
            }
        }
    }
}
|
14,339 | #include <cstdio>
// Device-side hello: every launched thread prints one line.
__global__ void print(void) {
    printf("Hello GPU\n");
}
int main() {
    printf("Hello CPU\n");
    // One block of four threads; per-block thread counts above the 1024
    // hardware limit (e.g. 2048) would fail to launch.
    print<<<1, 4>>>();
    // Wait for the kernel so its device-side printf output is flushed.
    cudaDeviceSynchronize();
}
|
14,340 | #include "includes.h"
// Fill one row of the Gamma matrix, one block per output column l.
// Columns whose mapped index is below vertex_index are computed from the
// N matrix and exp_V; the rest are copied from the G matrix.
__global__ void compute_row_on_Gamma_matrix_kernel(int row_index, int vertex_index, int* indices, double* exp_V, double* N_ptr, int LD_N, double* G_ptr, int LD_G, double* row_ptr, int incr) {
    const int l = blockIdx.x;
    const int i_index = indices[row_index];
    const int j_index = indices[l];
    if (j_index < vertex_index) {
        const double delta = (i_index == j_index) ? 1. : 0.;
        row_ptr[l * incr] =
            (N_ptr[i_index + LD_N * j_index] * exp_V[l] - delta) / (exp_V[l] - 1.);
    } else {
        row_ptr[l * incr] = G_ptr[i_index + LD_G * (j_index - vertex_index)];
    }
}
14,341 | #include "includes.h"
using namespace std;
double iStart1, iStart2, iStart3a, iStart3b, iStart4a, iStart4b, iStart4c, iStart5;
double iElaps1=0, iElaps2=0, iElaps3a=0, iElaps3b=0, iElaps4=0, iElaps5=0;
// Hold configurations for Kmeans
// K-means run configuration and data pointers.
// Field meanings inferred from names and typical k-means usage — verify
// against the (not shown) call sites.
struct Info {
    int numPoints;           // number of data points
    int dim;                 // dimensionality of each point
    int numCentroids;        // number of clusters (K)
    int numRepeats;          // presumably number of restarts/repetitions
    int *belongs;            // per-point cluster assignment
    float **points;          // [numPoints][dim] input data
    float **centroids;       // [numCentroids][dim] cluster centers
    int thresholdLoops;      // iteration cap before stopping
    float thresholdFraction; // convergence threshold (fraction) — confirm semantics
    int threadPerBlock;      // CUDA launch configuration
};
// ************* Utils ************* //
// Sum the first l1 elements of g_idata, leaving the total in g_idata[0].
// Requires l2 to be a power of two (the halving loop assumes it) and
// dynamic shared memory of at least blockDim.x ints; threads past l1 pad
// with zeros. Presumably launched as a single block of l2 threads — confirm
// at the call site.
__global__ static void reduce(int *g_idata, int l1, int l2) {
    extern __shared__ unsigned int sdata[];
    unsigned int tid = threadIdx.x;
    // Stage inputs into shared memory, zero-padding past l1.
    if (tid < l1) {
        sdata[tid] = g_idata[tid];
    } else {
        sdata[tid] = 0;
    }
    __syncthreads();
    // Parallel Reduction (l2 must be power of 2)
    for (unsigned int s = l2 / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // Thread 0 writes the block total back in place.
    if (tid == 0) {
        g_idata[0] = sdata[0];
    }
}
14,342 | #include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <iostream>
// Zero-fill: one block per element (launched with <<<n, 1>>>).
__global__ void init_zero(double *d_x){
    const int idx = blockIdx.x;
    d_x[idx] = 0.0;
}
// Advance each walker by one step: map a uniform in [0,1) to [-1,1) and
// accumulate. One block per walker (launched with <<<n, 1>>>).
__global__ void randomWalk(double *d_x, double *d_GPU){
    const int idx = blockIdx.x;
    d_x[idx] += 2.0 * d_GPU[idx] - 1.0;
}
// Simulate n random walkers on the GPU: each iteration draws n uniforms
// with cuRAND and advances every walker by a step in [-1, 1).
int main() {
    using namespace std;
    curandGenerator_t genGPU;
    curandCreateGenerator(&genGPU, CURAND_RNG_PSEUDO_MTGP32);
    curandSetPseudoRandomGeneratorSeed(genGPU, 1234ULL);
    const int n = 10000;
    // Device buffers: d_GPU holds fresh uniforms, d_x the walker positions.
    // (The old code also kept two unused 80 KB host arrays on the stack;
    // declare them inside the commented-out readback below if needed.)
    double* d_GPU;
    double* d_x;
    cudaMalloc(&d_GPU, n*sizeof(double));
    cudaMalloc(&d_x, n*sizeof(double));
    init_zero<<<n, 1>>>(d_x);
    for ( int i=0; i<100000; i++){
        // Generate an array of random numbers in the GPU
        curandGenerateUniformDouble(genGPU, d_GPU, n);
        randomWalk <<< n,1 >>> (d_x, d_GPU);
    }
    // To inspect results, copy back to host:
    // double GPU[n]; double x[n];
    // cudaMemcpy(GPU, d_GPU, n*sizeof(double), cudaMemcpyDeviceToHost);
    // cudaMemcpy(x, d_x, n*sizeof(double), cudaMemcpyDeviceToHost);
    // for ( int i = 0; i < n; ++i ) cout << GPU[i] << ' ' << x[i] << endl;
    curandDestroyGenerator(genGPU);
    cudaFree(d_GPU);
    cudaFree(d_x);
    return 0;
}
|
14,343 | // System includes
#include <stdio.h>
#include <assert.h>
#include <time.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <chrono>
using namespace std::chrono;
// Element-wise vector add c = a + b; exactly one thread per element is
// assumed by the launch configuration (no tail guard).
__global__ void gpu_add_fun(int *a, int *b, int *c) {
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    c[gid] = a[gid] + b[gid];
}
// CPU reference: element-wise c = a + b over N ints, timed with
// std::chrono; returns the sum of c as a simple checksum.
int local_add(int N, int *a, int *b, int *c) {
    const std::chrono::high_resolution_clock::time_point begin =
        std::chrono::high_resolution_clock::now();
    for (int idx = 0; idx < N; ++idx)
        c[idx] = a[idx] + b[idx];
    const std::chrono::high_resolution_clock::time_point end =
        std::chrono::high_resolution_clock::now();
    const auto usec =
        std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
    printf("\nLocal: Elapsed time %f: msec. ", usec/1000.0f);
    long checksum = 0;
    for (int idx = 0; idx < N; ++idx)
        checksum += c[idx];
    return checksum;
}
// Fill x with N pseudo-random values from {-1, 0, 1}; always returns 0.
// Note: reseeds rand() from the clock on every call.
int random_ints(int *x, int N) {
    srand(time(NULL));
    for (int idx = 0; idx < N; ++idx)
        x[idx] = rand() % 3 - 1;
    return 0;
}
// GPU vector add with N/m blocks of m threads (N must be divisible by m).
// Prints the measured kernel time and returns the sum of the result vector
// as a checksum. CUDA status values are now checked instead of being
// assigned to `error` and ignored, and the timing events are destroyed
// (they previously leaked on every call).
int gpu_add(int N, int m, int *a, int *b, int *c, int *c_gpu) {
    cudaError_t error;
    cudaEvent_t start;
    error = cudaEventCreate(&start);
    if (error != cudaSuccess) printf("event create: %s. ", cudaGetErrorString(error));
    cudaEvent_t stop;
    error = cudaEventCreate(&stop);
    if (error != cudaSuccess) printf("event create: %s. ", cudaGetErrorString(error));
    int *d_a, *d_b, *d_c;            // device buffers
    int size = N * sizeof(int);      // allocation size in bytes
    // Allocate space for device copies of a, b, c.
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    // Copy inputs to device.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    // Launch add() kernel on GPU: one thread per element.
    gpu_add_fun<<<N/m,m>>>(d_a, d_b, d_c);
    error = cudaGetLastError();      // surfaces bad launch configurations
    if (error != cudaSuccess) printf("launch: %s. ", cudaGetErrorString(error));
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // Kernel execution time between the two events.
    float msecTotal = 0.0f;
    error = cudaEventElapsedTime(&msecTotal, start, stop);
    if (error != cudaSuccess) printf("elapsed: %s. ", cudaGetErrorString(error));
    printf("Elapsed time %f: msec. ", msecTotal);
    // Copy result back to host.
    cudaMemcpy(c_gpu, d_c, size, cudaMemcpyDeviceToHost);
    // Resource cleanup.
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    long s = 0;
    for (int i=0; i<N; i++) s += (c_gpu[i]);
    return s;
}
// Compare CPU vs GPU vector add over a range of block sizes.
int main(void)
{
    int N = 1<<15; // 2^15 elements
    int *a, *b, *c, *c_gpu;
    long sum = 0;
    // Host buffers hold N ints each. (The old code passed N*sizeof(int) as
    // the element count to new[], allocating 4x the needed memory.)
    a = new int[N]; random_ints(a, N);
    b = new int[N]; random_ints(b, N);
    c = new int[N];
    c_gpu = new int[N];
    sum = local_add(N, a, b, c);
    printf("Sum diff: %ld\n", sum);
    // Sweep block sizes 1..2^15 (powers of two), all dividing N evenly.
    for (int i=1; i<=(1<<15); i*=2) {
        printf("GPU i = %5d. ", i);
        sum = gpu_add(N, i, a, b, c, c_gpu);
        printf("Sum diff: %ld\n", sum);
    }
    // Free all four host buffers (c_gpu previously leaked).
    delete [] a; delete [] b; delete [] c; delete [] c_gpu;
    return 0;
}
|
14,344 | // Shared memory.
extern __shared__ float sdata[];
/**
* Apply the relu function.
* @param n the number of elements.
* @param x the data.
* @return nothing.
*/
extern "C"
__global__ void relu(int n, float *x)
{
    // In-place ReLU: clamp negative entries to zero. Iterates one element
    // per block index, striding by the grid size.
    for (int idx = blockIdx.x; idx < n; idx += gridDim.x) {
        if (x[idx] < 0) {
            x[idx] = 0;
        }
    }
}
/**
* Apply the sigmoid function.
* @param n the number of elements.
* @param x the data.
* @return nothing.
*/
extern "C"
__global__ void sigmoid(int n, float *x)
{
    // In-place logistic function 1/(1+e^-x), one element per block index,
    // striding by the grid size.
    for (int idx = blockIdx.x; idx < n; idx += gridDim.x) {
        float v = x[idx];
        x[idx] = 1 / (1 + expf(-v));
    }
}
/**
* Apply the softmax function.
* @param n the number of elements.
* @param x the data.
* @return nothing.
*/
// Block-local softmax: each block normalizes its own blockDim.x-sized slice
// of x in place, using the file-scope dynamic shared array `sdata`
// (requires blockDim.x * sizeof(float) bytes of dynamic shared memory).
// NOTE(review): `n` is unused and there is no bounds check, so the launch
// must cover the data exactly; exp is applied without subtracting the max,
// so large inputs can overflow — confirm inputs are bounded.
__device__ void softMaxDevice(int n, float *x) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // Stage this block's slice into shared memory.
    sdata[threadIdx.x] = x[index];
    __syncthreads();
    // Every thread independently sums exp over the slice (O(blockDim) each).
    float sum = 0;
    for (int i = 0; i < blockDim.x; i++) {
        sum += expf(sdata[i]);
    }
    x[index] = expf(sdata[threadIdx.x]) / sum;
}
/**
* Apply the softmax function.
* @param n the number of elements.
* @param x the data.
* @return nothing.
*/
extern "C"
// Kernel wrapper: runs the block-local softmax (softMaxDevice) in place.
// Same launch requirements: dynamic shared memory of blockDim.x floats.
__global__ void softMax(int n, float *x)
{
    softMaxDevice(n, x);
}
/**
* Compute the linear derivative.
* @param n the number of elements.
* @param x the data.
* @return nothing.
*/
extern "C"
__global__ void noneDerivative(int n, float *x)
{
    // Derivative of the identity activation: overwrite every entry with 1.
    for (int idx = blockIdx.x; idx < n; idx += gridDim.x) {
        x[idx] = 1;
    }
}
/**
* Compute the relu derivative.
* @param n the number of elements.
* @param x the data.
* @return nothing.
*/
extern "C"
__global__ void reluDerivative(int n, float *x)
{
    // In-place ReLU derivative: 1 where x > 0, otherwise 0.
    for (int idx = blockIdx.x; idx < n; idx += gridDim.x) {
        x[idx] = (x[idx] > 0) ? 1 : 0;
    }
}
/**
* Compute the softmax derivative.
* @param n the number of elements.
* @param x the data, i.e. the output buffer.
* @param g the gradient.
* @return nothing.
*/
extern "C"
// Softmax backward in place: recomputes y = softmax(x) into x, then applies
// the Jacobian J[i][this] = y_i * (delta_{i,this} - y_this) to gradient g.
// Same launch contract as softMaxDevice (dynamic shared memory of
// blockDim.x floats; launch must cover the data exactly).
__global__ void softMaxDerivative(int n, float *x, float *g)
{
    softMaxDevice(n, x);
    // Make the recomputed softmax visible block-wide before reusing sdata.
    __syncthreads();
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[threadIdx.x] = x[index];
    __syncthreads();
    // Jacobian-vector product: sum_i y_i * (delta_i - y_this) * g_i.
    float sum = 0;
    for (int i = 0; i < blockDim.x; i++) {
        int g_index = blockIdx.x * blockDim.x + i;
        sum += sdata[i] * ((i == threadIdx.x ? 1 : 0) - sdata[threadIdx.x]) * g[g_index];
    }
    x[index] = sum;
}
/**
* Compute the sigmoid derivative.
* @param n the number of elements.
* @param x the data.
* @return nothing.
*/
extern "C"
// In-place sigmoid derivative: x <- s(x) * (1 - s(x)) with s the logistic
// function. Float literals and expf are used throughout — the old 1.0
// literals silently promoted the whole expression to double in a float
// kernel.
__global__ void sigmoidDerivative(int n, float *x)
{
    for (int i = blockIdx.x; i < n; i += gridDim.x) {
        float sig = 1.0f / (1.0f + expf(-x[i]));
        x[i] = sig * (1.0f - sig);
    }
}
|
14,345 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <chrono>
#include <algorithm>
#include <vector>
//using sys_clock = std::chrono::system_clock;
// Exercise thrust fill/sequence/copy between host and device vectors and
// print the final device vector. Returns 0 on completion — the original
// fell off the end of a non-void function, which is undefined behavior.
int thrust_sequence()
{
    thrust::device_vector<int> D_vec(10,1);
    thrust::fill(D_vec.begin(), D_vec.begin()+7, 9);
    thrust::host_vector<int> H_vec(D_vec.begin(),D_vec.begin()+5);
    thrust::sequence(H_vec.begin(), H_vec.end(), 5, 2);
    thrust::copy(H_vec.begin(), H_vec.end(), D_vec.begin());
    int i = 0;
    // Note: range-for over a device_vector does one device-to-host read per
    // element — fine for a 10-element demo, slow in general.
    for(auto value : D_vec)
        std::cout << "D[" << i++ << "]= " << value << std::endl;
    return 0;
}
// Sort 2^24 random ints with thrust on the GPU and std::sort on the CPU,
// printing both timings. Returns 0 — the original had no return statement
// (undefined behavior for a non-void function) and carried three unused
// locals (current_h, current_d, exit).
int thrust_sort()
{
    const int limit = 1 << 24;
    std::chrono::time_point<std::chrono::system_clock> t1, t2;
    std::chrono::duration<double, std::milli> exec_time_ms;
    thrust::host_vector<int> h_vec(limit);
    thrust::generate(h_vec.begin(), h_vec.end(), rand);
    thrust::device_vector<int> d_vec = h_vec;   // host-to-device copy
    t1 = std::chrono::system_clock::now();
    thrust::sort(d_vec.begin(), d_vec.end());
    t2 = std::chrono::system_clock::now();
    exec_time_ms = t2 - t1;
    std::cout << "thrust gpu sort: " << exec_time_ms.count() << "ms." << std::endl;
    std::vector<int> stl_vec(h_vec.size());
    thrust::copy(h_vec.begin(), h_vec.end(), stl_vec.begin());
    t1 = std::chrono::system_clock::now();
    std::sort(stl_vec.begin(), stl_vec.end());
    t2 = std::chrono::system_clock::now();
    exec_time_ms = t2 - t1;
    std::cout << "stl sort: " << exec_time_ms.count() << "ms." << std::endl;
    return 0;
}
// SAXPY-style binary functor: returns a*x + y with the scale factor a fixed
// at construction. Callable from both host and device code.
struct functor
{
    const float a;  // scale applied to the first operand
    functor(float _a) : a(_a) {}
    __host__ __device__ float operator()(const float &x, const float &y) const
    {
        return a * x + y;
    }
};
// Demo of thrust::transform with a stateful functor: Y = A*X + Y for
// X = 10,20,... and Y = 1,6,..., printed afterwards. Returns 0 — the
// original fell off the end of a non-void function (undefined behavior)
// and declared an unused vector Z.
int operador()
{
    const float A = 5;
    const int size = 10;
    thrust::host_vector<float> X(size), Y(size);
    thrust::sequence(X.begin(), X.end(), 10, 10);
    thrust::sequence(Y.begin(), Y.end(), 1, 5);
    thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), functor(A));
    for(int i = 0; i < Y.size(); i++)
    {
        std::cout << "Y[" << i << "] = " << Y[i] << std::endl;
    }
    return 0;
}
// Unary functor returning x squared, usable on host and device.
// Returns T (the old version returned float for every T, silently
// truncating e.g. square<double>); identical for the square<float> use
// in this file.
template <typename T>
struct square
{
    __host__ __device__ T operator()(const T &x) const
    {
        return x * x;
    }
};
// Compute the L2 norm of {1,2,3,4} on the GPU: sqrt(sum x^2) = sqrt(30).
int main ()
{
    float x[4] = {1.0, 2.0, 3.0, 4.0};
    thrust::device_vector<float> d_vec(x, x+4);
    square<float> unary_op;
    thrust::plus<float> binary_op;
    // The init value fixes thrust's accumulator type: pass 0.0f, not the
    // int literal 0 which made the reduction accumulate (and truncate)
    // in int.
    float norm = std::sqrt(
        thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)
    );
    std::cout << norm << std::endl;
    return 0;
}
|
14,346 | // Write a first sequential implementation (matmult gpu1()) of matrix multiplication on the
// GPU that uses only a single thread. It should work for all matrix sizes. Hints:
// – You need CUDA code to allocate memory on the GPU, transfer A and B to the
// GPU, transfer C back to the CPU, and free the allocated memory.
//
// Time your kernel for small matrix sizes and compare to the reference DGEMM on the CPU.
// matrix times matrix
// m represents the number of rows (the vertical length) of A and C,
// k represents the number of columns of A and the n. of rows of B,
// n represents the number of columns (the horizontal length) of B and C.
// ____k____ ____n____ ____n____
// | | | | | |
// m | A | X k | B | = m | C |
// | | | | | |
// --------- --------- ---------
// Tiled matrix multiply kernel: C += A * B (row-major; A is m x k, B is
// k x n) using two dynamic shared-memory tiles. Assumes a square thread
// block (blockDim.x == blockDim.y), k a multiple of blockDim.x, a grid
// covering all of C, and a zero-initialized C — see the host wrapper.
__global__ void m6(int m, int n, int k, double *A, double *B, double *C) {
    double sum;
    // Global row (i) and column (j) of C handled by this thread.
    int i = blockIdx.y*blockDim.y+threadIdx.y;
    int j = blockIdx.x*blockDim.x+threadIdx.x;
    // Dynamic shared memory, split into an A tile followed by a B tile.
    extern __shared__ double two_blocks[];
    __shared__ double* A_s;
    A_s = &two_blocks[0];   // NOTE(review): all threads store the same value — benign race
    __shared__ double* B_s;
    B_s = &two_blocks[blockDim.x*blockDim.y];
    // Position within the tile.
    int ii = threadIdx.y;
    int jj = threadIdx.x;
    const int blockdim = blockDim.x;
    // Walk the k dimension one tile at a time, accumulating into C.
    for (int w = 0; w < k; w += blockdim){
        sum = 0.0;
        A_s[ii*blockdim + jj] = A[i*k+jj+w];      // A[i][w+jj]
        B_s[ii*blockdim + jj] = B[j+ii*n+w*n];    // B[w+ii][j]
        __syncthreads();   // tiles fully loaded before use
        for (int h = 0; h < blockdim; h++) {
            sum += A_s[ii*blockdim + h] * B_s[h*blockdim + jj];
        }
        __syncthreads();   // finish reading before the next load overwrites
        C[i*n + j] += sum;
    }
}
extern "C" {
// Shared-memory tiled matrix multiply C = A*B on the GPU (row-major; A is
// m x k, B is k x n, C is m x n). The kernel assumes dimensions are
// multiples of the 16x16 tile.
void matmult_gpu6(int m, int n, int k, double *A, double *B, double *C) {
    double* d_A, * d_B, * d_C;
    cudaSetDevice(2);   // NOTE(review): hard-coded device id; consider parameterizing
    cudaMalloc((void**)&d_A, m*k * sizeof(double));
    cudaMalloc((void**)&d_B, k*n * sizeof(double));
    cudaMalloc((void**)&d_C, m*n * sizeof(double));
    cudaMemcpy(d_A, A, m*k * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, k*n * sizeof(double), cudaMemcpyHostToDevice);
    // Initialize the output matrix with zeroes (the kernel accumulates).
    cudaMemset(d_C, 0, m*n * sizeof(double));
    int bs = 16;
    dim3 blockDim(bs, bs);
    // The kernel maps blockIdx.x -> column j (of n) and blockIdx.y -> row i
    // (of m), so grid.x must cover n and grid.y must cover m. The old code
    // had these swapped, under-covering C whenever m != n.
    dim3 gridDim( (n-1)/blockDim.x+1, (m-1)/blockDim.y+1 );
    // Dynamic shared memory: one A tile plus one B tile per block.
    m6<<<gridDim, blockDim, (blockDim.x*blockDim.y * 2 * sizeof(double))>>>(m, n, k, d_A, d_B, d_C);
    cudaDeviceSynchronize();
    cudaMemcpy(C, d_C, m*n * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
}
|
14,347 | #include<iostream>
// Intentionally empty kernel: exists only to exercise a device launch.
__global__ void myKernel(){
}
int main()
{
    // Fire a single-thread no-op kernel, then greet from the host.
    myKernel<<<1, 1>>>();
    std::cout << "Hello World!\n";
    return 0;
}
|
14,348 | /*
blockIdx: block index
threadIdx: thread index within block
blockDim: threads per block (2)
gridDim: blocks per launch (N/2)
*/
#include <stdio.h>
#define N 10
// Element-wise vector add c = a + b, one thread per element (the launch
// must cover the data exactly — there is no tail guard).
__global__ void sum(int *a,
                    int *b, int *c)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    c[gid] = a[gid] + b[gid];
}
// Add two N-element vectors on the GPU and print the result.
int main( void ) {
    int host_a[N];
    int host_b[N];
    int host_c[N];
    // Inputs: a[i] = b[i] = i, so c[i] should come back as 2*i.
    for (int i=0; i<N; i++) {
        host_a[i] = i;
        host_b[i] = i;
    }
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc(&dev_a, sizeof(int) * N);
    cudaMalloc(&dev_b, sizeof(int) * N);
    cudaMalloc(&dev_c, sizeof(int) * N);
    cudaMemcpy(dev_a, host_a, sizeof(int) * N,
               cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, host_b, sizeof(int) * N,
               cudaMemcpyHostToDevice);
    // N/2 blocks of 2 threads: exactly one thread per element (N is even).
    sum<<<N/2, 2>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(host_c, dev_c, sizeof(int) * N,
               cudaMemcpyDeviceToHost);
    for (int i=0; i<N; i++) {
        printf("%d ", host_c[i]);
    }
    printf("\n");
    // Release the device buffers (the old code leaked all three).
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
14,349 | #include "includes.h"
// 256-bin byte histogram: each thread walks the buffer with a grid-stride
// loop and atomically bumps the bin for every byte it visits.
__global__ void histo_kernel( unsigned char *buffer, long size, unsigned int *histo ) {
    const int stride = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x;
         idx < size;
         idx += stride) {
        atomicAdd( &histo[buffer[idx]], 1 );
    }
}
14,350 | /*
Copyright 2017 the arraydiff authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common.cuh"
#include <cuda_runtime_api.h>
#include <math_constants.h>
#include <stdint.h>
#include <stdlib.h>
// Numerically stable softmax over `num_blocks` independent rows of length
// `block_dim` (must be <= 1024). Launch: one 1024-thread CUDA block per
// row. Threads beyond block_dim pad the shared cache with the reduction
// identities (-inf for max, 0 for sum) so the block-wide reductions hold.
__global__ void block_softmax_fwd_f32(
    uint32_t block_dim,
    uint32_t num_blocks,
    const float *x,
    float *y)
{
  __shared__ float cache[1024];
  uint32_t tid = threadIdx.x;
  uint32_t block = blockIdx.x;
  uint32_t i = tid + block * block_dim;
  // Pass 1: block-wide max of the logits (numerical stability).
  float x_i = 0.0f;
  if (tid < block_dim && block < num_blocks) {
    x_i = x[i];
    cache[tid] = x_i;
  } else {
    cache[tid] = -CUDART_INF_F;
  }
  __syncthreads();
  threadblock1024_reduce_max_f32(cache);
  float max_logit = cache[0];
  __syncthreads();
  // Pass 2: block-wide sum of exp(x - max).
  float z_i = 0.0f;
  if (tid < block_dim && block < num_blocks) {
    z_i = expf(x_i - max_logit);
    cache[tid] = z_i;
  } else {
    cache[tid] = 0.0f;
  }
  __syncthreads();
  threadblock1024_reduce_sum_f32(cache);
  float sum_factor = cache[0];
  __syncthreads();
  // Normalize this thread's element.
  if (tid < block_dim && block < num_blocks) {
    y[i] = z_i / sum_factor;
  }
}
// Host launcher: one 1024-thread CUDA block per softmax row, on `stream`.
extern "C" void arraydiff_cuda_kernel_block_softmax_fwd_f32(
    size_t block_dim,
    size_t num_blocks,
    const float *x,
    float *y,
    cudaStream_t stream)
{
  // XXX: assert(block_dim <= 1024);
  // FIXME(20151022): could make more efficient use of blocks but w/e.
  block_softmax_fwd_f32<<<num_blocks, 1024, 0, stream>>>(
      block_dim, num_blocks, x, y);
}
// Forward-mode (tangent) derivative of the block softmax. Recomputes the
// softmax weights z from the primal logits `px`, then writes
//   y = py * (x - sum_j z_j * px_j / sum_j z_j).
// Same launch contract as block_softmax_fwd_f32 (1024 threads per block,
// block_dim <= 1024).
// NOTE(review): the weighted sum folds z_i * x_i with x_i = px[idx] (the
// primal logit), not the `x` argument — verify against the caller's math.
__global__ void block_softmax_tangent_fwd_f32(
    uint32_t block_dim,
    uint32_t num_blocks,
    const float *px,
    const float *x,
    const float *py,
    float *y)
{
  __shared__ float cache[1024];
  uint32_t tid = threadIdx.x;
  uint32_t block = blockIdx.x;
  uint32_t idx = tid + block_dim * block;
  // Block-wide max of the primal logits.
  float x_i = 0.0f;
  if (tid < block_dim && block < num_blocks) {
    x_i = px[idx];
    cache[tid] = x_i;
  } else {
    cache[tid] = -CUDART_INF_F;
  }
  __syncthreads();
  threadblock1024_reduce_max_f32(cache);
  float max_logit = cache[0];
  __syncthreads();
  // Unnormalized softmax weights and their block sum.
  float z_i = 0.0f;
  if (tid < block_dim && block < num_blocks) {
    z_i = expf(x_i - max_logit);
    cache[tid] = z_i;
  } else {
    cache[tid] = 0.0f;
  }
  __syncthreads();
  threadblock1024_reduce_sum_f32(cache);
  float sum_factor = cache[0];
  __syncthreads();
  // Weighted sum of the logits under the softmax weights.
  if (tid < block_dim && block < num_blocks) {
    cache[tid] = z_i * x_i;
  } else {
    cache[tid] = 0.0f;
  }
  __syncthreads();
  threadblock1024_reduce_sum_f32(cache);
  float weighted_sum_factor = cache[0];
  __syncthreads();
  if (tid < block_dim && block < num_blocks) {
    y[idx] = py[idx] * (x[idx] - weighted_sum_factor / sum_factor);
  }
}
// Host launcher for the tangent forward kernel: one 1024-thread CUDA block
// per data block, on `stream`.
extern "C" void arraydiff_cuda_kernel_block_softmax_tangent_fwd_f32(
    size_t block_dim,
    size_t num_blocks,
    const float *px,
    const float *x,
    const float *py,
    float *y,
    cudaStream_t stream)
{
  // XXX: assert(block_dim <= 1024);
  // FIXME(20151022): could make more efficient use of blocks but w/e.
  block_softmax_tangent_fwd_f32<<<num_blocks, 1024, 0, stream>>>(
      block_dim, num_blocks, px, x, py, y);
}
// Accumulate a negative-entropy penalty per block:
//   loss[block] += -beta * H(y_block), H = -sum_i y_i log y_i, beta = 0.01.
// One 1024-thread CUDA block per data block; block_dim <= 1024. Entries
// with y_i <= 0 contribute zero (avoids log of non-positive values).
__global__ void block_softmax_negentropy_loss_fwd_accumulate_f32(
    uint32_t block_dim,
    uint32_t num_blocks,
    const float *y,
    float *loss)
{
  const float beta = 0.01f;
  __shared__ float cache[1024];
  uint32_t tid = threadIdx.x;
  uint32_t block = blockIdx.x;
  uint32_t i = tid + block * block_dim;
  float y_i = 0.0f;
  float ent_i = 0.0f;
  if (tid < block_dim && block < num_blocks) {
    y_i = y[i];
    if (y_i > 0.0f) {
      ent_i = -y_i * logf(y_i);
    }
    cache[tid] = ent_i;
  } else {
    cache[tid] = 0.0f;
  }
  __syncthreads();
  threadblock1024_reduce_sum_f32(cache);
  // Thread 0 accumulates the block's entropy into the loss.
  if (tid == 0 && block < num_blocks) {
    float entropy = cache[0];
    loss[block] += -beta * entropy;
  }
}
// Host launcher: one 1024-thread CUDA block per data block, on `stream`.
extern "C" void arraydiff_cuda_kernel_block_softmax_negentropy_loss_fwd_accumulate_f32(
    size_t block_dim,
    size_t num_blocks,
    const float *y,
    float *loss,
    cudaStream_t stream)
{
  block_softmax_negentropy_loss_fwd_accumulate_f32<<<num_blocks, 1024, 0, stream>>>(
      block_dim, num_blocks, y, loss);
}
// Gradient of the negative-entropy penalty w.r.t. the softmax input:
//   dx += df[block] * (-beta) * (ent_i - y_i * H),
// with ent_i = -y_i log y_i and H = sum_i ent_i; beta = 0.01.
// One 1024-thread CUDA block per data block; block_dim <= 1024.
// NOTE(review): all active threads read cache[0] after the reduction with
// no extra barrier — relies on threadblock1024_reduce_sum_f32 ending in a
// __syncthreads(); confirm in common.cuh.
__global__ void block_softmax_negentropy_loss_bwd_f32(
    uint32_t block_dim,
    uint32_t num_blocks,
    const float *y,
    const float *df,
    float *dx)
{
  const float beta = 0.01f;
  __shared__ float cache[1024];
  uint32_t tid = threadIdx.x;
  uint32_t block = blockIdx.x;
  uint32_t i = tid + block * block_dim;
  float y_i = 0.0f;
  float ent_i = 0.0f;
  if (tid < block_dim && block < num_blocks) {
    y_i = y[i];
    if (y_i > 0.0f) {
      ent_i = -y_i * logf(y_i);
    }
    cache[tid] = ent_i;
  } else {
    cache[tid] = 0.0f;
  }
  __syncthreads();
  threadblock1024_reduce_sum_f32(cache);
  if (tid < block_dim && block < num_blocks) {
    float entropy = cache[0];
    float diff_i = -beta * (ent_i - y_i * entropy);
    dx[i] += df[block] * diff_i;
  }
}
// Host launcher: one 1024-thread CUDA block per data block, on `stream`.
extern "C" void arraydiff_cuda_kernel_block_softmax_negentropy_loss_bwd_f32(
    size_t block_dim,
    size_t num_blocks,
    const float *y,
    const float *df,
    float *dx,
    cudaStream_t stream)
{
  block_softmax_negentropy_loss_bwd_f32<<<num_blocks, 1024, 0, stream>>>(
      block_dim, num_blocks, y, df, dx);
}
// Negative log-likelihood of the softmax output: one thread per batch
// item; loss[b] = -log(y[t[b] + dim*b]) where t[b] is the target class.
__global__ void softmax_nll_loss_fwd_f32(
    uint32_t dim,
    uint32_t batch_sz,
    const float *y,
    const uint32_t *t,
    float *loss)
{
  uint32_t b = threadIdx.x + blockDim.x * blockIdx.x;
  if (b >= batch_sz) {
    return;
  }
  uint32_t target = t[b];
  loss[b] = -logf(y[target + dim * b]);
}
// Host launcher: one thread per batch item, 1024-thread blocks, on `stream`.
extern "C" void arraydiff_cuda_kernel_softmax_nll_loss_fwd_f32(
    size_t dim,
    size_t batch_sz,
    const float *y,
    const uint32_t *t,
    float *loss,
    cudaStream_t stream)
{
  softmax_nll_loss_fwd_f32<<<(batch_sz+1024-1)/1024, 1024, 0, stream>>>(
      dim, batch_sz, y, t, loss);
}
// Gradient of softmax+NLL: dx += df[b] * (y - onehot(t[b])), one thread
// per (class, batch item) entry.
__global__ void softmax_nll_loss_bwd_f32(
    uint32_t dim,
    uint32_t batch_sz,
    const float *y,
    const uint32_t *t,
    const float *df,
    float *dx)
{
  uint32_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  uint32_t cls = gid % dim;
  uint32_t b = gid / dim;
  if (cls < dim && b < batch_sz) {
    float onehot = (cls == t[b]) ? 1.0f : 0.0f;
    dx[gid] += df[b] * (y[gid] - onehot);
  }
}
// Host launcher: one thread per matrix entry, 1024-thread blocks, on `stream`.
extern "C" void arraydiff_cuda_kernel_softmax_nll_loss_bwd_f32(
    size_t dim,
    size_t batch_sz,
    const float *y,
    const uint32_t *t,
    const float *df,
    float *dx,
    cudaStream_t stream)
{
  uint32_t n = dim * batch_sz;
  softmax_nll_loss_bwd_f32<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      dim, batch_sz, y, t, df, dx);
}
// Per-block KL-style loss between target distribution t and softmax output
// y: each thread computes t_i * (log t_i - log y_i) for t_i > 0, or
// -t_i * log y_i otherwise, then the block combines the terms.
// NOTE(review): the combine uses threadblock1024_reduce_max_f32, so
// loss[block] is the MAX per-element term, not the sum — the sibling
// entropy kernels reduce with _sum; confirm max is intentional here.
__global__ void block_softmax_kl2_loss_fwd_f32_kernel(
    uint32_t block_dim,
    uint32_t num_blocks,
    const float *y,
    const float *t,
    float *loss)
{
  __shared__ float cache[1024];
  uint32_t tid = threadIdx.x;
  uint32_t block = blockIdx.x;
  uint32_t idx = tid + block_dim * block;
  if (tid < block_dim && block < num_blocks) {
    float y_i = y[idx];
    float t_i = t[idx];
    float kl_i = 0.0f;
    if (t_i > 0.0f) {
      kl_i = t_i * (logf(t_i) - logf(y_i));
    } else {
      kl_i = -t_i * logf(y_i);
    }
    cache[tid] = kl_i;
  } else {
    cache[tid] = 0.0f;
  }
  __syncthreads();
  threadblock1024_reduce_max_f32(cache);
  // Thread 0 publishes the combined value for this block.
  if (tid < block_dim && block < num_blocks) {
    if (tid == 0) {
      loss[block] = cache[0];
    }
  }
}
// Host launcher: one 1024-thread CUDA block per data block, on `stream`.
extern "C" void arraydiff_cuda_kernel_block_softmax_kl2_loss_fwd_f32(
    size_t block_dim,
    size_t num_blocks,
    const float *y,
    const float *t,
    float *loss,
    cudaStream_t stream)
{
  block_softmax_kl2_loss_fwd_f32_kernel<<<num_blocks, 1024, 0, stream>>>(
      block_dim, num_blocks, y, t, loss);
}
// Gradient of softmax + KL-style loss: dx += df[b] * (y - t), one thread
// per (class, batch item) entry.
__global__ void softmax_kl2_loss_bwd_f32_kernel(
    uint32_t dim,
    uint32_t batch_sz,
    const float *y,
    const float *t,
    const float *df,
    float *dx)
{
  uint32_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  uint32_t cls = gid % dim;
  uint32_t b = gid / dim;
  if (cls < dim && b < batch_sz) {
    dx[gid] += df[b] * (y[gid] - t[gid]);
  }
}
// Host launcher: one thread per matrix entry, 1024-thread blocks, on `stream`.
extern "C" void arraydiff_cuda_kernel_softmax_kl2_loss_bwd_f32(
    size_t dim,
    size_t batch_sz,
    const float *y,
    const float *t,
    const float *df,
    float *dx,
    cudaStream_t stream)
{
  uint32_t n = dim * batch_sz;
  softmax_kl2_loss_bwd_f32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      dim, batch_sz, y, t, df, dx);
}
// Forward-mode derivative of the KL loss: per block, each thread forms
// -t_i * y_i / py_i (y = tangent of softmax output, py = primal output)
// and the block combines the terms.
// NOTE(review): combines with threadblock1024_reduce_max_f32 (max, not
// sum) — mirroring the forward KL kernel; confirm this is intentional.
__global__ void block_softmax_tangent_kl2_loss_fwd_f32_kernel(
    uint32_t block_dim,
    uint32_t num_blocks,
    const float *py,
    const float *y,
    const float *t,
    float *loss)
{
  __shared__ float cache[1024];
  uint32_t tid = threadIdx.x;
  uint32_t block = blockIdx.x;
  uint32_t idx = tid + block_dim * block;
  if (tid < block_dim && block < num_blocks) {
    float py_i = py[idx];
    float y_i = y[idx];
    float t_i = t[idx];
    cache[tid] = -t_i * y_i / py_i;
  } else {
    cache[tid] = 0.0f;
  }
  __syncthreads();
  threadblock1024_reduce_max_f32(cache);
  // Thread 0 publishes the combined value for this block.
  if (tid < block_dim && block < num_blocks) {
    if (tid == 0) {
      loss[block] = cache[0];
    }
  }
}
// Host launcher: one 1024-thread CUDA block per data block, on `stream`.
extern "C" void arraydiff_cuda_kernel_block_softmax_tangent_kl2_loss_fwd_f32(
    size_t block_dim,
    size_t num_blocks,
    const float *py,
    const float *y,
    const float *t,
    float *loss,
    cudaStream_t stream)
{
  block_softmax_tangent_kl2_loss_fwd_f32_kernel<<<num_blocks, 1024, 0, stream>>>(
      block_dim, num_blocks, py, y, t, loss);
}
// Backward pass of the tangent KL loss: dx += df[b] * y, one thread per
// (class, batch item) entry.
__global__ void softmax_tangent_kl2_loss_bwd_f32_kernel(
    uint32_t dim,
    uint32_t batch_sz,
    const float *y,
    const float *df,
    float *dx)
{
  uint32_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  uint32_t cls = gid % dim;
  uint32_t b = gid / dim;
  if (cls < dim && b < batch_sz) {
    dx[gid] += df[b] * y[gid];
  }
}
// Host launcher: one thread per matrix entry, 1024-thread blocks, on `stream`.
extern "C" void arraydiff_cuda_kernel_softmax_tangent_kl2_loss_bwd_f32(
    size_t dim,
    size_t batch_sz,
    const float *y,
    const float *df,
    float *dx,
    cudaStream_t stream)
{
  uint32_t n = dim * batch_sz;
  softmax_tangent_kl2_loss_bwd_f32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      dim, batch_sz, y, df, dx);
}
// Clipped likelihood-ratio loss (forward): one thread per batch item;
// loss[b] = min(y[index[b] + dim*b] / t[b], lr_clip).
__global__ void softmax_lr_loss_fwd_f32_kernel(
    uint32_t dim,
    uint32_t batch_sz,
    const float *y,
    const uint32_t *index,
    const float *t,
    float *loss,
    float lr_clip)
{
  uint32_t b = threadIdx.x + blockDim.x * blockIdx.x;
  if (b >= batch_sz) {
    return;
  }
  float selected = y[index[b] + dim * b];
  float ratio = selected / t[b];
  loss[b] = min(ratio, lr_clip);
}
// Host launcher: one thread per batch item, 1024-thread blocks, on `stream`.
extern "C" void arraydiff_cuda_kernel_softmax_lr_loss_fwd_f32(
    size_t dim,
    size_t batch_sz,
    const float *y,
    const uint32_t *index,
    const float *t,
    float *loss,
    float lr_clip,
    cudaStream_t stream)
{
  softmax_lr_loss_fwd_f32_kernel<<<(batch_sz + 1024 - 1) / 1024, 1024, 0, stream>>>(
      dim, batch_sz, y, index, t, loss, lr_clip);
}
// Backward pass of the clipped likelihood-ratio loss: one thread per
// (class j, batch item) entry; the gradient is zeroed wherever the forward
// ratio was clipped.
__global__ void softmax_lr_loss_bwd_f32_kernel(
    uint32_t dim,
    uint32_t batch_sz,
    const float *y,
    const uint32_t *index,
    const float *t,
    const float *df,
    float *dx,
    float lr_clip)
{
  uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
  uint32_t j = idx % dim;
  uint32_t batch_idx = idx / dim;
  if (j < dim && batch_idx < batch_sz) {
    uint32_t index_i = index[batch_idx];
    float delta_i = (float)(j == index_i);
    float y_ij = y[idx];
    // Probability of the selected class for this batch item. The old code
    // offset by `dim * batch_sz` (past the end of y for every item but the
    // last) instead of `dim * batch_idx`; cf. the forward kernel's indexing.
    float y_i = y[index_i + dim * batch_idx];
    float t_i = t[batch_idx];
    float lr_i = y_i / t_i;
    if (lr_i < lr_clip) {
      dx[idx] += df[batch_idx] * lr_i * (delta_i - y_ij);
    }
  }
}
// Host launcher: one thread per matrix entry, 1024-thread blocks, on `stream`.
extern "C" void arraydiff_cuda_kernel_softmax_lr_loss_bwd_f32(
    size_t dim,
    size_t batch_sz,
    const float *y,
    const uint32_t *index,
    const float *t,
    const float *df,
    float *dx,
    float lr_clip,
    cudaStream_t stream)
{
  uint32_t n = dim * batch_sz;
  softmax_lr_loss_bwd_f32_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      dim, batch_sz, y, index, t, df, dx, lr_clip);
}
|
14,351 | #include <stdio.h>
#include <stdlib.h>
#define N 10
#define THREADS 10
#define BLOCKS 1
// Reverses `arr` into `res`: element i lands at position size-1-i.
// No bounds guard -- the launch must supply exactly `size` threads.
__global__ void reversing(double* arr, double* res, int size){
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	res[(size - 1) - i] = arr[i];
}
// Transposes `mat` into `res`, one thread per element.
// NOTE(review): `res[c * ncol + r]` also uses ncol as the row stride of
// the result, which is only correct for square matrices (and that is how
// main uses it, with N x N inputs); a general nrow x ncol transpose would
// need the row count as well -- confirm before reusing.
__global__ void transpose(double* mat, double* res, int ncol){
	int index = threadIdx.x + blockIdx.x * blockDim.x;
	// global index associated to matrix
	int r = index / ncol;
	int c = index % ncol;
	// transposition
	res[c * ncol + r] = mat[index];
}
// Multiplies A (nr_A x nc_A) by B (nc_A x nc_B) into C, one thread per
// element of C.  No bounds guard: the launch must supply exactly
// nr_A * nc_B threads.
__global__ void mm_mult(double* A, double* B, double* C, int nr_A, int nc_A, int nc_B){
	int index = threadIdx.x + blockIdx.x * blockDim.x;
	// global index associated to matrix C
	int r = index / nc_B;
	int c = index % nc_B;
	int k;
	double tmp = 0.;
	for(k = 0; k < nc_A; k++)
		// Bug fix: B's row stride is its own column count nc_B, not nc_A
		// (the two coincide only for square B, which is why the demo worked).
		tmp += A[k + r * nc_A] * B[c + k * nc_B];
	C[index] = tmp;
}
// Demo driver: reverses a length-N vector on the GPU, then transposes an
// N x N matrix and multiplies the transpose by the identity, printing the
// intermediate and final results.  Requires THREADS * BLOCKS == N.
int main(int argc, char** argv){
	int i, j, i_tmp;
	double* arr = (double*)malloc(N * sizeof(double));
	double *dev_arr, *res;
	double* mat = (double*)malloc(N * N * sizeof(double));
	double *dev_mat, *res_mat;
	double *identity = (double*)malloc(N * N * sizeof(double));
	double *dev_id, *dev_prod;
	if(THREADS * BLOCKS != N){
		printf("\n\tTHREADS * BLOCK must be equal to N\n\tExit\n");
		return 0;
	}
	cudaMalloc(&dev_arr, N * sizeof(double));
	cudaMalloc(&res, N * sizeof(double));
	// arr = [0, 1, ..., N-1]
	for(i = 0; i < N; i++)
		arr[i] = (double) i;
	cudaMemcpy(dev_arr, arr, N * sizeof(double), cudaMemcpyHostToDevice);
	// NOTE(review): <<<grid, block>>> -- this launches THREADS blocks of
	// BLOCKS threads, i.e. the two macros look swapped relative to their
	// names.  It still covers exactly N = THREADS * BLOCKS indices, so the
	// result is correct.
	reversing<<<THREADS, BLOCKS>>>(dev_arr, res, N);
	cudaMemcpy(arr, res, N * sizeof(double), cudaMemcpyDeviceToHost);
	for(i = 0; i < N; i++)
		printf("\t%lg", arr[i]);
	printf("\n");
	cudaFree(dev_arr);
	cudaFree(res);
	free(arr);
	printf("\n\tTRANSPOSE\n\n");
	// Fill mat with its own linear index and build an identity matrix.
	for(i = 0; i < N; i++){
		i_tmp = i * N;
		for(j = 0; j < N; j++){
			mat[j + i_tmp] = j + i_tmp;
			if(i != j)
				identity[i_tmp + j] = 0.;
			else
				identity[i_tmp + j] = 1.;
		}
	}
	for(i = 0; i < N; i++){
		i_tmp = i * N;
		for(j = 0; j < N; j++)
			printf("\t%lg", mat[i_tmp + j]);
		printf("\n");
	}
	cudaMalloc(&dev_mat, N * N * sizeof(double));
	cudaMalloc(&res_mat, N * N * sizeof(double));
	cudaMalloc(&dev_id, N * N * sizeof(double));
	cudaMalloc(&dev_prod,N * N * sizeof(double));
	cudaMemcpy(dev_mat, mat, N * N * sizeof(double), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_id, identity, N * N * sizeof(double), cudaMemcpyHostToDevice);
	// N*N threads total: transpose mat, then multiply by the identity,
	// which should reproduce the transpose unchanged.
	transpose<<<THREADS, THREADS>>>(dev_mat, res_mat, N);
	mm_mult<<<THREADS, THREADS>>>(res_mat, dev_id, dev_prod, N, N, N);
	cudaMemcpy(mat, dev_prod, N * N * sizeof(double), cudaMemcpyDeviceToHost);
	printf("\n\n");
	for(i = 0; i < N; i++){
		i_tmp = i * N;
		for(j = 0; j < N; j++)
			printf("\t%lg", mat[i_tmp + j]);
		printf("\n");
	}
	printf("\n");
	// NOTE(review): mat, identity and the remaining device buffers are
	// never freed; the OS reclaims them at exit, but explicit
	// free/cudaFree here would be cleaner.
	return 0;
}
|
14,352 | #include <cuda_runtime.h>
const int CachDim = 64;
// Box blur using a shared-memory tile: averages a (2*borderSize+1)^2
// window around each pixel of a width x height uchar4 image.
// Expected launch: 2D blocks/grid covering the image.
// NOTE(review): the static tile is CachDim x CachDim (64x64); the staging
// loops below index it up to blockDim + 2*borderSize - 1 per axis, so
// blockDim.x/y + 2*borderSize must not exceed 64 or the tile overflows --
// confirm against the host-side block dimensions.
// NOTE(review): apron texels outside the image are skipped during staging
// and stay uninitialized in shared memory; they are only read for pixels
// whose whole window is inside the image, so in-range results are safe,
// but border pixels fall back to the gray `value` below.
__global__ void boxBlurSharedKernel(uchar4* pDataIn, uchar4* pDataOut,
	int width, int height, int borderSize)
{
	// get the position for the current thread
	const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
	const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
	// calculate the memory adress
	const int tid = y * width + x;
	__shared__ uchar4 cache[CachDim][CachDim];
	// Fallback color for pixels whose window leaves the image.
	uchar4 value = {127, 127, 127, 255};
	int meanLength = 2 * borderSize + 1;
	int denom = meanLength * meanLength;
	// Cooperatively stage the block's tile plus apron into shared memory.
	for(int cy = threadIdx.y; cy < blockDim.y + meanLength; cy += blockDim.y)
		for(int cx = threadIdx.x; cx < blockDim.x + meanLength; cx += blockDim.x)
		{
			int tmpX = (blockIdx.x * blockDim.x) + cx - borderSize;
			int tmpY = (blockIdx.y * blockDim.y) + cy - borderSize;
			if(tmpX >= 0 && tmpX < width
				&& tmpY >= 0 && tmpY < height)
			{
				cache[cy][cx] = pDataIn[tmpY * width + tmpX];
			}
		}
	__syncthreads();
	// Only average windows fully inside the image; edges keep `value`.
	if(x >= borderSize && y >= borderSize
		&& x + borderSize < width && y + borderSize < height)
	{
		int3 sum = {0, 0, 0};
		for(int cy = 0; cy < meanLength; cy++)
			for(int cx = 0; cx < meanLength; cx++)
			{
				uchar4 locValue = cache[threadIdx.y + cy][threadIdx.x + cx];
				sum.x += locValue.x;
				sum.y += locValue.y;
				sum.z += locValue.z;
			}
		value.x = (unsigned char)(sum.x / denom);
		value.y = (unsigned char)(sum.y / denom);
		value.z = (unsigned char)(sum.z / denom);
	}
	if(x < width && y < height)
	{
		// write the value back to the global memory
		pDataOut[tid] = value;
	}
}
// Host wrapper: allocates device buffers, uploads the image, runs the
// shared-memory box blur over a grid covering width x height, and copies
// the blurred image back to pDataOut.
void boxBlurShared(uchar4* pDataIn, uchar4* pDataOut,
	int width, int height, int blurSize, int blockDimX, int blockDimY)
{
	const unsigned int numBytes = sizeof(uchar4) * width * height;
	uchar4* pDevIn;
	uchar4* pDevOut;
	cudaError_t res;
	// device buffers for input and output image
	res = cudaMalloc((void **) &pDevIn, numBytes);
	res = cudaMalloc((void **) &pDevOut, numBytes);
	// upload the source image
	res = cudaMemcpy(pDevIn, pDataIn, numBytes, cudaMemcpyHostToDevice);
	// one extra block per axis to cover the remainder
	dim3 block(blockDimX, blockDimY);
	dim3 grid(width / block.x + 1, height / block.y + 1);
	boxBlurSharedKernel<<<grid, block>>>(pDevIn, pDevOut,
		width, height, blurSize);
	// download the result (blocking copy also synchronizes the kernel)
	res = cudaMemcpy(pDataOut, pDevOut, numBytes, cudaMemcpyDeviceToHost);
	res = cudaFree(pDevIn);
	res = cudaFree(pDevOut);
}
|
14,353 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#define TILE_SIZE 16
// Tiled SGEMM: each thread computes one element of C = A x B by streaming
// TILE_SIZE x TILE_SIZE tiles of A and B through shared memory.
// Requires blockDim == (TILE_SIZE, TILE_SIZE) (as launched by basicSgemm).
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
    /********************************************************************
     *
     * Compute C = A x B
     * where A is a (m x k) matrix
     * where B is a (k x n) matrix
     * where C is a (m x n) matrix
     *
     * Use shared memory for tiling
     *
     ********************************************************************/
    __shared__ float ds_M[TILE_SIZE][TILE_SIZE];
    __shared__ float ds_N[TILE_SIZE][TILE_SIZE];
    int bx = blockIdx.x; int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;
    int Row = by * blockDim.y + ty;
    int Col = bx * blockDim.x + tx;
    float Cvalue = 0.0;
    // Loop over the M and N tiles required to compute the P element
    for (int p = 0; p < (k-1)/TILE_SIZE+1; ++p) {
        // Collaborative loading of M and N tiles into shared memory;
        // out-of-range entries are zero-padded so the dot product is safe.
        if (Row<m && p*TILE_SIZE+tx<k)
            ds_M[ty][tx] = A[Row*k + p*TILE_SIZE+tx];
        else
            ds_M[ty][tx] = 0.0;
        if (p*TILE_SIZE+ty<k && Col<n)
            ds_N[ty][tx] = B[(p*TILE_SIZE+ty)*n + Col];
        else
            ds_N[ty][tx] = 0.0;
        __syncthreads();  // tiles fully loaded before use
        for (int i=0; i<TILE_SIZE; ++i) Cvalue += ds_M[ty][i] * ds_N[i][tx];
        __syncthreads();  // all reads done before the next load overwrites
    }
    if (Row<m && Col<n) C[Row*n+Col] = Cvalue;
}
// Minimal SGEMM front-end: only supports C = A * B (no transposes,
// alpha == 1, beta == 0); any other request is rejected with a message.
// lda/ldb/ldc are accepted for interface compatibility but unused.
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
    if (transa != 'N' && transa != 'n') {
        printf("unsupported value of 'transa'\n");
        return;
    }
    if (transb != 'N' && transb != 'n') {
        printf("unsupported value of 'transb'\n");
        return;
    }
    // alpha must be 1.0f (within tolerance)
    if (alpha - 1.0f > 1e-10 || alpha - 1.0f < -1e-10) {
        printf("unsupported value of alpha\n");
        return;
    }
    // beta must be 0.0f (within tolerance)
    if (beta - 0.0f > 1e-10 || beta - 0.0f < -1e-10) {
        printf("unsupported value of beta\n");
        return;
    }
    // One thread per element of C; TILE_SIZE x TILE_SIZE blocks with
    // ceil-div grids so the whole m x n output is covered.
    dim3 grid((n - 1) / TILE_SIZE + 1, (m - 1) / TILE_SIZE + 1, 1);
    dim3 block(TILE_SIZE, TILE_SIZE, 1);
    mysgemm<<< grid, block >>>(m, n, k, A, B, C);
}
14,354 | //
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <cuda_runtime.h>
#define N 512
//
// kernel routine
//
// Element-wise vector add indexed by threadIdx only: assumes a
// single-block launch with one thread per element.
__global__ void add_threads(int *a, int *b, int *c)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
//
// main code
//
//int main(int argc, char **argv)
// Fills two length-N int arrays, adds them on the GPU with a single-block
// launch, verifies the result on the host, and prints PASS/FAIL.
int main(void)
{
	int *a, *b, *c;
	int *dev_a, *dev_b, *dev_c;
	int size = N * sizeof(int);
	time_t t;
	printf("DEBUG: Size of 'int' type: %lu\n", sizeof(int));
	// Seed the host RNG (only used when the random init path is enabled).
	srand((unsigned) time(&t));
	// initialise card
	//cutilDeviceInit(argc, argv);
	// allocate device copies of a, b, c
	cudaMalloc( (void**)&dev_a, size );
	cudaMalloc( (void**)&dev_b, size );
	cudaMalloc( (void**)&dev_c, size );
	a = (int*)malloc( size );
	b = (int*)malloc( size );
	c = (int*)malloc( size );
	// Fill inputs: the constant path (5 + 5) is active; flip the #if to
	// use random values instead.
	for (int i=0; i < N; ++i)
	{
	#if 0
		a[i] = rand()%N;
		b[i] = rand()%N;
	#else
		a[i] = 5;
		b[i] = 5;
	#endif
	}
	printf("DEBUG: a[%d]=%d, b[%d]=%d\n",0, a[0], 0, b[0]);
	printf("DEBUG: a[%d]=%d, b[%d]=%d\n",N-1, a[N-1], N-1, b[N-1]);
	// copy inputs to device
	cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice );
	cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice );
	printf("INFO: Launching CUDA kernel: add_block with blocks=%d, threads=%d...", 1, N);
	// launch add() kernel with N parallel blocks
	// NOTE(review): actually 1 block of N threads, which matches
	// add_threads' threadIdx-only indexing; the comment above and the log
	// text say otherwise.
	add_threads<<< 1, N >>>( dev_a, dev_b, dev_c );
	printf(" Done\n");
	// copy device result back to host copy of c
	cudaMemcpy( c, dev_c, size, cudaMemcpyDeviceToHost );
	#if 1
	// Verify element-wise that c == a + b; PASS is printed only after the
	// final element checks out.
	for (int i=0; i<N; i++)
	{
		if (fabs(a[i]+b[i]-c[i]) > 1e-5)
		{
			printf("ERROR: *** FAILED ***\n");
			break;
		} else
		{
			if (i == (N -1))
				printf("INFO: PASS\n");
		}
		//printf("Checking results %d\n", a[i]+b[i]-c[i]);
	}
	#endif
	#if 1
	printf("DEBUG: a[0]=%d, b[0]=%d, c[0]=%d\n", a[0], b[0], c[0]);
	printf("DEBUG: a[%d]=%d, b[%d]=%d, c[%d]=%d\n", N-1, a[N-1], N-1, b[N-1], N-1, c[N-1]);
	//printf("Checking results %d\n", a[0]+b[0]-c[0]);
	#endif
	free( a );
	free( b );
	free( c );
	cudaFree( dev_a );
	cudaFree( dev_b );
	cudaFree( dev_c );
	cudaDeviceReset();
	return 0;
}
|
14,355 | #include "includes.h"
// Copies column `si` of the column-major (nDB rows) matrix EE into the
// dense vector EBV, one thread per row.
__global__ void chooseSample ( const int nDB, const int si, const float *EE, float *EBV ) {
	int row = threadIdx.x + blockDim.x * blockIdx.x;
	if ( row >= nDB ) {
		return;
	}
	EBV[row] = EE[row + si * nDB];
}
14,356 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <chrono>
// Input size
int const BATCH = 1;
int const DEPTH = 3;
int const WIDTH = 1000;
int const LENGTH = 1000;
// Kernel characteristics
int const ZPADX = 0;
int const ZPADY = 0;
int const STRIDEX = 1;
int const STRIDEY = 1;
int const CONV_RECP_SIZEX = 3;
int const CONV_RECP_SIZEY = 3;
int const NUM_OF_KERNELS = 50;
int const convLayerSizeX = ((WIDTH - CONV_RECP_SIZEX + 2 * ZPADX) / STRIDEX + 1);
int const convLayerSizeY = ((LENGTH - CONV_RECP_SIZEY + 2 * ZPADY) / STRIDEY + 1);
// Direct convolution: each thread is intended to produce one output
// element -- DEP selects the filter, (ROW, COL) the position inside the
// output feature map -- accumulating over DEPTH input channels and the
// CONV_RECP_SIZEY x CONV_RECP_SIZEX receptive field.
// NOTE(review): ROW/DEP are decoded with a divisor of
// convLayerSizeX * convLayerSizeY, but main lays threads out with a
// y-extent of convLayerSizeY * NUM_OF_KERNELS, which would call for a
// divisor of convLayerSizeY -- verify the intended thread layout.
// NOTE(review): there is no bounds guard, and outputm is accumulated with
// += without being zeroed first; with the rounded-up grid in main, excess
// threads read and write out of range as written.
__global__
void conv(float* inputm, float* weights, float* outputm )
{
	int ROW = (blockIdx.y * blockDim.y + threadIdx.y) % (convLayerSizeX * convLayerSizeY);
	int COL = blockIdx.x * blockDim.x + threadIdx.x;
	int DEP = (blockIdx.y * blockDim.y + threadIdx.y) / (convLayerSizeX * convLayerSizeY);
	for (int i = 0; i < DEPTH; i++)  // input channels
	{
		for (int j = 0; j < CONV_RECP_SIZEY; j++)  // receptive-field rows
		{
			for (int l = 0; l < CONV_RECP_SIZEX; l++)  // receptive-field cols
			{
				outputm[DEP * convLayerSizeY * convLayerSizeX + ROW * convLayerSizeX + COL] += inputm[i * WIDTH * LENGTH + (j + ROW * STRIDEY) * WIDTH + (l + COL * STRIDEX)] * weights[DEP * DEPTH * CONV_RECP_SIZEX * CONV_RECP_SIZEY + i * CONV_RECP_SIZEX * CONV_RECP_SIZEY + j * CONV_RECP_SIZEX + l];
			}
		}
	}
}
// Builds a synthetic input volume and filter weights, runs the direct
// convolution on the GPU, times the kernel with CUDA events, and copies
// the result back to the host.
int main()
{
	cudaEvent_t start, stop;
	float time;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaError_t cudaStatus;
	float* hinputMatrix = new float [BATCH * DEPTH * LENGTH * WIDTH];
	float* hconvLayer = new float[NUM_OF_KERNELS * convLayerSizeY * convLayerSizeX];
	float* hconvLayerWeights = new float[NUM_OF_KERNELS * DEPTH * CONV_RECP_SIZEY * CONV_RECP_SIZEX];
	// GENERATING INPUT: a ramp 0, 0.5, 1.0, ... across the whole volume.
	float x = 0;
	for (int b = 0; b < BATCH; b++)
	{
		for (int c = 0; c < DEPTH; c++)
		{
			for (int j = 0; j < LENGTH; j++)
			{
				for (int i = 0; i < WIDTH; i++)
				{
					hinputMatrix[b * DEPTH * LENGTH *WIDTH + c * LENGTH * WIDTH + j * WIDTH + i] = x;
					x += 0.5;
				}
			}
		}
	}
	// GENERATING WEIGHTS: a ramp 0, 0.1, 0.2, ... across all filters.
	float w = 0;
	for (int d = 0; d < NUM_OF_KERNELS; d++)
	{
		for (int c = 0; c < DEPTH; c++)
		{
			for (int j = 0; j < CONV_RECP_SIZEY; j++)
			{
				for (int i = 0; i < CONV_RECP_SIZEX; i++)
				{
					hconvLayerWeights[d * DEPTH * CONV_RECP_SIZEX * CONV_RECP_SIZEY + c * CONV_RECP_SIZEX * CONV_RECP_SIZEY + j * CONV_RECP_SIZEX + i] = w;
					w += 0.1;
				}
			}
		}
	}
	float* dinputMatrix;
	float* dconvLayerWeights;
	float* dconvLayer;
	cudaMalloc((void **)&dconvLayer, (NUM_OF_KERNELS * convLayerSizeY * convLayerSizeX) * sizeof(float));
	cudaMalloc((void **)&dconvLayerWeights, (NUM_OF_KERNELS * DEPTH * CONV_RECP_SIZEY * CONV_RECP_SIZEX) * sizeof(float));
	cudaMalloc((void **)&dinputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float));
	cudaMemcpy(dinputMatrix, hinputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(dconvLayerWeights, hconvLayerWeights, (NUM_OF_KERNELS * DEPTH * CONV_RECP_SIZEY * CONV_RECP_SIZEX) * sizeof(float), cudaMemcpyHostToDevice);
	// Bug fix: the conv kernel accumulates with +=, but cudaMalloc does
	// not initialize memory -- zero the output buffer before the launch.
	cudaMemset(dconvLayer, 0, (NUM_OF_KERNELS * convLayerSizeY * convLayerSizeX) * sizeof(float));
	// Launch configuration: fall back to 32x32 blocks with ceil-div grids
	// when the naive one-block layout exceeds 1024 threads.
	dim3 threadsPerBlock(convLayerSizeX, convLayerSizeY * NUM_OF_KERNELS);
	dim3 blocksPerGrid(1, 1);
	if (NUM_OF_KERNELS * convLayerSizeY * convLayerSizeX > 1024) {
		threadsPerBlock.x = 32;
		threadsPerBlock.y = 32;
		blocksPerGrid.x = ceil(double(convLayerSizeX) / double(threadsPerBlock.x));
		blocksPerGrid.y = ceil(double(convLayerSizeY * NUM_OF_KERNELS) / double(threadsPerBlock.y));
	}
	cudaEventRecord(start, 0);
	conv<<< blocksPerGrid, threadsPerBlock >>> (dinputMatrix, dconvLayerWeights, dconvLayer);
	cudaStatus = cudaEventRecord(stop, 0);
	cudaStatus = cudaEventSynchronize(stop);
	cudaDeviceSynchronize();
	cudaStatus = cudaEventElapsedTime(&time, start, stop);
	time = time * 1000;  // ms -> us
	cudaMemcpy(hconvLayer, dconvLayer, (NUM_OF_KERNELS * convLayerSizeY * convLayerSizeX) * sizeof(float), cudaMemcpyDeviceToHost);
	// PRINTING RESULTS (per-element printing left disabled, as before)
	std::cout << "Results:\n" << "Convolution matrix:\n";
	// CLEAN UP
	printf("Time for the kernel: %f us\n", time);
	// Fix: events and host buffers were previously never released.
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFree(dconvLayerWeights);
	cudaFree(dinputMatrix);
	cudaFree(dconvLayer);
	delete[] hinputMatrix;
	delete[] hconvLayer;
	delete[] hconvLayerWeights;
	return 0;
}
|
14,357 | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
// Prints command-line usage with example invocations, then terminates
// the program via exit(0).
void print_instructions() {
    printf("usage: ./mandelbrot_cuda c_x_min c_x_max c_y_min c_y_max image_size x_grid_dimension y_grid_dimension\n");
    printf("examples with image_size = 4096, x_grid_dimension = 64 and y_grid_dimension = 64:\n");
    printf(" Full Picture: ./mandelbrot_cuda -2.5 1.5 -2.0 2.0 4096 64 64\n");
    printf(" Seahorse Valley: ./mandelbrot_cuda -0.8 -0.7 0.05 0.15 4096 64 64\n");
    printf(" Elephant Valley: ./mandelbrot_cuda 0.175 0.375 -0.1 0.1 4096 64 64\n");
    printf(" Triple Spiral Valley: ./mandelbrot_cuda -0.188 -0.012 0.554 0.754 4096 64 64\n");
    exit(0);
};
// Writes the planar image buffer (three planes of image_buffer_size
// bytes: R, G, then B) to "output.ppm" as a binary P6 file.
void write_to_file(unsigned char *image_buffer, int i_x_max, int i_y_max,
                   int image_buffer_size) {
    const char *filename = "output.ppm";
    const char *comment = "# ";
    int max_color_component_value = 255;
    FILE *file = fopen(filename, "wb");
    // PPM header: magic, comment, dimensions, max channel value.
    fprintf(file, "P6\n %s\n %d\n %d\n %d\n", comment, i_x_max, i_y_max,
            max_color_component_value);
    // Interleave one RGB triple per pixel from the three planes.
    for (int i = 0; i < image_buffer_size; i++) {
        unsigned char rgb[3];
        rgb[0] = image_buffer[i];
        rgb[1] = image_buffer[i + image_buffer_size];
        rgb[2] = image_buffer[i + (image_buffer_size * 2)];
        fwrite(rgb, 1, 3, file);
    };
    fclose(file);
};
// Renders the Mandelbrot set: a grid-stride loop over image rows (i_y),
// with columns handled sequentially per row.  Colors are written into
// d_image_buffer as three planes (R, G, B) of image_buffer_size bytes.
// NOTE(review): only the .x launch components feed the stride; main
// launches a 2D grid, so every gridDim.y slice redundantly recomputes the
// same rows (same values written, wasted work).
// NOTE(review): the pixel offset uses i_y_max as the row stride; that is
// only correct because the image is square (i_x_max == i_y_max in main).
__global__
void compute_mandelbrot(unsigned char *d_image_buffer, int gradient_size,
                        int iteration_max, double c_x_min, double c_x_max,
                        double c_y_min, double c_y_max, int image_buffer_size,
                        int i_x_max, int i_y_max, double pixel_width,
                        double pixel_height) {
    double z_x;
    double z_y;
    double z_x_squared;
    double z_y_squared;
    double escape_radius_squared = 4;
    int iteration;
    int i_x;
    int i_y;
    double c_x;
    double c_y;
    // 16-entry escape-time palette plus a final color for interior points.
    int colors[17][3] = {
        {66, 30, 15},    {25, 7, 26},     {9, 1, 47},      {4, 4, 73},
        {0, 7, 100},     {12, 44, 138},   {24, 82, 177},   {57, 125, 209},
        {134, 181, 229}, {211, 236, 248}, {241, 233, 191}, {248, 201, 95},
        {255, 170, 0},   {204, 128, 0},   {153, 87, 0},    {106, 52, 3},
        {16, 16, 16},
    };
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (i_y = index; i_y < i_y_max; i_y += stride) {
        c_y = c_y_min + i_y * pixel_height;
        // Snap near-zero imaginary parts exactly onto the real axis.
        if (fabs(c_y) < pixel_height / 2) {
            c_y = 0.0;
        };
        for (i_x = 0; i_x < i_x_max; i_x++) {
            c_x = c_x_min + i_x * pixel_width;
            z_x = 0.0;
            z_y = 0.0;
            z_x_squared = 0.0;
            z_y_squared = 0.0;
            // Iterate z <- z^2 + c until escape or the iteration cap.
            for (iteration = 0; iteration < iteration_max &&
                 ((z_x_squared + z_y_squared) < escape_radius_squared);
                 iteration++) {
                z_y = 2 * z_x * z_y + c_y;
                z_x = z_x_squared - z_y_squared + c_x;
                z_x_squared = z_x * z_x;
                z_y_squared = z_y * z_y;
            };
            if (iteration == iteration_max) {
                // Interior point: final palette entry.
                d_image_buffer[(i_y_max * i_y) + i_x] = colors[16][0];
                d_image_buffer[(i_y_max * i_y) + i_x + image_buffer_size] =
                    colors[16][1];
                d_image_buffer[(i_y_max * i_y) + i_x + (2 * image_buffer_size)] =
                    colors[16][2];
            } else {
                // Escaped: color by escape time modulo the palette size.
                int color = iteration % 16;
                d_image_buffer[(i_y_max * i_y) + i_x] = colors[color][0];
                d_image_buffer[(i_y_max * i_y) + i_x + image_buffer_size] =
                    colors[color][1];
                d_image_buffer[(i_y_max * i_y) + i_x + (2 * image_buffer_size)] =
                    colors[color][2];
            };
        };
    };
};
// Parses the view rectangle, image size and grid dimensions from argv,
// renders the Mandelbrot set on the GPU, and copies the image back.
// (File output stays disabled, as in the original.)
int main(int argc, char *argv[]) {
    if (argc < 8) {
        print_instructions();
        return 0;
    }
    double c_x_min;
    double c_x_max;
    double c_y_min;
    double c_y_max;
    int image_size;
    int x_grid_dimension;
    int y_grid_dimension;
    unsigned char *image_buffer;
    unsigned char *d_image_buffer;
    int gradient_size = 16;
    int iteration_max = 200;
    sscanf(argv[1], "%lf", &c_x_min);
    sscanf(argv[2], "%lf", &c_x_max);
    sscanf(argv[3], "%lf", &c_y_min);
    sscanf(argv[4], "%lf", &c_y_max);
    sscanf(argv[5], "%d", &image_size);
    sscanf(argv[6], "%d", &x_grid_dimension);
    sscanf(argv[7], "%d", &y_grid_dimension);
    int i_x_max = image_size;
    int i_y_max = image_size;
    int image_buffer_size = image_size * image_size;
    double pixel_width = (c_x_max - c_x_min) / i_x_max;
    double pixel_height = (c_y_max - c_y_min) / i_y_max;
    int rgb_size = 3;
    image_buffer = (unsigned char *)malloc(sizeof(unsigned char) *
                                           image_buffer_size * rgb_size);
    cudaMalloc(&d_image_buffer, sizeof(unsigned char) * image_buffer_size * rgb_size);
    int blockSize = 256;
    dim3 gridSize = dim3(x_grid_dimension, y_grid_dimension);
    compute_mandelbrot<<<gridSize, blockSize>>>(d_image_buffer, gradient_size,
                       iteration_max, c_x_min, c_x_max, c_y_min, c_y_max,
                       image_buffer_size, i_x_max, i_y_max, pixel_width,
                       pixel_height);
    cudaDeviceSynchronize();
    cudaMemcpy(image_buffer, d_image_buffer,
               sizeof(unsigned char) * image_buffer_size * rgb_size,
               cudaMemcpyDeviceToHost);
    // write_to_file(image_buffer, i_x_max, i_y_max, image_buffer_size);
    // Fix: release the device and host buffers (previously leaked).
    cudaFree(d_image_buffer);
    free(image_buffer);
    return 0;
};
|
14,358 | ///-------------------------------------------------------------------------------------------------
// file: pipelinestresstest.cu
//
// summary: long-latency compute kernels to help stress PTask
// pipeline parallelism / asynchrony.
///-------------------------------------------------------------------------------------------------
typedef struct _pstress_params_t {
int g_tex_cols;
int g_tex_rows;
int g_tex_halfwin;
int g_pad1;
} PSTRESSPARMS;
///-------------------------------------------------------------------------------------------------
// Long-latency stress kernel: for each (xidx, yidx) cell of a
// g_tex_rows x g_tex_cols grid, averages an expensive trig expression
// over a (2*g_tex_halfwin)^2 window of A and B and writes the mean to C.
// Expected launch: a 2D grid/block covering the matrix.
///-------------------------------------------------------------------------------------------------
extern "C" __global__ void
op(
    float* A,
    float * B,
    float * C,
    PSTRESSPARMS parms
    )
{
    int xidx = blockDim.x * blockIdx.x + threadIdx.x;
    int yidx = blockDim.y * blockIdx.y + threadIdx.y;
    // Bug fix: guard against grids rounded up past the matrix extent --
    // without this, the C[cidx] store below writes out of bounds for the
    // tail threads (the window loops clamp reads, but not the write).
    if(xidx >= parms.g_tex_cols || yidx >= parms.g_tex_rows)
        return;
    float t = 0;
    int nCells = 0;
    // Accumulate tan(a*b)/(sin(a)*cos(b)) over the in-bounds window.
    for(int di=-parms.g_tex_halfwin; di<parms.g_tex_halfwin; di++) {
        for(int dj=-parms.g_tex_halfwin; dj<parms.g_tex_halfwin; dj++) {
            if(yidx+di < 0 || yidx+di >= parms.g_tex_rows) continue;
            if(xidx+dj < 0 || xidx+dj >= parms.g_tex_cols) continue;
            int idx = ((yidx+di) * parms.g_tex_cols) + (xidx+dj);
            float aval = A[idx];
            float bval = B[idx];
            float abprod = aval*bval;
            float sina = sin(aval);
            float cosb = cos(bval);
            float tanab = tan(abprod);
            float inc = tanab/(sina*cosb);
            t+=inc;
            nCells++;
        }
    }
    int cidx = ( yidx * parms.g_tex_cols + xidx );
    float v = nCells > 0 ? t/nCells : 0.0f;
    C[cidx] = v;
}
|
14,359 | #include "includes.h"
// Converts one BGR(A) pixel (uchar4: x=B, y=G, z=R) to CIE L*a*b*, stored
// as float4 (x=L, y=a, z=b).  Expected launch: one thread per pixel of a
// width x height image (no bounds guard -- the grid must match exactly).
__global__ void kRgb2CIELab(uchar4* inputImg, float4* outputImg, int width, int height)
{
	int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width;
	int offset = offsetBlock + threadIdx.x + threadIdx.y * width;
	uchar4 nPixel=inputImg[offset];
	// Normalize channels to [0,1]; input channel order is BGR.
	float _b=(float)nPixel.x/255.0;
	float _g=(float)nPixel.y/255.0;
	float _r=(float)nPixel.z/255.0;
	// Linear RGB -> XYZ (D65).
	float x=_r*0.412453 +_g*0.357580 +_b*0.180423;
	float y=_r*0.212671 +_g*0.715160 +_b*0.072169;
	float z=_r*0.019334 +_g*0.119193 +_b*0.950227;
	// Normalize by the D65 reference white.
	x/=0.950456;
	float y3=exp(log(y)/3.0);   // cbrt(y), reused for L below
	z/=1.088754;
	float l,a,b;
	// Lab transfer function: f(t) = t^(1/3) above 0.008856, linear below.
	x = x>0.008856 ? exp(log(x)/3.0) : (7.787*x+0.13793);
	y = y>0.008856 ? y3 : 7.787*y+0.13793;
	// Bug fix: was `z/=exp(log(z)/3.0)` which yields z^(2/3); the Lab
	// transform needs the cube root itself, matching the x and y branches.
	z = z>0.008856 ? exp(log(z)/3.0) : (7.787*z+0.13793);
	l = y>0.008856 ? (116.0*y3-16.0) : 903.3*y;
	a=(x-y)*500.0;
	b=(y-z)*200.0;
	float4 fPixel;
	fPixel.x=l;
	fPixel.y=a;
	fPixel.z=b;
	outputImg[offset]=fPixel;
}
14,360 | #include <math.h>
/*
This file contains the custome kernels used by the program
*/
//Helper function for device used to compute global thread ID
__device__
unsigned int getThreadID() {
    // Flatten a 2D block within a 2D grid into one linear global thread id.
    const unsigned int blockSize = blockDim.x * blockDim.y;
    const unsigned int localId = threadIdx.x + blockDim.x * threadIdx.y;
    const unsigned int blockId = blockIdx.x + gridDim.x * blockIdx.y;
    return blockId * blockSize + localId;
}
/*
Intended to be used on the result of (Vector Matrix) X (Centroid Matrix)
Each thread iterates over all the distances between a single vector and the other centroids.
This will normalize the distance, and keep track of the centroid whose distance is maximized (most similar)
The result will be stored in the result vector, which will hold the closest centroid for each vector.
*/
// One thread per vector.  The CSR triple (rowIndex, colIndex, valIndex)
// holds each vector's dot products against the centroids; this kernel
// normalizes each to cosine similarity and records the arg-max centroid
// for that vector in result[].
__global__
void kernel_normalize_and_find_closest(int *rowIndex, int *colIndex, float *valIndex,
                                float *vectorMagnitude, int numVectors,
                                float *centroidMagnitude, int numCentroids, int *result) {
    unsigned int thread_idx = getThreadID();
    if (thread_idx < numVectors) {
        float vectorLength = vectorMagnitude[thread_idx];
        float centroidLength;
        // CSR extent of this vector's stored dot products.
        int rowStartOffset = rowIndex[thread_idx];
        int rowEndOffset = rowIndex[thread_idx + 1];
        int closestCentroid = 0;
        int currentCentroid = 0;
        // "Distance" is cosine similarity here: larger means closer.
        float maxDistance = -1000;
        float normalizedDistance;
        float distance;
        for (int i = rowStartOffset; i < rowEndOffset; i++) {
            currentCentroid = colIndex[i];
            centroidLength = centroidMagnitude[currentCentroid];
            distance = valIndex[i];
            // cosine similarity = dot / (|v| * |c|)
            normalizedDistance = distance / (vectorLength * centroidLength);
            if (normalizedDistance > maxDistance) {
                maxDistance = normalizedDistance;
                closestCentroid = currentCentroid;
            }
        }
        result[thread_idx] = closestCentroid;
    }
}
/*
Computes the magnitude for each centroid vector, Each thread (instance of this kernel)
will loop over all the values in the centroid vector and compute the length.
*/
__global__
void kernel_compute_centroid_lengths(int *rowIndex, float *valIndex,
                                     float *centroidMagnitude, int numCentroids) {
    // One thread per centroid: L2 norm of its sparse (CSR) row.
    unsigned int tid = getThreadID();
    if (tid >= numCentroids) {
        return;
    }
    const int rowBegin = rowIndex[tid];
    const int rowEnd = rowIndex[tid + 1];
    float sumSquares = 0;
    for (int i = rowBegin; i < rowEnd; i++) {
        float v = valIndex[i];
        sumSquares += v * v;
    }
    centroidMagnitude[tid] = sqrt(sumSquares);
}
/*
Simple kernel that is used to subract 2 vectors and taking the absolute value
of the result for each element-wise subtraction. This is used as part of finding the
differences between the column assignments of 2 cluster matricies.
This kernel doesn't require extra memory to store the result rather the results are
stored in the first argument arry.
*/
__global__
void kernel_subtract_abs(int *colIndexOld, int *colIndexNew, int length) {
    // In-place |old - new| per element; the result overwrites colIndexOld.
    unsigned int tid = getThreadID();
    if (tid >= length) {
        return;
    }
    colIndexOld[tid] = abs(colIndexOld[tid] - colIndexNew[tid]);
}
__global__
void kernel_diff(int *colIndexOld, int *colIndexNew, int length) {
    // Marks where the two assignments differ: 1 if changed, 0 if identical,
    // written back into colIndexOld.
    unsigned int tid = getThreadID();
    if (tid < length) {
        colIndexOld[tid] = (colIndexOld[tid] == colIndexNew[tid]) ? 0 : 1;
    }
}
/*
Used to average a centroid vector. A centroid is the averaged sum of all the vectors in its cluster.
Loading up the cluster matrix with values (1/numVectors in a cluster) is done first before computing the
centroid verticies.
*/
__global__
void kernel_average_clusters(float *centroidTotals, int numVectors, int *colIndex, float *valIndex) {
    // One thread per vector: store 1/|cluster| so a later matrix product
    // averages each cluster's member vectors into its centroid.
    unsigned int tid = getThreadID();
    if (tid >= numVectors) {
        return;
    }
    int cluster = colIndex[tid];
    valIndex[tid] = 1.0 / centroidTotals[cluster];
}
|
14,361 | #include "includes.h"
// Applies proposed partition swaps.  A candidate at position idx is taken
// when its desirability clears the 0.1 threshold and it is the last entry
// for its source partition (either the final slot, or the next entry
// comes from a different source) -- one applied swap per source.
__global__ void makeSwapsKernel(int size, int *partition, int *partSizes, int *nodeWeights, int *swap_to, int *swap_from, int *swap_index, float *desirability)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx >= size)
		return;
	bool lastForSource = (idx == size - 1) || (swap_from[idx] != swap_from[idx + 1]);
	if(desirability[idx] > .1 && lastForSource)
	{
		int swapTo = swap_to[idx];
		int swapFrom = swap_from[idx];
		int swapIndex = swap_index[idx];
		int nodeWeight = nodeWeights[swapIndex];
		// Move the node and adjust both partition weights atomically.
		partition[swapIndex] = swapTo;
		atomicAdd(&partSizes[swapTo], nodeWeight);
		atomicAdd(&partSizes[swapFrom], -nodeWeight);
		//printf("Swapping node: %d, %d from part: %d, %d to part: %d, %d desirability: %f\n", swapIndex, nodeWeight, swapFrom, partSizes[swapFrom], swapTo, partSizes[swapTo], desirability[idx]);
	}
}
14,362 | #include "includes.h"
/* Computes, for each output element, the mean of the entire input array:
 * input is staged chunk-by-chunk through shared memory, every in-range
 * thread accumulates the full sum into output[global_i], then divides
 * by N.  Assumes output[] is zero-initialized by the caller.
 */
__global__ void smemKernel(int N, float *input, float *output){
	int b_size = blockDim.x, b_idx = blockIdx.x, t_idx = threadIdx.x;
	int global_i = b_size * b_idx + t_idx, n_chk = (N + SHARE_SIZE - 1)/SHARE_SIZE;
	__shared__ float buff[SHARE_SIZE];
	for(int q=0;q<n_chk;++q){
		int left = q*SHARE_SIZE, right = min(left + SHARE_SIZE, N);
		// Cooperatively stage this chunk into shared memory (strided copy).
		for(int i = t_idx + left; i < right; i += b_size) buff[i-left] = input[i];
		__syncthreads();
		if(global_i < N){
			for(int i = left; i < right; ++i) output[global_i] += buff[i-left];
		}
		// Barrier before the next iteration overwrites buff.
		__syncthreads();
	}
	// Bug fix: the final division was unguarded, so threads in the
	// rounded-up tail block wrote past the end of output[].
	if(global_i < N) output[global_i] /= N;
	return ;
}
14,363 | #include <iostream>
#include <stdio.h>
#include <vector>
// Defining number of elements in array.
#define N 100
// Defining vector addition function for single core CPU.
// Sequential reference implementation: element-wise sum of the two
// length-N input arrays into host_c.
void cpu_add(int *host_a, int *host_b, int *host_c)
{
    for (int tid = 0; tid < N; ++tid)
    {
        host_c[tid] = host_a[tid] + host_b[tid];
    }
}
// Builds host_a[i] = 2*i and host_b[i] = i, adds them with the CPU
// reference routine, and prints every element of the sum.
int main(int argc, char **argv)
{
    int host_a[N], host_b[N], host_c[N];
    // Initializing two arrays for addition.
    for (unsigned int i = 0; i < N; ++i)
    {
        host_a[i] = 2 * i;
        host_b[i] = i;
    }
    // Calling CPU function for vector addition.
    cpu_add(host_a, host_b, host_c);
    // Print every element of the result.
    std::cout << "Vector addition on CPU\n";
    for (unsigned int i = 0; i < N; ++i)
    {
        std::cout << "The sum of " << i << " element is " << host_a[i] << " + " << host_b[i] <<
        " = " << host_c[i] << std::endl;
    }
    return 0;
}
14,364 | #include <iostream>
#include <chrono>
#define H2D cudaMemcpyHostToDevice
#define D2H cudaMemcpyDeviceToHost
#define OK cudaSuccess
using namespace std;
typedef uint32_t uint;
//CPU
uint i = 0, ind = 0;
const uint N = 8E3;
const uint NBytes_f32 = sizeof( float ) * N;
const uint nArrays = 1; //single default stream of 1D array
float *h_arr[ nArrays ], *h_result[ nArrays ]; //pinned H2D && D2H transfers
//GPU
float *d_arr[ nArrays ];
__device__ float4 d_sArr[ 1 ]; //d_s[].x;.y;.z;.w; cudaMemcpyToSymbol(*dest,*src,byteSize);cudaMemcpyFromSymbol(*dest,*src,byteSize);
const uint nThreads = 512, nBlocks = ( N / nThreads ) + 1;
// Releases every pinned host buffer and device buffer, then resets the
// device.  Returns 0 so main can end with `return freeGPUMem();`.
inline int freeGPUMem( void )
{
	for ( i = 0; i < nArrays; i++ )
	{
		cudaFreeHost( h_arr[ i ] );    // pinned host input
		cudaFreeHost( h_result[ i ] ); // pinned host output
		cudaFree( d_arr[ i ] );        // device buffer
	};
	cudaDeviceReset();
	return 0;
};
// Allocates pinned host input/output buffers and a device buffer for each
// array, fills the host input with a 0..N-1 ramp, echoes the first three
// values, and starts the async H2D upload on the default stream.
// NOTE(review): on any allocation failure this returns early, leaving
// earlier allocations live and later pointers unset.
inline void initGPUMem( void )
{
	for ( i= 0; i < nArrays; i++ )
	{
		if ( cudaMallocHost( ( void** ) &h_arr[ i ], NBytes_f32 ) != cudaSuccess ) { printf( "cudaMallocHost err!\n" ); return; };
		if ( cudaMallocHost( ( void** ) &h_result[ i ], NBytes_f32 ) != cudaSuccess ) { printf( "cudaMallocHost err!\n" ); return; };
		if ( cudaMalloc( ( void** ) &d_arr[ i ], NBytes_f32 ) != cudaSuccess ) { printf( "cudaMalloc err!\n" ); return; };
		// ...
		// h_arr[] data load
		for ( ind = 0; ind < N; ind++ )
			h_arr[ i ][ ind ] = float( ind );
		for ( ind = 0; ind < 3; ind++ )
			cout << "h_arr[" << i << "][" << ind << "]: " << h_arr[ i ][ ind ] << endl;
		// ...
		// Async copy from pinned memory; completion is observed later via
		// cudaDeviceSynchronize in main.
		cudaMemcpyAsync( d_arr[ i ], h_arr[ i ], NBytes_f32, H2D );
	};
};
// Debug kernel: each in-range thread prints its global index.
// NOTE(review): the format says "block[%i]" but the argument is
// blockDim.x (the block size), not blockIdx.x -- confirm which was meant.
__global__ void emptyKernel( float *d_in )
{
	uint tdx = threadIdx.x + blockIdx.x * blockDim.x;
	if ( tdx < N )
	{
		printf( "thread[%i].block[%i]\n", tdx, blockDim.x );
	};
};
// Times a near-empty kernel launch twice per array: once wall-clock with
// std::chrono around a synchronized launch, once with cudaEvent timing;
// then copies the (untouched) device buffer back and echoes three values.
int main( void )
{
	initGPUMem();
	for( i = 0; i < nArrays; i++ )
	{
		// Wall-clock timing around launch + sync.
		auto f1 = chrono::high_resolution_clock::now();
		emptyKernel<<< 1, 1 >>>( d_arr[ i ] );
		cudaDeviceSynchronize();
		auto f2 = chrono::high_resolution_clock::now();
		cout << "GPU kernel took <chrono> : "
			<< chrono::duration_cast< chrono::nanoseconds >( f2 - f1 ).count()
			<< " [ns]\n";
		// Event-based timing of the same launch.
		cudaEvent_t start1, stop1;
		cudaEventCreate( &start1 );
		cudaEventCreate( &stop1 );
		cudaEventRecord( start1 );
		emptyKernel<<< 1, 1 >>>( d_arr[ i ] );
		cudaDeviceSynchronize();
		cudaEventRecord( stop1 );
		cudaEventSynchronize( stop1 );
		float milliseconds = 0.0f;
		cudaEventElapsedTime( &milliseconds, start1, stop1 );
		// milliseconds * 1e6 converts ms to ns for printing.
		cout << "nBlocks[" << nBlocks << "]; nThreads[" << nThreads << "]; GPU kernel took <cudaEvent> : "
			<< milliseconds * 1000000.0f << "[ns]\n";
		cudaMemcpy( h_result[ i ], d_arr[ i ], NBytes_f32, D2H );
		for ( ind = 0; ind < 3; ind++ )
			cout << "h_result[" << i << "][" << ind << "]: " << h_result[ i ][ ind ] << endl;
	};
	// Frees all buffers and resets the device; returns 0.
	return freeGPUMem();
}
|
14,365 | #include "includes.h"
// Scatter-compacts boundary positions: for each record at pos >= startPos
// whose start flag is set, writes pos into d_bounary at the slot given by
// the prefix sum of the flags (d_startSumArray).
__global__ void writeBoundary_kernel(int startPos, int rLen, int* d_startArray, int* d_startSumArray, int* d_bounary)
{
	// Linearize the 2D block/grid layout into one result index.
	const int tid = threadIdx.x + threadIdx.y * blockDim.x;
	const int bid = blockIdx.x + blockIdx.y * gridDim.x;
	const int resultID = bid * blockDim.x + tid;
	int pos = startPos + resultID;
	if(pos < rLen && d_startArray[pos] == 1)
	{
		d_bounary[d_startSumArray[pos]] = pos;
	}
}
14,366 | #include "includes.h"
/**
* @file
* @brief CIS 565 Version Checker
* @details A simple CUDA hello-world-style program for Patrick Cozzi's
* CIS 565: GPU Programming, at the University of Pennsylvania.
* @authors Starter code: Yining Karl Li, Liam Boone, Harmony Li, Kai Ninomiya
* @copyright University of Pennsylvania
*/
// Paints the PBO so the top half encodes the CUDA major version and the
// bottom half the minor version as a solid color (one thread per pixel).
// NOTE(review): the version-to-color mapping has no case for ver == 4
// (such pixels stay black) -- preserved as-is from the original.
__global__ void createVersionVisualization(uchar4* PBOpos, int width, int height, int major, int minor) {
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    int index = x + (y * width);
    // Bug fix: the guard used <=, letting x == width / y == height threads
    // write one pixel past each row / past the end of the buffer.
    if (x < width && y < height) {
        // Each thread writes one pixel location in the texture (textel)
        PBOpos[index].w = 0;
        PBOpos[index].x = 0;
        PBOpos[index].y = 0;
        PBOpos[index].z = 0;
        int ver = y < height / 2 ? major : minor;
        if (ver == 0) {
            PBOpos[index].x = 255;
        } else if (ver == 1) {
            PBOpos[index].y = 255;
        } else if (ver == 2) {
            PBOpos[index].z = 255;
        } else if (ver == 3) {
            PBOpos[index].x = 255;
            PBOpos[index].y = 255;
        } else if (ver == 5) {
            PBOpos[index].z = 255;
            PBOpos[index].y = 255;
        }
    }
}
14,367 | #include <iostream>
#include <limits>
#include <cuda.h>
#include <curand_kernel.h>
using std::cout;
using std::endl;
typedef unsigned long long Count;
typedef std::numeric_limits<double> DblLim;
const Count WARP_SIZE = 32;
const Count NBLOCKS = 64;
const Count ITERATIONS = 10000000;
const Count REPETITIONS = 400;
/**
 * Monte-Carlo pi kernel: each thread samples ITERATIONS uniform points in
 * the unit square and counts hits inside the quarter circle; thread 0 then
 * reduces the per-thread counters into totals[blockIdx.x].
 *
 * Launch assumption (visible in main): blockDim.x == WARP_SIZE, so the
 * shared counter array has exactly one slot per thread.
 *
 * Bug fix: a __syncthreads() is required before thread 0 reads every
 * thread's shared counter. Even with a single warp per block, Volta+
 * independent thread scheduling means implicit warp synchrony cannot be
 * relied on; without the barrier the reduction can read stale counters.
 */
__global__ void monte_carlo_pi(Count *totals) {
    // Create shared memory for block
    __shared__ Count counter[WARP_SIZE];
    counter[threadIdx.x] = 0;
    // Unique ID of the thread
    // use this id to seed the rng for each thread
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    curandState_t rng;
    curand_init(clock64(), tid, 0, &rng);
    // Run through iterations, sample two uniform points,
    // then calculate test if points fall within circle
    for (int i = 0; i < ITERATIONS; i++) {
        float x = curand_uniform(&rng);
        float y = curand_uniform(&rng);
        // 1 - int(x*x + y*y) is 1 inside the quarter circle, 0 outside
        counter[threadIdx.x] += 1 - int(x * x + y * y);
    }
    // Make every thread's counter visible before the serial reduction.
    __syncthreads();
    // In every block use the first thread to aggregate the results
    // using the shared memory within the block. Shared memory is fast!
    if (threadIdx.x == 0) {
        totals[blockIdx.x] = 0;
        for (int i = 0; i < WARP_SIZE; i++) {
            totals[blockIdx.x] += counter[i];
        }
    }
}
/**
 * Host driver: repeatedly launches the pi kernel, accumulates hit counts,
 * and prints a running estimate of pi after each repetition.
 *
 * Bug fix: hostOutput is allocated with `new[]` but was released with
 * `free()` — mixing the C and C++ allocators is undefined behaviour; it
 * must be released with `delete[]`.
 */
int main(int argc, char **argv) {
    // Set precision of cout numbers
    cout.precision(DblLim::max_digits10);
    // Check if there is a cuda device available
    int numDev;
    cudaGetDeviceCount(&numDev);
    if (numDev < 1) {
        cout << "CUDA device missing! Do you need to use optirun?\n";
        return 1;
    }
    // Log base params
    cout << "Starting monte carlo simulation with \n"
         << NBLOCKS << " blocks, \n"
         << WARP_SIZE << " threads, and \n"
         << ITERATIONS << " iterations, over \n"
         << REPETITIONS << " repetitions" << endl;
    // Allocate duplicate size host and device memory to store
    // the counts of each blocks monte carlo process
    Count *hostOutput, *deviceOutput;
    hostOutput = new Count[NBLOCKS]; // Host memory
    cudaMalloc(&deviceOutput, sizeof(Count) * NBLOCKS); // Device memory
    Count total = 0;
    // Samples per repetition (all operands are Count, so no 32-bit overflow)
    Count tests = NBLOCKS * ITERATIONS * WARP_SIZE;
    for (int repetition = 1; repetition <= REPETITIONS; repetition++) {
        // Launch kernel
        monte_carlo_pi<<<NBLOCKS, WARP_SIZE>>>(deviceOutput);
        // Copy back memory used on device (implicitly syncs with the kernel)
        cudaMemcpy(hostOutput, deviceOutput, sizeof(Count) * NBLOCKS, cudaMemcpyDeviceToHost);
        // Compute total hits
        for (int i = 0; i < NBLOCKS; i++) {
            total += hostOutput[i];
        }
        // Set maximum precision for decimal printing
        cout << "π ≅ " << 4.0 * (double)total/(double)(tests * repetition)
             << endl;
    }
    // Free device and host memory and exit process
    cudaFree(deviceOutput);
    delete[] hostOutput;   // was free(): UB on new[]-allocated memory
    return 0;
}
|
14,368 | /*
* Copyright (c) 2018 Preferred Networks, Inc. All rights reserved.
*/
#include <cuda_fp16.h>
namespace chainer_trt {
namespace plugin {

    // Element-wise binary ops between an input tensor and a constant
    // tensor, used by the TensorRT plugin layers.
    //
    // Conventions shared by the four kernel/launcher pairs below:
    //  * grid.x covers the n_in elements of one batch item; grid.y is the
    //    batch index;
    //  * vals_gpu holds either one scalar (n_values == 1, broadcast to all
    //    elements) or one value per element;
    //  * the constant is the LEFT operand (vals OP src) — this matters for
    //    sub and div.

    // dst = vals + src (per element, per batch item)
    template <typename T>
    __global__ void eltw_sum_kernel(const T* src_gpu, int n_in,
                                    const T* vals_gpu, int n_values,
                                    T* dst_gpu) {
        const int idx = blockIdx.x * blockDim.x + threadIdx.x;
        if(n_in <= idx)
            return;
        const int batch = blockIdx.y;
        // index 0 broadcasts a single constant across all elements
        const int idx_in_vals = (n_values == 1 ? 0 : idx);
        dst_gpu[batch * n_in + idx] =
          vals_gpu[idx_in_vals] + src_gpu[batch * n_in + idx];
    }

    // Launch eltw_sum_kernel: one thread per element, one grid row per
    // batch item, on the caller's stream.
    template <typename T>
    void apply_eltw_sum(const T* src_gpu, int n_in, const T* vals_gpu,
                        int n_values, T* dst_gpu, int batch_size,
                        cudaStream_t stream) {
        const int block_size = 1024;
        const int grid_size = (int)std::ceil(1.0 * n_in / block_size);
        dim3 grid(grid_size, batch_size);
        eltw_sum_kernel<T><<<grid, block_size, 0, stream>>>(
          src_gpu, n_in, vals_gpu, n_values, dst_gpu);
    }

    // dst = vals - src (constant minus input)
    template <typename T>
    __global__ void eltw_sub_kernel(const T* src_gpu, int n_in,
                                    const T* vals_gpu, int n_values,
                                    T* dst_gpu) {
        const int idx = blockIdx.x * blockDim.x + threadIdx.x;
        if(n_in <= idx)
            return;
        const int batch = blockIdx.y;
        const int idx_in_vals = (n_values == 1 ? 0 : idx);
        dst_gpu[batch * n_in + idx] =
          vals_gpu[idx_in_vals] - src_gpu[batch * n_in + idx];
    }

    // Launcher for eltw_sub_kernel (same layout as apply_eltw_sum).
    template <typename T>
    void apply_eltw_sub(const T* src_gpu, int n_in, const T* vals_gpu,
                        int n_values, T* dst_gpu, int batch_size,
                        cudaStream_t stream) {
        const int block_size = 1024;
        const int grid_size = (int)std::ceil(1.0 * n_in / block_size);
        dim3 grid(grid_size, batch_size);
        eltw_sub_kernel<T><<<grid, block_size, 0, stream>>>(
          src_gpu, n_in, vals_gpu, n_values, dst_gpu);
    }

    // dst = vals * src
    template <typename T>
    __global__ void eltw_mul_kernel(const T* src_gpu, int n_in,
                                    const T* vals_gpu, int n_values,
                                    T* dst_gpu) {
        const int idx = blockIdx.x * blockDim.x + threadIdx.x;
        if(n_in <= idx)
            return;
        const int batch = blockIdx.y;
        const int idx_in_vals = (n_values == 1 ? 0 : idx);
        dst_gpu[batch * n_in + idx] =
          vals_gpu[idx_in_vals] * src_gpu[batch * n_in + idx];
    }

    // Launcher for eltw_mul_kernel.
    template <typename T>
    void apply_eltw_mul(const T* src_gpu, int n_in, const T* vals_gpu,
                        int n_values, T* dst_gpu, int batch_size,
                        cudaStream_t stream) {
        const int block_size = 1024;
        const int grid_size = (int)std::ceil(1.0 * n_in / block_size);
        dim3 grid(grid_size, batch_size);
        eltw_mul_kernel<T><<<grid, block_size, 0, stream>>>(
          src_gpu, n_in, vals_gpu, n_values, dst_gpu);
    }

    // dst = vals / src (constant divided by input; no zero check on src)
    template <typename T>
    __global__ void eltw_div_kernel(const T* src_gpu, int n_in,
                                    const T* vals_gpu, int n_values,
                                    T* dst_gpu) {
        const int idx = blockIdx.x * blockDim.x + threadIdx.x;
        if(n_in <= idx)
            return;
        const int batch = blockIdx.y;
        const int idx_in_vals = (n_values == 1 ? 0 : idx);
        dst_gpu[batch * n_in + idx] =
          vals_gpu[idx_in_vals] / src_gpu[batch * n_in + idx];
    }

    // Launcher for eltw_div_kernel.
    template <typename T>
    void apply_eltw_div(const T* src_gpu, int n_in, const T* vals_gpu,
                        int n_values, T* dst_gpu, int batch_size,
                        cudaStream_t stream) {
        const int block_size = 1024;
        const int grid_size = (int)std::ceil(1.0 * n_in / block_size);
        dim3 grid(grid_size, batch_size);
        eltw_div_kernel<T><<<grid, block_size, 0, stream>>>(
          src_gpu, n_in, vals_gpu, n_values, dst_gpu);
    }

    // explicit instantiation (without this, link error will happen)
    template void apply_eltw_sum(const float*, int, const float*, int, float*,
                                 int, cudaStream_t);
    template void apply_eltw_sub(const float*, int, const float*, int, float*,
                                 int, cudaStream_t);
    template void apply_eltw_mul(const float*, int, const float*, int, float*,
                                 int, cudaStream_t);
    template void apply_eltw_div(const float*, int, const float*, int, float*,
                                 int, cudaStream_t);
    template void apply_eltw_sum(const __half*, int, const __half*, int,
                                 __half*, int, cudaStream_t);
    template void apply_eltw_sub(const __half*, int, const __half*, int,
                                 __half*, int, cudaStream_t);
    template void apply_eltw_mul(const __half*, int, const __half*, int,
                                 __half*, int, cudaStream_t);
    template void apply_eltw_div(const __half*, int, const __half*, int,
                                 __half*, int, cudaStream_t);
}
}
|
14,369 | #include "utilities.cuh"
// Median of a device vector of floats.
// Side effect: when is_sorted is false, `data` is sorted in place.
// Returns +infinity for an empty vector.
float utilities::median(thrust::device_vector<float> &data, bool is_sorted) {
    if (data.size() == 0) {
        return std::numeric_limits<float>::infinity();
    }
    float med;
    auto n = data.size();
    if (!is_sorted) {
        thrust::sort(data.begin(), data.end());
    }
    // // make an iterator for both value and index
    // thrust::device_vector<int> idx(n);
    // thrust::sequence(idx.begin(), idx.end());
    //
    // thrust::transform(data.begin(), data.end(), idx.begin(), data.begin(),
    // med_trans(n));
    // Even n: [first, last) spans the two middle elements (summed, halved
    // below). Odd n: the range holds exactly the single middle element.
    auto first = n % 2 == 0 ? n / 2 - 1 : n / 2;
    auto last = n / 2 + 1;
    med = thrust::reduce(data.begin() + first, data.begin() + last,
                         0.0f, thrust::plus<float>());
    if (n % 2 == 0) {
        med *= 0.5f;
    }
    return med;
}
// Partition a sequence into runs of "nearby" values: consecutive elements
// whose gap is below `thresh` are grouped together, keyed by the first
// element of the run; groups are returned in key order (std::map).
// NOTE(review): the scan assumes `arr` is sorted ascending (the gap test
// uses arr[j] - arr[j-1] on unsigned values) and throws on empty input
// via arr.at(0) — confirm both with callers. Values are always collected
// as uint64_t regardless of T.
template<class T>
std::vector<std::vector<T>> utilities::part_nearby(std::vector<T> &arr,
                                                   uint64_t thresh) {
    std::map<uint64_t, std::vector<uint64_t>> collections;
    // Seed the first group with the first element.
    collections[arr.at(0)] = std::vector<uint64_t>();
    collections[arr.at(0)].push_back(arr.at(0));
    auto i = 0;
    while (i < arr.size()) {
        auto prev_i = i;
        // Scan forward from the current group head; each gap >= thresh
        // starts a new group at j.
        for (auto j = i + 1; j < arr.size(); ++j) {
            if (arr.at(j) - arr.at(j - 1) < thresh) {
                collections[arr.at(i)].push_back(arr.at(j));
            } else {
                i = j;
                collections[arr.at(i)] = std::vector<uint64_t>();
                collections[arr.at(i)].push_back(arr.at(i));
            }
        }
        // No new group head was found in this pass: advance by one.
        // NOTE(review): the `i < arr.size() - 1` guard skips seeding a
        // group for the last element — presumably intentional to avoid a
        // duplicate, but verify against the expected output.
        if (i == prev_i) {
            ++i;
            if (i < arr.size() - 1) {
                collections[arr.at(i)] = std::vector<uint64_t>();
                collections[arr.at(i)].push_back(arr.at(i));
            }
        }
    }
    // Flatten the map's values; iteration order is ascending by group head.
    std::vector<std::vector<T>> values;
    for (auto it = collections.begin(); it != collections.end(); ++it) {
        values.push_back(it->second);
    }
    return values;
}
template
std::vector<std::vector<uint64_t>>
utilities::part_nearby(std::vector<uint64_t> &arr, uint64_t thresh);
// Index of the largest element of vec (first occurrence wins on ties,
// per std::max_element).
template<class T>
long utilities::argmax(std::vector<T> vec) {
    auto largest = std::max_element(vec.begin(), vec.end());
    return std::distance(vec.begin(), largest);
}
template
long utilities::argmax(std::vector<uint64_t> vec);
// Indices that would sort `vec` ascending (numpy-style argsort).
//
// Bug fix: the comparator previously returned `vec.at(i) <= vec.at(j)`,
// which is not a strict weak ordering (comp(a, a) must be false);
// std::sort with such a comparator is undefined behaviour and can read
// out of bounds or fail to terminate when equal keys are present.
template<class T>
std::vector<uint64_t> utilities::argsort(const std::vector<T> &vec) {
    std::vector<uint64_t> as(vec.size());
    for (uint64_t i = 0; i < vec.size(); ++i)
        as.at(i) = i;
    std::sort(as.begin(), as.end(), [&](uint64_t i, uint64_t j) {
        return vec.at(i) < vec.at(j);   // strict: valid ordering
    });
    return as;
}
template
std::vector<uint64_t> utilities::argsort(const std::vector<uint32_t> &vec);
template
std::vector<uint64_t> utilities::argsort(const std::vector<uint64_t> &vec);
14,370 | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define THREADS_PER_BLOCK (512)
#define DEFAULT_SIZE (160*160*THREADS_PER_BLOCK)
/* usage statement */
/* Print the expected command line and exit successfully; called when the
 * program is invoked with the wrong number of arguments. */
void usage()
{
    printf("Usage: prime_cuda N\n");
    exit(0);
}
/*
 * Sieve-style prime summation over one chunk of THREADS_PER_BLOCK numbers
 * per block: each thread seeds its candidate number into shared memory,
 * the block cooperatively zeroes composites, and thread 0 reduces the
 * surviving values and atomically adds the block sum to *sum.
 *
 * Bug fix: a __syncthreads() is required between initializing block_arr
 * and the sieve loop, which reads every other thread's slot; without the
 * barrier threads can read uninitialized shared memory.
 */
__global__ void sum_primes( unsigned int *offset, unsigned int *N, unsigned long long *sum )
{
    unsigned int i, j, max;
    unsigned int index = (unsigned int) *offset + ((blockIdx.x * gridDim.y + blockIdx.y) * THREADS_PER_BLOCK) + threadIdx.x;
    unsigned long long blocksum = 0;
    __shared__ unsigned int block_arr[THREADS_PER_BLOCK];
    block_arr[threadIdx.x] = index;
    /* skip 0, 1, and anything beyond N */
    if(index < 2 || index >= *N) block_arr[threadIdx.x] = 0;
    /* make every thread's candidate visible before the sieve reads them */
    __syncthreads();
    /* determine last number to check in the primality check for this block */
    max = sqrt( (double) index + THREADS_PER_BLOCK );
    if( sqrt( (double) *N) < max ) max = sqrt( (double) *N );
    /* loop over each number in this block and check if it is divisable by i;
     * concurrent writes of 0 to the same slot are benign (same value) */
    for(i = threadIdx.x + 2; i <= max; i += THREADS_PER_BLOCK)
    {
        for(j = 0; j < THREADS_PER_BLOCK; j++)
        {
            if(block_arr[j] % i == 0 && block_arr[j] != i) block_arr[j] = 0;
        }
    }
    /* synchronize after the computation */
    __syncthreads();
    /* reduce the results from this block to a single value */
    if(threadIdx.x == 0)
    {
        for(i = 0; i < THREADS_PER_BLOCK; i++)
        {
            blocksum += block_arr[i];
        }
        /* add the sum from this block to the overall sum */
        atomicAdd(sum, blocksum);
    }
}
/*
 * Host driver: sums all primes up to N on the GPU, processing the range
 * in shrinking chunks (larger offsets use smaller chunks so each kernel
 * call stays bounded), and reports the total and wall-clock time.
 */
int main(int argc, char **argv)
{
    unsigned int N;
    unsigned int num_threads, num_blocks, blockx, blocky;
    unsigned int def_block_size, size_thresh;
    unsigned int *n_cuda;
    unsigned int *off_cuda;
    unsigned long long *sum_cuda;
    unsigned long long sum;
    struct timeval tv;
    double t0, t1;
    unsigned int subN, offset, size;
    if(argc != 2)
        usage();
    N = atoi(argv[1]);
    /* get the starting time before the prime summation call */
    gettimeofday(&tv, NULL);
    t0 = tv.tv_usec;
    t0 /= 1000000.0;
    t0 += tv.tv_sec;
    /* allocate the necessary CUDA device variables */
    cudaMalloc( (void **)&n_cuda, sizeof(unsigned int) );
    cudaMalloc( (void **)&off_cuda, sizeof(unsigned int) );
    cudaMalloc( (void **)&sum_cuda, sizeof(unsigned long long) );
    /* the running total lives on the device; kernels atomicAdd into it */
    sum = 0;
    cudaMemcpy( sum_cuda, (void *) &sum, sizeof(unsigned long long), cudaMemcpyHostToDevice );
    def_block_size = DEFAULT_SIZE;
    size_thresh = 100000000;
    printf("Prime CUDA\n");
    /* walk the range [0, N) in chunks of def_block_size numbers */
    for(offset = 0; offset < N; offset += def_block_size)
    {
        /* beyond each threshold, halve the chunk so the per-call sieve
         * work (which grows with sqrt of the values) stays reasonable */
        if(offset >= size_thresh)
        {
            size_thresh *= 4;
            def_block_size /= 2;
        }
        /* determine the subset of numbers to calculate for this CUDA kernel call */
        if(offset + def_block_size < N) size = def_block_size;
        else size = N - offset;
        subN = offset + size;
        /* determine how many blocks are needed */
        num_blocks = ceil( (double) size / THREADS_PER_BLOCK );
        num_threads = THREADS_PER_BLOCK;
        /* X and Y dimensions for the CUDA blocks (square-ish 2-D grid to
         * stay under the per-dimension grid limit) */
        blockx = ceil( sqrt(num_blocks) );
        blocky = ceil( sqrt(num_blocks) );
        dim3 blocks(blockx, blocky);
        dim3 threads(num_threads);
        printf("subN: %d offset: %d size: %d\n", subN, offset, size);
        printf("Blocks: %d (x: %d y: %d) tpb: %d\n", num_blocks, blockx, blocky, num_threads);
        /* copy the variables for this run into the device memory */
        cudaMemcpy( n_cuda, (void *) &subN, sizeof(unsigned int), cudaMemcpyHostToDevice );
        cudaMemcpy( off_cuda, (void *) &offset, sizeof(unsigned int), cudaMemcpyHostToDevice );
        /* call the CUDA kernel */
        sum_primes <<< blocks, threads >>> ( off_cuda, n_cuda, sum_cuda );
        /* wait for it to finish */
        cudaDeviceSynchronize();
        /* check for errors */
        cudaError_t error = cudaGetLastError();
        if(error != cudaSuccess)
        {
            /* print the CUDA error message and exit */
            printf("CUDA error: %s\n", cudaGetErrorString(error));
            exit(-1);
        }
    }
    /* obtain the final result from the device memory */
    cudaMemcpy( &sum, sum_cuda, sizeof(unsigned long long), cudaMemcpyDeviceToHost );
    /* get the end time to calculate the total duration of the prime summation */
    gettimeofday(&tv, NULL);
    t1 = tv.tv_usec;
    t1 /= 1000000.0;
    t1 += tv.tv_sec;
    printf("N: %d\n", N);
    /* NOTE(review): %lld with an unsigned long long argument — works in
     * practice for in-range values but %llu would be the matching format */
    printf("sum of primes up to N: %lld\n", sum);
    printf("Time elapsed: %lf\n\n", t1 - t0);
    /* free device memory */
    cudaFree(n_cuda);
    cudaFree(off_cuda);
    cudaFree(sum_cuda);
    return 0;
}
|
14,371 | #include <stdio.h>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <sys/time.h>
#define TPB 256
#define EPSILON 0.0005
#define NTIME 1
#define ARRAY_SIZE 163840000
unsigned long get_time();
/* Wall-clock time in milliseconds since the Unix epoch. */
unsigned long get_time() {
    struct timeval now;
    gettimeofday(&now, NULL);
    unsigned long ms = (unsigned long)now.tv_sec * 1000;
    ms += now.tv_usec / 1000;
    return ms;
}
// One SAXPY element per thread: y[i] = a*x[i] + y[i].
// NOTE(review): there is no i < n bounds guard. This is only safe because
// the launch in main() covers exactly ARRAY_SIZE elements and ARRAY_SIZE
// is an exact multiple of TPB — confirm before reusing with other sizes.
__global__ void SAXPYgpuKernel(float *x, float *y, float a)
{
    const long i = blockIdx.x*blockDim.x + threadIdx.x;
    y[i] = x[i]*a + y[i];
}
// CPU reference SAXPY over the fixed-size arrays: y[i] = a*x[i] + y[i]
// for all ARRAY_SIZE elements.
void SAXPYcpu(float* x, float* y, float a){
    for (long idx = 0; idx < ARRAY_SIZE; ++idx)
        y[idx] = x[idx] * a + y[idx];
}
// True iff a and b agree elementwise within EPSILON over all ARRAY_SIZE
// entries; false at the first element whose difference exceeds EPSILON.
bool equalVectors(float* a, float* b){
    long idx = 0;
    while (idx < ARRAY_SIZE) {
        if (std::abs(a[idx] - b[idx]) > EPSILON)
            return false;
        ++idx;
    }
    return true;
}
/*
 * Benchmark: run SAXPY over ARRAY_SIZE random floats on both the CPU and
 * the GPU, verify the results match within EPSILON, and report timings.
 * Note the GPU timing includes allocation and both host<->device copies.
 */
int main()
{
    // seed for random number
    srand (static_cast <unsigned> (time(0)));
    // declare constant a
    const float a = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
    // Declare pointers for an array of floats
    float* x = (float*) malloc (sizeof(float)*ARRAY_SIZE);
    float* y = (float*) malloc (sizeof(float)*ARRAY_SIZE);
    // set random values
    for (long i = 0 ; i < ARRAY_SIZE ; ++i){
        x[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
        y[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
    }
    // CPU execution (timed including the defensive copies below)
    long start_time_cpu = get_time();
    float* x_cpu = (float*) malloc (sizeof(float)*ARRAY_SIZE);
    float* y_cpu = (float*) malloc (sizeof(float)*ARRAY_SIZE);
    // copy vector to use in the CPU, so x/y stay pristine for the GPU run
    std::memcpy(x_cpu, x, ARRAY_SIZE*sizeof(float));
    std::memcpy(y_cpu, y, ARRAY_SIZE*sizeof(float));
    printf("Computing SAXPY on the CPU…");
    SAXPYcpu(x_cpu, y_cpu, a);
    printf("Done\n");
    long end_time_cpu = get_time();
    // GPU execution
    // Declare pointers for an array of floats
    long start_time_gpu = get_time();
    float* x_gpu = 0;
    float* y_gpu = 0;
    float* y_gpu_res = (float*) malloc (sizeof(float)*ARRAY_SIZE);
    // Allocate device memory
    cudaMalloc(&x_gpu, ARRAY_SIZE*sizeof(float));
    cudaMalloc(&y_gpu, ARRAY_SIZE*sizeof(float));
    // Copy array to device
    cudaMemcpy(x_gpu, x, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(y_gpu, y, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
    // Launch kernel to compute SAXPY (ceil-div grid over ARRAY_SIZE)
    printf("Computing SAXPY on the GPU...");
    SAXPYgpuKernel<<<(ARRAY_SIZE+TPB-1)/TPB, TPB>>>(x_gpu, y_gpu, a);
    // Synchronize device
    cudaDeviceSynchronize();
    // Copy back from device to CPU
    cudaMemcpy(y_gpu_res, y_gpu, ARRAY_SIZE*sizeof(float), cudaMemcpyDeviceToHost);
    printf("Done\n");
    long end_time_gpu = get_time();
    // Compare results
    printf("Comparing the output for each implementation for ARRAY_SIZE = %d; Comparison: ", ARRAY_SIZE);
    equalVectors(y_gpu_res, y_cpu) ? printf("Correct\n") : printf("Uncorrect\n");
    printf("CPU time: %ld ms\n", end_time_cpu-start_time_cpu);
    printf("GPU time: %ld ms\n\n", end_time_gpu-start_time_gpu);
    // Free the memory
    cudaFree(x_gpu);
    cudaFree(y_gpu);
    free(x);
    free(y);
    free(x_cpu);
    free(y_cpu);
    free(y_gpu_res);
    return 0;
}
14,372 | #include "includes.h"
// res = mat * vec for a fixed 1024x512 int matrix, using a shared-memory
// tree reduction per row segment. Each block handles rows blockIdx.x,
// blockIdx.x + gridDim.x, ...; within a row, threads stride over columns.
// Assumptions visible in the code: blockDim.x <= 128 (smem capacity) and
// blockDim.x a power of two (the halving reduction).
// NOTE(review): if blockDim.x does not divide mat_cols, or gridDim.x does
// not divide mat_rows, the unguarded loads read out of bounds — confirm
// the launch configuration with callers.
__global__ void mat_vec_mult_fixed_dims(int *mat, int *vec, int *res) {
    int mat_rows = 1024;
    int mat_cols = 512;
    // El for each thread, shared per block
    __shared__ int smem[128];
    for (int block_i = 0; block_i * gridDim.x < mat_rows; block_i++) {
        int row = blockIdx.x + (block_i * gridDim.x);
        int row_total = 0;
        for (int thread_i = 0; thread_i * blockDim.x < mat_cols; thread_i++) {
            int col = threadIdx.x + (thread_i * blockDim.x);
            // Load mult in shmem
            smem[threadIdx.x] = mat[row * mat_cols + col] * vec[col];
            __syncthreads();
            // Parallel reduction: halve the active range each step; the
            // barrier after every step keeps reads/writes ordered.
            for (int i = blockDim.x / 2; i > 0; i /= 2) {
                if (threadIdx.x < i) {
                    int temp = smem[threadIdx.x] + smem[threadIdx.x + i];
                    smem[threadIdx.x] = temp;
                }
                __syncthreads();
            }
            // Only 1 thread needs to do this (smem[0] holds the partial sum)
            if (threadIdx.x == 0)
                row_total += smem[threadIdx.x];
        }
        // Load into ans (single thread)
        if (threadIdx.x == 0)
            res[row] = row_total;
    }
}
// Dense N x N integer matrix times N-vector: C = A * B.
// param layout (ints): [N, A (N*N values), B (N values), C (N values)].
// The original code forces single-block semantics (bx = 0), so lanes
// 0..31 of the block simply stride over the rows.
__device__ void MatrixVector(void* param)
{
    int* p = (int*)param;
    const int N = p[0];
    int* A = p + 1;
    int* B = A + N * N;
    int* C = B + N;
    // Lane within the warp; block index deliberately ignored (assume one block).
    const int lane = threadIdx.x % 32;
    for (unsigned int row = lane; row < N; row += 32)
    {
        int acc = 0;
        for (unsigned int k = 0; k < N; k++)
            acc += A[row * N + k] * B[k];
        C[row] = acc;
    }
}
|
14,374 | #include <stdio.h>
// Atomically add b to a[threadIdx.x]; one thread per array element.
__global__ void addtoall(int* a, int b)
{
    atomicAdd(a + threadIdx.x, b);
}
/*
 * Host driver: launches addtoall seven times (7 + 6*3 = 25 per element)
 * and prints the resulting array.
 *
 * Bug fixes: the original copied an UNINITIALIZED `new int[N]` buffer to
 * the device, so the printed values were garbage — initialize to zero
 * first. It also leaked both the host array (`new[]` without `delete[]`)
 * and the device allocation (no cudaFree).
 */
int main(void)
{
    int N = 32;
    int *A = new int[N];
    // Zero the host data before it seeds the device buffer.
    for (int i = 0; i < N; i++)
        A[i] = 0;
    int *d_A;
    cudaMalloc((void**)&d_A, N*sizeof(int));
    cudaMemcpy(d_A, A, N*sizeof(int), cudaMemcpyHostToDevice);
    addtoall<<<1,N>>>(d_A, 7);
    addtoall<<<1,N>>>(d_A, 3);
    addtoall<<<1,N>>>(d_A, 3);
    addtoall<<<1,N>>>(d_A, 3);
    addtoall<<<1,N>>>(d_A, 3);
    addtoall<<<1,N>>>(d_A, 3);
    addtoall<<<1,N>>>(d_A, 3);
    // Blocking copy-back also synchronizes with the kernels above.
    cudaMemcpy(A, d_A, N*sizeof(int), cudaMemcpyDeviceToHost);
    for(int i =0; i<N; i++)
    {
        printf("%d ", A[i]);
    }
    // Release device and host memory (previously leaked).
    cudaFree(d_A);
    delete[] A;
}
|
14,375 | #include "cuda_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
#define ITER 1000000000
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// CPU version of the vector add function
// CPU reference: elementwise sum, c[i] = a[i] + b[i] for i in [0, n).
void vector_add_cpu(int *a, int *b, int *c, int n) {
    for (int idx = 0; idx < n; ++idx)
        c[idx] = a[idx] + b[idx];
}
// GPU version of the vector add function
// GPU vector add: gpu_c[i] = gpu_a[i] + gpu_b[i] for i in [0, n).
//
// Fixes vs. the original:
//  * the index was blockIdx.x alone, so every one of the 1024 threads of a
//    block redundantly wrote the same element — use the standard global
//    index (block * blockDim + thread) instead;
//  * there was no bounds guard; with the guard, the rounded launch in
//    main() is safe;
//  * the index is computed in 64-bit because blockIdx.x * blockDim.x
//    overflows 32-bit int for the ITER-block launch used by main().
__global__ void vector_add_gpu(int *gpu_a, int *gpu_b, int *gpu_c, int n) {
    long long i = (long long)blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        gpu_c[i] = gpu_a[i] + gpu_b[i];
    }
}
/*
 * Benchmark: ITER-element vector add on CPU vs GPU (managed memory),
 * printing timings and a few spot-check values.
 *
 * Bug fix: the original called cudaFree on the HOST pointers a/b/c —
 * those calls fail, and all three cudaMallocManaged buffers leaked; the
 * ha/hb/hc host buffers also leaked. Free the correct pointers.
 */
int main() {
    int *a, *b, *c;          // host inputs + CPU result
    int *gpu_a, *gpu_b, *gpu_c;  // managed buffers used by the kernel
    int *ha, *hb, *hc;       // host copies read back from the device
    a = (int *)malloc(ITER * sizeof(int));
    b = (int *)malloc(ITER * sizeof(int));
    c = (int *)malloc(ITER * sizeof(int));
    ha = (int *)malloc(ITER * sizeof(int));
    hb = (int *)malloc(ITER * sizeof(int));
    hc = (int *)malloc(ITER * sizeof(int));
    // We need variables accessible to the GPU,
    // so cudaMallocManaged provides these
    cudaMallocManaged(&gpu_a, ITER * sizeof(int));
    cudaMallocManaged(&gpu_b, ITER * sizeof(int));
    cudaMallocManaged(&gpu_c, ITER * sizeof(int));
    for (int i = 0; i < ITER; ++i) {
        a[i] = i;
        b[i] = i;
        c[i] = i;
    }
    // Call the CPU function and time it
    auto cpu_start = Clock::now();
    vector_add_cpu(a, b, c, ITER);
    auto cpu_end = Clock::now();
    std::cout << "vector_add_cpu: "
        << std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count()
        << " nanoseconds.\n";
    // Call the GPU function and time it
    // The triple angle brakets is a CUDA runtime extension that allows
    // parameters of a CUDA kernel call to be passed.
    // In this example, we are passing one thread block with ITER threads.
    cudaMemcpy(gpu_a,a,ITER*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b,b,ITER*sizeof(int),cudaMemcpyHostToDevice);
    auto gpu_start = Clock::now();
    vector_add_gpu <<<ITER, 1024>>> (gpu_a, gpu_b, gpu_c, ITER);
    cudaDeviceSynchronize();
    auto gpu_end = Clock::now();
    std::cout << "vector_add_gpu: "
        << std::chrono::duration_cast<std::chrono::nanoseconds>(gpu_end - gpu_start).count()
        << " nanoseconds.\n";
    cudaMemcpy(hc,gpu_c,ITER*sizeof(int),cudaMemcpyDeviceToHost);
    cudaMemcpy(hb,gpu_b,ITER*sizeof(int),cudaMemcpyDeviceToHost);
    gpuErrchk(cudaMemcpy(ha,gpu_a,ITER*sizeof(int),cudaMemcpyDeviceToHost));
    // Spot checks: CPU result vs GPU result at a few indices
    std::cout<<c[0]<<"\t"<<hc[0]<<std::endl;
    std::cout<<c[int(ITER/3)]<<"\t"<<hc[int(ITER/3)]<<std::endl;
    std::cout<<c[int(2*ITER/3)]<<"\t"<<hc[int(2*ITER/3)]<<std::endl;
    std::cout<<c[int(ITER-1)]<<"\t"<<hc[int(ITER-1)]<<std::endl;
    // Free the device (managed) allocations — previously cudaFree was
    // mistakenly called on the host pointers, leaking these.
    cudaFree(gpu_a);
    cudaFree(gpu_b);
    cudaFree(gpu_c);
    // Free the CPU-side allocations
    free(a);
    free(b);
    free(c);
    free(ha);
    free(hb);
    free(hc);
    return 0;
}
14,376 | #include "includes.h"
// Truncate each element to one decimal digit of scale:
// matrix[i] <- trunc(matrix[i] * 10), stored back as a double.
__global__ void changeValues(double *matrix, int size) {
    const int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i >= size)
        return;
    const int truncated = (int)(matrix[i] * 10);
    matrix[i] = (double)truncated;
}
14,377 | #include <stdint.h>
#include <stdio.h>
#define N 34
#define THREADS_PER_BLOCK 32
// In-place reversal of the fixed N-element array x: each thread in the
// first half swaps with its mirror element (the middle element of an odd
// N is left untouched, which is correct).
__global__ void reverse(int* x) {
    size_t i = THREADS_PER_BLOCK * blockIdx.x + threadIdx.x;
    if (i >= N/2)
        return;
    size_t mirror = N - 1 - i;
    int held = x[i];
    x[i] = x[mirror];
    x[mirror] = held;
}
// Host driver: builds [0..N), reverses it on the GPU, and prints the
// result. Exits with status 2 only on allocation failure; copy failures
// are reported but execution continues.
int main() {
    // Allocate arrays for X and Y on the CPU
    int* cpu_x = (int*)malloc(sizeof(int) * N);
    // Initialize X and Y
    int i;
    for(i=0; i<N; i++) {
        cpu_x[i] = i;
    }
    // Allocate space for X and Y on the GPU
    int* gpu_x;
    if(cudaMalloc(&gpu_x, sizeof(int) * N) != cudaSuccess) {
        fprintf(stderr, "Failed to allocate X array on GPU\n");
        exit(2);
    }
    // Copy the host X and Y arrays to the device X and Y arrays
    if(cudaMemcpy(gpu_x, cpu_x, sizeof(int) * N, cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "Failed to copy X to the GPU\n");
    }
    // How many blocks should be run, rounding up to include all threads?
    size_t blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    // Run the saxpy kernel
    reverse<<<blocks, THREADS_PER_BLOCK>>>(gpu_x);
    // Wait for the kernel to finish
    cudaDeviceSynchronize();
    // Copy values from the GPU back to the CPU
    if(cudaMemcpy(cpu_x, gpu_x,sizeof(int) * N, cudaMemcpyDeviceToHost) != cudaSuccess) {
        fprintf(stderr, "Failed to copy X from the GPU\n");
    }
    // Print index: value pairs of the reversed array
    for(i=0; i<N; i++) {
        printf("%d: %d\n", i, cpu_x[i]);
    }
    cudaFree(gpu_x);
    free(cpu_x);
    return 0;
}
|
14,378 | #include <iostream>
#include <math.h>
using namespace std;
#define CLASSES 2
// function to convolve the elements of two arrays
// Fully connected layer + ReLU for a CLASSES-way classifier.
// Launch contract (see main): one block per batch element,
// blockDim = (infeats, CLASSES); shared memory holds one partial sum per
// output class.
//
// Fixes vs. the original:
//  * every thread of a row did a plain `+=` into the same shared slot —
//    a data race that silently dropped contributions; use atomicAdd;
//  * the debug printf read outTemp[outId], which is out of bounds for
//    batch > 0 (outTemp has only CLASSES entries) — removed;
//  * a barrier is required before reading the accumulated sum, and the
//    ReLU is applied in the same single write instead of a second
//    unsynchronized pass over global memory.
__global__ void dense_relu(float *out, float *in, float *weights, int infeats) {
    __shared__ float outTemp[CLASSES];
    int batch = blockIdx.x;
    int outfeat = threadIdx.y;
    int infeat = threadIdx.x;
    int outIndivId = outfeat;
    int outId = batch * CLASSES + outfeat;
    int wId = batch * CLASSES * infeats + outfeat * infeats + infeat;
    int inId = batch * infeats + infeat;
    // One thread per row zeroes its accumulator.
    if (infeat == 0) {
        outTemp[outIndivId] = 0;
    }
    __syncthreads();
    // Race-free accumulation of the dot product across the row's threads.
    atomicAdd(&outTemp[outIndivId], in[inId] * weights[wId]);
    __syncthreads();
    // Single writer per output: apply ReLU and publish.
    if (infeat == 0) {
        out[outId] = fmaxf(outTemp[outIndivId], 0.0f);
    }
}
// Host driver: runs dense_relu on all-ones inputs and weights, so every
// output should equal inFeatures; prints the max absolute error and the
// output matrix.
int main(void) {
    //Define arrays
    float *input, *output, *weights;
    cudaError_t s;
    //Define sizes
    int inFeatures = 10;
    int batchSize = 2;
    int nInput = inFeatures * batchSize;
    int nOutput = CLASSES * batchSize;
    int nWeights = inFeatures * CLASSES * batchSize;
    float output_[nOutput];
    float input_[nInput];
    float weights_[nWeights];
    //Initialize inputs (all ones, so each output sums to inFeatures)
    for (int i = 0; i < nInput; i++) {
        input_[i] = 1.0f;
    }
    for (int i = 0; i < nWeights; i++) {
        weights_[i] = 1.0f;
    }
    //Perform memory operations
    cudaMalloc((void **) &input, nInput * sizeof(float));
    cudaMalloc((void **) &output, nOutput * sizeof(float));
    cudaMalloc((void **) &weights, nWeights * sizeof(float));
    cudaMemset((void **) output, 0, nOutput * sizeof(float));
    s = cudaMemcpy(input, input_, nInput * sizeof(float), cudaMemcpyHostToDevice);
    cout << cudaGetErrorName(s) << endl;
    s = cudaMemcpy(weights, weights_, nWeights * sizeof(float), cudaMemcpyHostToDevice);
    cout << cudaGetErrorName(s) << endl;
    // Launch: one block per batch item, 2-D block (inFeatures x CLASSES)
    dense_relu <<<batchSize, dim3(inFeatures, CLASSES)>>> (output, input, weights, inFeatures);
    s = cudaDeviceSynchronize();
    cout << cudaGetErrorName(s) << endl;
    s = cudaMemcpy(output_, output, nOutput * sizeof(float), cudaMemcpyDeviceToHost);
    cout << cudaGetErrorName(s) << endl;
    // Check for errors (all values should equal inFeatures)
    float maxError = 0.0f;
    float expectedAnswer = inFeatures;
    for (int i = 0; i < nOutput; i++) {
        maxError = fmax(maxError, fabs(output_[i] - expectedAnswer));
    }
    cout << "Max error: " << maxError << endl;
    // Print outputs: rows are classes, columns are batch items
    for (int i = 0; i < CLASSES; i++) {
        for (int j = 0; j < batchSize; j++) {
            cout << output_[j * CLASSES + i] << "\t";
        }
        cout << endl;
    }
    // Free memory
    cudaFree(input);
    cudaFree(output);
    cudaFree(weights);
    return 0;
}
|
14,379 | #include "includes.h"
/* we need these includes for CUDA's random number stuff */
using namespace std;
#define MAX 26
//int a[1000]; //array of all possible password characters
int b[1000]; //array of attempted password cracks
unsigned long long tries = 0;
char alphabet[] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' };
size_t result = 1000 * sizeof(float);
int *a = (int *) malloc(result);
// Brute-force password search: each thread enumerates candidate strings
// (base-26 counter in newB) and compares against the target digits in a;
// the matching thread copies its candidate to d_output.
// NOTE(review): several correctness concerns visible in the code —
//  * newB is read (incremented / compared) before ever being initialized,
//    so the starting candidate is indeterminate;
//  * nIter and idT are shared but only written under `idx == 0` /
//    match conditions with no synchronization, so the exit conditions
//    race across threads;
//  * the carry test `newB[i] >= 26 + alphabetTable[i]` mixes counter
//    values with character codes — confirm the intended encoding.
// Confirm all of the above against the original project before relying
// on this kernel.
__global__ void parallel_passwordCrack(int length,int*d_output,int *a)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    bool cracked = false;
    char alphabetTable[] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' };
    // Per-thread candidate counter (uninitialized — see NOTE above).
    int newB[1000];
    __shared__ int nIter;
    __shared__ int idT;
    __shared__ long totalAttempt;
    do{
        if(idx == 0){
            nIter = 0;
            totalAttempt = 0;
        }
        // Advance the candidate: increment the least-significant digit,
        // then propagate carries.
        newB[0]++;
        for(int i =0; i<length; i++){
            if (newB[i] >= 26 + alphabetTable[i]){
                newB[i] -= 26;
                newB[i+1]++;
            }else break;
        }
        // Compare the candidate against the target digit-by-digit.
        cracked=true;
        for(int k=0; k<length; k++)
        {
            if(newB[k]!=a[k]){
                cracked=false;
                break;
            }else
            {
                cracked = true;
            }
        }
        // First thread to match records its id; others exit when nIter set.
        if(cracked && nIter == 0){
            idT = idx;
            break;
        }
        else if(nIter){
            break;
        }
        totalAttempt++;
    }while(!cracked || !nIter);
    // The winning thread publishes its candidate.
    if(idx == idT){
        for(int i = 0; i< length; i++){
            d_output[i] = newB[i];
        }
    }
}
14,380 | #include "includes.h"
// Elementwise hyperbolic tangent: y[i] = tanh(x[i]) for i in [0, n),
// indexed over a 2-D grid of 1-D blocks.
//
// Fix: the original computed (exp(2x)-1)/(exp(2x)+1) with double-precision
// exp() inside a float kernel — a silent double promotion that is slow on
// consumer GPUs and overflows to inf/inf = NaN for large x. tanhf() is the
// mathematically identical, numerically robust single-precision form.
__global__ void bcnn_op_cuda_tanh_kernel(int n, float *x, float *y) {
    int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (i < n) {
        y[i] = tanhf(x[i]);
    }
    return;
}
14,381 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
#include<string.h>
// Enumeration (rank) sort: thread id computes the final position of s[id]
// by counting how many characters must precede it, then performs a single
// write into b. Launch: one thread per character (<<<1, n>>>).
//
// Fixes vs. the original:
//  * b[c] was written on EVERY loop iteration, clobbering slots owned by
//    other threads as the rank counter grew;
//  * equal characters all computed the same rank, so duplicates were lost
//    — break ties by index so every thread gets a unique slot.
__global__ void sort(char *s, int n, char *b)
{
    int id = threadIdx.x;
    int rank = 0;
    for (int i = 0; i < n; i++)
    {
        // Strictly smaller chars precede; equal chars with a lower index
        // precede too (stable, unique ranks).
        if (s[i] < s[id] || (s[i] == s[id] && i < id))
            rank++;
    }
    b[rank] = s[id];
}
/*
 * Host driver: reads a word, sorts its characters on the GPU, and prints
 * the sorted string.
 *
 * Security fix: scanf("%s") into a fixed 100-byte buffer is a classic
 * stack overflow on untrusted input — limit the field width to 99 so the
 * terminating NUL always fits.
 */
int main(void)
{
    char s[100],b[100];
    int n,i;
    printf("Enter the string\n");
    scanf("%99s", s);   /* width limit prevents buffer overflow */
    n = strlen(s);
    char *d_s, *d_b;
    int size;
    size = sizeof(char);
    cudaMalloc((void**)&d_s, n*size);
    cudaMalloc((void**)&d_b, n*size);
    cudaMemcpy(d_s,s,n*size, cudaMemcpyHostToDevice);
    /* one thread per character */
    sort<<<1,n>>>(d_s,n,d_b);
    /* blocking copy-back also synchronizes with the kernel */
    cudaMemcpy(b,d_b,n*size, cudaMemcpyDeviceToHost);
    for(i=0;i<n;i++)
        printf("%c",b[i]);
    cudaFree(d_s);
    cudaFree(d_b);
    return 0;
}
14,382 | #include<cuda.h>
#include<cstdio>
#include<iostream>
using namespace std;
// Double every element of vector1 (length n); one thread per element.
__global__ void kernel(int* vector1,int n){
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;
    vector1[gid] = vector1[gid] * 2;
}
// Double-buffered streaming demo: the 1600-element pinned host array is
// processed in 160-element segments; while the kernel doubles one device
// segment, the next segment is copied in and the previous result copied
// out. Finally prints the whole array.
// NOTE(review): stream1/stream2 are created both before the loop and
// again at the top of each iteration — the pre-loop pair is never
// destroyed (leak); also both async copies and the kernel are fully
// serialized by the cudaDeviceSynchronize each iteration, so no real
// overlap occurs. Confirm whether that was intentional.
__host__ int main(){
    int *vec1 = NULL;
    int *cuvec1 = NULL;
    cudaStream_t stream1;
    cudaStream_t stream2;
    cudaStreamCreate(&stream1);
    cudaStreamCreate(&stream2);
    int size = 1600;
    int cusize = 160;
    // Pinned host memory, required for truly asynchronous copies.
    cudaMallocHost(&vec1,size * sizeof(int));
    for(int i = 0; i < size; i++){
        vec1[i] = i;
    }
    // Device buffer holds two segments (the ping-pong pair).
    cudaMalloc((void**)&cuvec1, 2 * cusize * sizeof(int));
    // Prime the pipeline with the first segment.
    cudaMemcpy(cuvec1, vec1, cusize * sizeof(int), cudaMemcpyHostToDevice);
    int begin = cusize;
    int block = 32;
    int useSeg = 1;
    int grid = cusize/block;
    while(begin < size){
        cudaStreamCreate(&stream1);
        cudaStreamCreate(&stream2);
        // Upload the next segment while the kernel works on the other half.
        cudaMemcpyAsync(cuvec1 + (useSeg) * cusize, vec1 + begin, cusize * sizeof(int), cudaMemcpyHostToDevice,stream1);
        kernel<<<grid,block,0,stream2>>>(cuvec1 + (1 - useSeg) * cusize,cusize);
        // Download the segment processed in the previous iteration.
        cudaMemcpyAsync(vec1 + (begin - cusize), cuvec1 + (1 - useSeg) * cusize, cusize*sizeof(int), cudaMemcpyDeviceToHost,stream1);
        cudaDeviceSynchronize();
        cudaStreamDestroy(stream1);
        cudaStreamDestroy(stream2);
        begin += cusize;
        useSeg = 1 - useSeg;
    }
    // Drain: process and download the final segment.
    kernel<<<grid,block>>>(cuvec1 + (1 - useSeg) * cusize,cusize);
    cudaMemcpy(vec1 + (begin - cusize), cuvec1 + (1 - useSeg) * cusize, cusize*sizeof(int), cudaMemcpyDeviceToHost);
    for(int i = 0; i < size; i++){
        cout << vec1[i] << " ";
    }
    return 0;
}
|
14,383 | #include "includes.h"
// INCLUDES
// CUDA
// GIS
/**
* PARS
*/
#define BLOCK_DIM_small 64
#define BLOCK_DIM 256
static const unsigned int threads = 512;
bool print_intermediate_arrays = false;
const char *BASE_PATH = "/home/giuliano/git/cuda/reduction";
/*
* kernel labels
*/
const char *kern_0 = "filter_roi";
const char *kern_1 = "imperviousness_change_histc_sh_4" ;
const char *kern_2 = "imperviousness_change" ;
char buffer[255];
/*
* DEFINE I/O files
*/
// I/–
//const char *FIL_ROI = "/home/giuliano/git/cuda/reduction/data/ROI.tif";
//const char *FIL_BIN1 = "/home/giuliano/git/cuda/reduction/data/BIN1.tif";
//const char *FIL_BIN2 = "/home/giuliano/git/cuda/reduction/data/BIN2.tif";
const char *FIL_ROI = "/media/DATI/db-backup/ssgci-data/testing/ssgci_roi.tif";
const char *FIL_BIN1 = "/media/DATI/db-backup/ssgci-data/testing/ssgci_bin.tif";
const char *FIL_BIN2 = "/media/DATI/db-backup/ssgci-data/testing/ssgci_bin2.tif";
// –/O
const char *FIL_LTAKE_grid = "/home/giuliano/git/cuda/reduction/data/LTAKE_map.tif";
const char *FIL_LTAKE_count= "/home/giuliano/git/cuda/reduction/data/LTAKE_count.txt";
/* +++++DEFINEs+++++ */
/* Per-pixel land-take map: LTAKE[i] = BIN2[i] - BIN1[i] over a
 * WIDTH x HEIGHT raster, flattened; one thread per pixel. */
__global__ void imperviousness_change_char( const unsigned char *dev_BIN1, const unsigned char *dev_BIN2, unsigned int WIDTH, unsigned int HEIGHT, char *dev_LTAKE_map )
{
    const unsigned long int gid =
        (unsigned long int)blockDim.x * blockIdx.x + threadIdx.x;
    if( gid < WIDTH*HEIGHT ){
        dev_LTAKE_map[gid] = dev_BIN2[gid] - dev_BIN1[gid];
    }
}
14,384 |
#include <stdio.h>
/* Device-side scalar addition: *c = *a + *b (intended for a 1x1 launch). */
__global__ void add(int *a, int *b, int *c){
    int lhs = *a;
    int rhs = *b;
    *c = lhs + rhs;
}
/* Host driver: adds 2 + 3 on the device with a single-thread launch and
 * prints the result. */
int main(){
    int lhs = 2, rhs = 3, total;
    int *dev_lhs, *dev_rhs, *dev_total;
    const int bytes = sizeof(int);
    /* device scalars */
    cudaMalloc((void **)&dev_lhs, bytes);
    cudaMalloc((void **)&dev_rhs, bytes);
    cudaMalloc((void **)&dev_total, bytes);
    cudaMemcpy(dev_lhs, &lhs, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_rhs, &rhs, bytes, cudaMemcpyHostToDevice);
    add<<<1,1>>>(dev_lhs, dev_rhs, dev_total);
    /* blocking copy-back also synchronizes with the kernel */
    cudaMemcpy(&total, dev_total, bytes, cudaMemcpyDeviceToHost);
    printf("Sum of %d and %d = %d\n", lhs, rhs, total);
    cudaFree(dev_lhs); cudaFree(dev_rhs); cudaFree(dev_total);
    return 0;
}
|
14,385 | #include "includes.h"
// Grid-stride elementwise op:
//   result[i] = input[i] * sgn(sign[i]) * scale            (beta == 0)
//   result[i] = input[i] * sgn(sign[i]) * scale + beta*result[i]  (else)
// where sgn(v) is +1 for v >= 0 and -1 otherwise. The beta test is hoisted
// out of the loop, matching the original's two specialized loops.
__global__ void cudaSScaleSign_kernel(unsigned int size, float* input, float* sign, const float scale, const float beta, float* result)
{
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step  = blockDim.x * gridDim.x;
    if (beta != 0.0f) {
        for (unsigned int i = first; i < size; i += step) {
            // Exact sign flip (multiplying by ±1 is lossless in IEEE-754).
            const float signedScale = (sign[i] >= 0) ? scale : -scale;
            result[i] = input[i] * signedScale + beta * result[i];
        }
    }
    else {
        for (unsigned int i = first; i < size; i += step) {
            const float signedScale = (sign[i] >= 0) ? scale : -scale;
            result[i] = input[i] * signedScale;
        }
    }
}
14,386 | #include <stdio.h>
#include <stdlib.h>
#define INPUT_SIZE 65536
/*
N.B.: a proper mask size can show the efficiency of tiledConvolution respect to the others.
If mask size is too small divergence of threads in the could result in a cumbersome computation
*/
#define MASK_SIZE 64
#define RADIUS MASK_SIZE/2
#define RES_SIZE (MASK_SIZE+INPUT_SIZE-1)
#define THREAD_PER_BLOCK 64
#define TILE_SIZE THREAD_PER_BLOCK
#define N_BLOCKS (RES_SIZE+THREAD_PER_BLOCK-1)/THREAD_PER_BLOCK
/*
Debug runtime API function
*/
#define CHECK(call){\
const cudaError_t error=call;\
if( error!= cudaSuccess ){\
printf("Error %s %d, ",__FILE__,__LINE__);\
printf("code: %d, reason: %s\n",error,cudaGetErrorString(error));\
exit(1);\
}\
}\
void logResult(int *res);
/*
Compute the convolution between in and mask in the straightforward way
*/
__global__ void convolution_easy(int *in,int *mask,int *res){
	// One thread per element of the full convolution output.
	const int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (out_idx >= RES_SIZE)
		return;
	// res[n] = sum_j in[n-j] * mask[j], skipping taps outside the input.
	int sum = 0;
	for (int tap = 0; tap < MASK_SIZE; tap++) {
		const int in_idx = out_idx - tap;
		if (in_idx >= 0 && in_idx < INPUT_SIZE)
			sum += in[in_idx] * mask[tap];
	}
	res[out_idx] = sum;
}
/*
Compute the convolution between in and mask in the straightforward way (alternative version)
*/
__global__ void convolution_alternate(int *in,int *mask,int *res){
	// One thread per output element; `in` is expected RADIUS-padded by the caller.
	int n=blockIdx.x*blockDim.x+threadIdx.x;
	// Guard the grid tail: N_BLOCKS*THREAD_PER_BLOCK (65664) exceeds
	// RES_SIZE (65599), so the unguarded original wrote res[n] out of
	// bounds for the trailing threads.
	if(n<RES_SIZE){
		int acc=0,j;
		int start_point=n-(MASK_SIZE/2);
		for(j=0;j<MASK_SIZE;j++){
			// NOTE(review): the read bound uses RES_SIZE, but main() allocates
			// only INPUT_SIZE+RADIUS ints for the padded input — reads near the
			// tail may still overrun the buffer; verify against the caller.
			if(start_point+j>=0 && start_point+j<RES_SIZE){
				acc+=in[start_point+j]*mask[j];
			}
		}
		res[n]=acc;
	}
}
/*
Declaring constant memory mask
*/
__constant__ int MASK[MASK_SIZE];
/*
Tiled convolution
N.B: input array must be padded of MASK_SIZE/2(RADIUS) to work
*/
__global__ void tiledConvolution(int *in,int *res){
// One output element per thread; halo data shared within the block.
int n=blockDim.x*blockIdx.x+threadIdx.x;
/*
The effective size of the tile is the tile itself+the halo (2*RADIUS)
*/
__shared__ int tile[TILE_SIZE+MASK_SIZE];
if(n<RES_SIZE){
res[n]=0;
/*
Loading into shared memory left halo
*/
if(threadIdx.x<RADIUS)
/*If at the start of the input, left halo is zero-padding*/
tile[threadIdx.x]= blockIdx.x==0 ? 0 : in[n-RADIUS];
/*
Loading into shared memory right halo
*/
if(threadIdx.x+RADIUS>=TILE_SIZE)
/*If at the end of the input, right halo is zero-padding*/
tile[threadIdx.x+RADIUS*2]= n>=INPUT_SIZE ? 0 : in[n+RADIUS];
/*
Every thread loads at least this single data
*/
// NOTE(review): n can reach RES_SIZE-1 but main() allocates the padded
// input with only INPUT_SIZE+RADIUS ints — this load (and in[n+RADIUS]
// above) may read past the allocation for tail blocks; confirm intent.
tile[threadIdx.x+RADIUS]=in[n];
/*
Synchronize all the threads in the block
*/
// NOTE(review): this barrier sits inside `if(n<RES_SIZE)`; in the last
// block some threads skip it, which is undefined behavior for
// __syncthreads() — verify RES_SIZE vs. grid size or hoist the barrier.
__syncthreads();
/*
Compute the convolution output element of which the thread is responsible
*/
// Accumulates directly into global memory (res[n] was zeroed above).
for(int i=0; i<MASK_SIZE; i++)
res[n]+=MASK[i]*tile[threadIdx.x+i];
}
}
/*
Tiled convolution alternate version
*/
// Tiled 1D convolution of `data` (length n) against constant-memory MASK.
// Each block stages its tile plus a RADIUS-wide halo on each side.
__global__ void convolution1D(int *result, int *data, int n) {
// shared memory size = TILE + MASK
// NOTE(review): tile is float while data/MASK/sum are int — every staged
// value round-trips through float; presumably unintentional, confirm.
__shared__ float tile[TILE_SIZE+MASK_SIZE];
// edges
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
int left = blockIdx.x * blockDim.x - RADIUS;
int right = (blockIdx.x + 1) * blockDim.x;
if (threadIdx.x < RADIUS) // left
tile[threadIdx.x] = left < 0 ? 0 : data[left + threadIdx.x];
else if (threadIdx.x >= blockDim.x - RADIUS) // right
tile[threadIdx.x + MASK_SIZE - 1] = right >= n ? 0 : data[right + threadIdx.x - blockDim.x + RADIUS];
// center
// NOTE(review): no idx < n guard — tail threads read/write out of bounds
// here and at result[idx] below; verify launch size against n.
tile[threadIdx.x + RADIUS] = data[idx];
__syncthreads();
// convolution: tile * mask
int sum = 0;
// NOTE(review): i runs -RADIUS..RADIUS inclusive (65 taps), so the final
// iteration reads MASK[MASK_SIZE] — one past the constant array; likely
// the loop was meant to be i < RADIUS. Flagged, not changed.
for (int i = -RADIUS; i <= RADIUS; i++)
sum += tile[threadIdx.x + RADIUS + i] * MASK[i + RADIUS];
// store conv result
result[idx] = sum;
}
int main(){
	/* Host buffers: raw input, RADIUS-padded input (required by the tiled
	   kernels), the convolution mask, and the result. */
	int *input=(int*)malloc(INPUT_SIZE*sizeof(int)),
	*padded_in=(int*)malloc( (INPUT_SIZE+RADIUS)*sizeof(int) ),
	*mask=(int*)malloc(MASK_SIZE*sizeof(int)),
	*res=(int*)malloc(RES_SIZE*sizeof(int));
	int i;
	bool log=false;
	/* Device buffers; the CHECK macro (defined above) aborts with a
	   readable message on any CUDA error — the original never used it. */
	int *gpu_in,*gpu_padded,*gpu_mask,*gpu_res;
	CHECK(cudaMalloc((void**)&gpu_in,INPUT_SIZE*sizeof(int)));
	CHECK(cudaMalloc((void**)&gpu_padded,(INPUT_SIZE+RADIUS)*sizeof(int)));
	CHECK(cudaMalloc((void**)&gpu_mask,MASK_SIZE*sizeof(int)));
	CHECK(cudaMalloc((void**)&gpu_res,RES_SIZE*sizeof(int)));
	/* Fill the input with ones; the padded copy gets RADIUS leading zeros. */
	for(i=0;i<INPUT_SIZE;i++){input[i]=1;}
	for(i=0;i<INPUT_SIZE+RADIUS;i++){padded_in[i]= i<RADIUS ? 0:1;}
	for(i=0;i<MASK_SIZE;i++){mask[i]=1;}
	/* Optionally dump the data arrays. */
	if(log){
		printf("Input array\n");
		for(i=0;i<INPUT_SIZE;i++){printf("%d ",input[i]);}
		printf("\n");
		printf("Convolution mask\n");
		for(i=0;i<MASK_SIZE;i++){printf("%d ",mask[i]);}
		printf("\n");
	}
	/* Upload input and mask. */
	CHECK(cudaMemcpy(gpu_in,input,INPUT_SIZE*sizeof(int),cudaMemcpyHostToDevice));
	CHECK(cudaMemcpy(gpu_mask,mask,MASK_SIZE*sizeof(int),cudaMemcpyHostToDevice));
	/* Straightforward convolution. cudaGetLastError() surfaces launch
	   failures that <<< >>> itself never reports. */
	convolution_easy<<<N_BLOCKS,THREAD_PER_BLOCK>>>(gpu_in,gpu_mask,gpu_res);
	CHECK(cudaGetLastError());
	CHECK(cudaMemcpy(res,gpu_res,RES_SIZE*sizeof(int),cudaMemcpyDeviceToHost));
	if(log)
		logResult(res);
	/* Upload the padded input used by the remaining kernels. */
	CHECK(cudaMemcpy(gpu_padded,padded_in,(INPUT_SIZE+RADIUS)*sizeof(int),cudaMemcpyHostToDevice));
	/* Alternative convolution kernel. */
	convolution_alternate<<<N_BLOCKS,THREAD_PER_BLOCK>>>(gpu_padded,gpu_mask,gpu_res);
	CHECK(cudaGetLastError());
	CHECK(cudaMemcpy(res,gpu_res,RES_SIZE*sizeof(int),cudaMemcpyDeviceToHost));
	if(log)
		logResult(res);
	/* Second launch of the alternative kernel (kept from the original flow;
	   result overwrites the previous one). */
	convolution_alternate<<<N_BLOCKS,THREAD_PER_BLOCK>>>(gpu_padded,gpu_mask,gpu_res);
	CHECK(cudaGetLastError());
	CHECK(cudaMemcpy(res,gpu_res,RES_SIZE*sizeof(int),cudaMemcpyDeviceToHost));
	/* The tiled kernels read the mask from constant memory. */
	CHECK(cudaMemcpyToSymbol(MASK,mask,MASK_SIZE*sizeof(int)));
	/* Tiled convolution. */
	tiledConvolution<<<N_BLOCKS,THREAD_PER_BLOCK>>>(gpu_padded,gpu_res);
	CHECK(cudaGetLastError());
	CHECK(cudaMemcpy(res,gpu_res,RES_SIZE*sizeof(int),cudaMemcpyDeviceToHost));
	if(log)
		logResult(res);
	/* Alternate tiled convolution. */
	convolution1D<<<N_BLOCKS,THREAD_PER_BLOCK>>>(gpu_res,gpu_padded,INPUT_SIZE+RADIUS);
	CHECK(cudaGetLastError());
	CHECK(cudaMemcpy(res,gpu_res,RES_SIZE*sizeof(int),cudaMemcpyDeviceToHost));
	if(log)
		logResult(res);
	/* Free device memory. */
	cudaFree(gpu_in);
	cudaFree(gpu_padded);
	cudaFree(gpu_mask);
	cudaFree(gpu_res);
	/* Free host memory — the original leaked `res`. */
	free(input);
	free(padded_in);
	free(mask);
	free(res);
	return 0;
}
void logResult(int *res){
	// Report the run configuration, then dump every element of the result.
	printf("Convolution mask: %d\n",MASK_SIZE);
	printf("Input array size: %d\n",INPUT_SIZE);
	printf("Result array size: %d\n",RES_SIZE);
	printf("Number of blocks: %d\n",N_BLOCKS);
	printf("Result\n\n");
	for(int k=0;k<RES_SIZE;k++)
		printf("%d ",res[k]);
	printf("\n");
}
14,387 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
using namespace std;
#define N 10
__global__ void add(int *a, int *b, int *c) {
	// One block per element: blockIdx.x is the element index.
	const int elem = blockIdx.x;
	if (elem < N)
		c[elem] = a[elem] + b[elem];
}
int main(void) {
	// Host arrays, the user's menu choice, and a scratch input value.
	int a[N], b[N], c[N], userInput, numToAdd;
	int *dev_a, *dev_b, *dev_c;
	// Seed the RNG for the automatic-fill path.
	srand((unsigned)time(NULL));
	// Device-side copies of the three arrays.
	cudaMalloc((void**)&dev_a, N * sizeof(int));
	cudaMalloc((void**)&dev_b, N * sizeof(int));
	cudaMalloc((void**)&dev_c, N * sizeof(int));
	// Ask how the arrays should be populated.
	printf("Choose the numbers for the arrays\n");
	printf("1 to fill automatically or 2 to fill manually: ");
	scanf("%d", &userInput);
	if (userInput == 1) {
		// Automatic fill with small random values.
		for (int idx = 0; idx < N; idx++) {
			a[idx] = rand() % 20;
			b[idx] = rand() % 10;
		}
	} else {
		// Manual fill: read every element of A and B from stdin.
		for (int idx = 0; idx < N; idx++) {
			printf("Choose a number %d for A: ", idx + 1);
			scanf("%d", &numToAdd);
			a[idx] = numToAdd;
			printf("Choose a number %d for B: ", idx + 1);
			scanf("%d", &numToAdd);
			b[idx] = numToAdd;
		}
	}
	// Upload, add element-wise (one block per element), download.
	cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
	add<<<N, 1 >>>(dev_a, dev_b, dev_c);
	cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
	// Release device memory. NOTE(review): the sums in c are never printed.
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_c);
}
|
14,388 | // About nucleotide.
#define CHARACTER_CARDINALITY 4 /**< One character is either A, C, G, or T. */
__constant__ unsigned int *scodon; /**< The special codon array. */
__constant__ unsigned int character_count; /**< Number of characters. */
__constant__ unsigned int overlapping_character_count; /**< Number of overlapping characters between two consecutive threads. */
__constant__ unsigned int overlapping_scodon_count; /**< Number of overlapping special codons between two consecutive threads. */
// About agrep algorithm.
__constant__ unsigned int mask_array_32[CHARACTER_CARDINALITY]; /**< The 32-bit mask array of pattern. */
__constant__ unsigned long long mask_array_64[CHARACTER_CARDINALITY]; /**< The 64-bit mask array of pattern. */
__constant__ unsigned int test_bit_32; /**< The test bit for determining matches of patterns of length 32. */
__constant__ unsigned long long test_bit_64; /**< The test bit for determining matches of patterns of length 64. */
// About result.
__constant__ unsigned int max_match_count; /**< Maximum number of matches of one single query. */
__constant__ unsigned int *match; /**< The match array. */
__device__ volatile unsigned int match_count; /**< Number of matches. */
// About CUDA implementation.
#define MAX_UNSIGNED_INT 0xffffffffUL /**< The maximum value of an unsigned int. */
#define MAX_UNSIGNED_LONG_LONG 0xffffffffffffffffULL /**< The maximum value of an unsigned long long. */
#define B 7 /**< Each thread block consists of 2^B (=1<<B) threads. */
#define L 8 /**< Each thread processes 2^L (=1<<L) special codons plus those in the overlapping zone of two consecutive threads. */
/**
* The CUDA agrep kernel with matching tables of 32 bits and edit distance of 0.
* All the necessary parameters are stored in constant memory.
*/
template<unsigned int KI>
__global__ void agrepKernel32()
{
// Phase overview: (1) warm up the matching state over this thread's
// overlapping prefix, caching those codons in shared memory for the
// *previous* thread; (2) scan this thread's own 2^L codons, reporting
// matches; (3) continue into the next thread's prefix (read from shared
// memory, or from global for the block's last thread) so matches that
// straddle thread boundaries are found.
// NOTE(review): the `match_count < max_match_count` tests below read the
// counter *before* the atomicAdd, so concurrent reporters can push the
// stored index past max_match_count — the match buffer is presumably
// over-allocated to absorb this; confirm on the host side.
// About CUDA implementation.
extern __shared__ unsigned int scodon_header[][1 << B]; // Used to store the first overlapping_scodon_count special codons of each thread of a thread block.
unsigned int block_base_index; // The base index of current thread block.
unsigned int inputting_scodon_base_index; // The base index into inputting special codon of current thread.
unsigned int scodon_index; // Used to enumerate the 2^L (=1<<L) special codons plus those in the overlapping zone of two consecutive threads.
unsigned int scodon_buffer; // The special codon currently being processed.
// About agrep algorithm.
unsigned int character_index; // Used to enumerate the special codon buffer.
unsigned int mask_word; // The mask word of a character from mask array.
unsigned int r[KI + 1]; // The most recent columns of K+1 matching tables.
unsigned int r0; // The second most recent column of previous matching table.
unsigned int r1; // The most recent column of previous matching table.
unsigned int r2; // The second most recent column of current matching table.
unsigned int r3; // The most recent column of current matching table. r3 = function(r0, r1, r2, mask_value);
unsigned int k; // Used to enumerate K+1 matching tables.
// About result.
unsigned int outputting_scodon_base_index; // The base index into outputting special codon of current thread.
unsigned int matching_character_index; // The output of the kernel. It stores the matching ending position.
block_base_index = blockIdx.x << (L + B); // The base index of current thread block.
inputting_scodon_base_index = block_base_index + threadIdx.x; // Coalesced global memory access is ensured.
outputting_scodon_base_index = block_base_index + (threadIdx.x << L); // Original order of corpus.
r[0] = MAX_UNSIGNED_INT;
for (k = 1; k <= KI; k++)
r[k] = r[k - 1] << 1; // Initialize K+1 matching tables according to agrep algorithm.
// Phase 1: warm-up over the whole codons of the overlapping prefix.
// No matches are reported here (they belong to the previous thread).
for (scodon_index = 0; scodon_index < overlapping_scodon_count - 1; scodon_index++)
{
scodon_buffer = scodon[inputting_scodon_base_index + (scodon_index << B)];
for (character_index = 0; character_index < 16; character_index++)
{
mask_word = mask_array_32[(scodon_buffer >> (character_index << 1)) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
}
scodon_header[scodon_index][threadIdx.x] = scodon_buffer;
}
// Last overlapping codon: only part of its 16 characters belong to the
// warm-up; the remainder (next loop) starts reporting matches.
scodon_buffer = scodon[inputting_scodon_base_index + (scodon_index << B)];
for (character_index = 0; character_index < overlapping_character_count - ((overlapping_scodon_count - 1) << 4); character_index++)
{
mask_word = mask_array_32[(scodon_buffer >> (character_index << 1)) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
}
for (; character_index < 16; character_index++)
{
mask_word = mask_array_32[(scodon_buffer >> (character_index << 1)) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
/* A possible match is found.
* 1) Calculate the matching character index, and ensure it does not exceed the corpus boundary.
* 2) Atomically increase match_count by 1, whose original value points to the index that the current match should be saved at.
* 3) Save the matching character index to the match array, if the max number of matches has not yet been exceeded.
*/
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + character_index;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
}
scodon_header[scodon_index][threadIdx.x] = scodon_buffer;
__syncthreads(); // Make sure all the threads of current thread block have saved their first overlapping_scodon_count special codons to the shared memory for later use by the previous thread.
// Phase 2: the thread's own codons, with the 16 characters of each codon
// manually unrolled (shift 0,2,...,30 <-> character offset +0..+15).
for (scodon_index++; scodon_index < (1 << L); scodon_index++) // These special codons at index [overlapping_scodon_count, 2^L) are processed by current thread only once, hence no need to save them into shared memory.
{
scodon_buffer = scodon[inputting_scodon_base_index + (scodon_index << B)];
mask_word = mask_array_32[(scodon_buffer >> 0) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 0;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 2) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 1;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 4) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 2;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 6) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 3;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 8) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 4;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 10) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 5;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 12) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 6;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 14) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 7;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 16) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 8;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 18) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 9;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 20) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 10;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 22) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 11;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 24) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 12;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 26) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 13;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 28) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 14;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
mask_word = mask_array_32[(scodon_buffer >> 30) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + 15;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
}
// Phase 3: continue into the next thread's overlap. The last thread of
// the block reads the next block's codons from global memory; all other
// threads reuse the codons their successor cached in shared memory.
for (scodon_index = 0; scodon_index < overlapping_scodon_count - 1; scodon_index++)
{
scodon_buffer = ((threadIdx.x == (blockDim.x - 1)) ? scodon[block_base_index + (1 << (L + B)) + (scodon_index << B)] : scodon_header[scodon_index][threadIdx.x + 1]);
for (character_index = 0; character_index < 16; character_index++)
{
mask_word = mask_array_32[(scodon_buffer >> (character_index << 1)) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + (1 << L) + scodon_index) << 4) + character_index;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
}
}
// Final partial codon of the overlap zone.
scodon_buffer = ((threadIdx.x == (blockDim.x - 1)) ? scodon[block_base_index + (1 << (L + B)) + (scodon_index << B)] : scodon_header[scodon_index][threadIdx.x + 1]);
for (character_index = 0; character_index < overlapping_character_count - ((overlapping_scodon_count - 1) << 4); character_index++)
{
mask_word = mask_array_32[(scodon_buffer >> (character_index << 1)) & 3];
r2 = r[0];
r3 = (r2 << 1) | mask_word;
r[0] = r3;
for (k = 1; k <= KI; k++)
{
r0 = r2;
r1 = r3;
r2 = r[k];
r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
r[k] = r3;
}
if (!(r3 & test_bit_32) && (match_count < max_match_count))
{
matching_character_index = ((outputting_scodon_base_index + (1 << L) + scodon_index) << 4) + character_index;
if (matching_character_index <= character_count)
match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
}
}
}
/**
 * The CUDA agrep kernel with matching tables of 64 bits and edit distance KI.
 * All the necessary parameters are stored in constant memory.
 * Expected launch: (1 << B) threads per block; dynamic shared memory sized to
 * hold overlapping_scodon_count rows of (1 << B) words (scodon_header).
 */
template<unsigned int KI>
__global__ void agrepKernel64()
{
	// About CUDA implementation.
	extern __shared__ unsigned int scodon_header[][1 << B]; // First overlapping_scodon_count special codons of each thread, published for the previous thread.
	unsigned int block_base_index;             // The base index of current thread block.
	unsigned int inputting_scodon_base_index;  // The base index into inputting special codon of current thread.
	unsigned int scodon_index;                 // Enumerates the 2^L special codons plus those in the overlapping zone of two consecutive threads.
	unsigned int scodon_buffer;                // The special codon currently being processed.
	// About agrep algorithm.
	unsigned int character_index;              // Enumerates the 16 two-bit characters packed in one special codon.
	unsigned long long mask_word;              // The mask word of a character from the mask array.
	unsigned long long r[KI + 1];              // The most recent columns of K+1 matching tables.
	unsigned long long r0;                     // The second most recent column of previous matching table.
	unsigned long long r1;                     // The most recent column of previous matching table.
	unsigned long long r2;                     // The second most recent column of current matching table.
	unsigned long long r3;                     // The most recent column of current matching table. r3 = function(r0, r1, r2, mask_word);
	unsigned int k;                            // Enumerates the K+1 matching tables.
	// About result.
	unsigned int outputting_scodon_base_index; // The base index into outputting special codon of current thread.
	unsigned int matching_character_index;     // The output of the kernel: a matching ending position.
	block_base_index = blockIdx.x << (L + B);
	inputting_scodon_base_index = block_base_index + threadIdx.x;         // Coalesced global memory access is ensured.
	outputting_scodon_base_index = block_base_index + (threadIdx.x << L); // Original order of corpus.
	// Initialize the K+1 matching tables according to the agrep algorithm.
	r[0] = MAX_UNSIGNED_LONG_LONG;
	for (k = 1; k <= KI; k++)
		r[k] = r[k - 1] << 1;
	// Warm-up phase: the first overlapping_scodon_count special codons are
	// re-scanned by the previous thread, so no matches are emitted here, but
	// each codon is saved to shared memory for that thread to consume.
	for (scodon_index = 0; scodon_index < overlapping_scodon_count - 1; scodon_index++)
	{
		scodon_buffer = scodon[inputting_scodon_base_index + (scodon_index << B)];
		for (character_index = 0; character_index < 16; character_index++)
		{
			mask_word = mask_array_64[(scodon_buffer >> (character_index << 1)) & 3];
			r2 = r[0];
			r3 = (r2 << 1) | mask_word;
			r[0] = r3;
			for (k = 1; k <= KI; k++)
			{
				r0 = r2;
				r1 = r3;
				r2 = r[k];
				r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
				r[k] = r3;
			}
		}
		scodon_header[scodon_index][threadIdx.x] = scodon_buffer;
	}
	// Last overlapping special codon: characters below the overlap boundary
	// are still warm-up only; the remaining characters start emitting matches.
	scodon_buffer = scodon[inputting_scodon_base_index + (scodon_index << B)];
	for (character_index = 0; character_index < overlapping_character_count - ((overlapping_scodon_count - 1) << 4); character_index++)
	{
		mask_word = mask_array_64[(scodon_buffer >> (character_index << 1)) & 3];
		r2 = r[0];
		r3 = (r2 << 1) | mask_word;
		r[0] = r3;
		for (k = 1; k <= KI; k++)
		{
			r0 = r2;
			r1 = r3;
			r2 = r[k];
			r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
			r[k] = r3;
		}
	}
	for (; character_index < 16; character_index++)
	{
		mask_word = mask_array_64[(scodon_buffer >> (character_index << 1)) & 3];
		r2 = r[0];
		r3 = (r2 << 1) | mask_word;
		r[0] = r3;
		for (k = 1; k <= KI; k++)
		{
			r0 = r2;
			r1 = r3;
			r2 = r[k];
			r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
			r[k] = r3;
		}
		if (!(r3 & test_bit_64) && (match_count < max_match_count))
		{
			/* A possible match is found.
			 * 1) Calculate the matching character index, and ensure it does not exceed the corpus boundary.
			 * 2) Atomically increase match_count by 1, whose original value points to the index that the current match should be saved at.
			 * 3) Save the matching character index to the match array, if the max number of matches has not yet been exceeded.
			 */
			matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + character_index;
			if (matching_character_index <= character_count)
				match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
		}
	}
	scodon_header[scodon_index][threadIdx.x] = scodon_buffer;
	__syncthreads(); // All threads of the block must publish their overlapping special codons before the previous thread reads them.
	// Special codons at index [overlapping_scodon_count, 2^L) are processed by
	// the current thread only once, hence no need to save them to shared
	// memory. The original code unrolled the 16 character positions by hand;
	// #pragma unroll yields the same generated code without the duplication.
	for (scodon_index++; scodon_index < (1 << L); scodon_index++)
	{
		scodon_buffer = scodon[inputting_scodon_base_index + (scodon_index << B)];
		#pragma unroll
		for (character_index = 0; character_index < 16; character_index++)
		{
			mask_word = mask_array_64[(scodon_buffer >> (character_index << 1)) & 3];
			r2 = r[0];
			r3 = (r2 << 1) | mask_word;
			r[0] = r3;
			for (k = 1; k <= KI; k++)
			{
				r0 = r2;
				r1 = r3;
				r2 = r[k];
				r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
				r[k] = r3;
			}
			if (!(r3 & test_bit_64) && (match_count < max_match_count))
			{
				matching_character_index = ((outputting_scodon_base_index + scodon_index) << 4) + character_index;
				if (matching_character_index <= character_count)
					match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
			}
		}
	}
	// Continue into the overlap owned by the next thread: read its saved
	// codons from shared memory, or straight from global memory for the last
	// thread of the block (which overlaps into the next block's data).
	for (scodon_index = 0; scodon_index < overlapping_scodon_count - 1; scodon_index++)
	{
		scodon_buffer = ((threadIdx.x == (blockDim.x - 1)) ? scodon[block_base_index + (1 << (L + B)) + (scodon_index << B)] : scodon_header[scodon_index][threadIdx.x + 1]);
		for (character_index = 0; character_index < 16; character_index++)
		{
			mask_word = mask_array_64[(scodon_buffer >> (character_index << 1)) & 3];
			r2 = r[0];
			r3 = (r2 << 1) | mask_word;
			r[0] = r3;
			for (k = 1; k <= KI; k++)
			{
				r0 = r2;
				r1 = r3;
				r2 = r[k];
				r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
				r[k] = r3;
			}
			if (!(r3 & test_bit_64) && (match_count < max_match_count))
			{
				matching_character_index = ((outputting_scodon_base_index + (1 << L) + scodon_index) << 4) + character_index;
				if (matching_character_index <= character_count)
					match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
			}
		}
	}
	// Final (possibly partial) special codon of the overlap.
	scodon_buffer = ((threadIdx.x == (blockDim.x - 1)) ? scodon[block_base_index + (1 << (L + B)) + (scodon_index << B)] : scodon_header[scodon_index][threadIdx.x + 1]);
	for (character_index = 0; character_index < overlapping_character_count - ((overlapping_scodon_count - 1) << 4); character_index++)
	{
		mask_word = mask_array_64[(scodon_buffer >> (character_index << 1)) & 3];
		r2 = r[0];
		r3 = (r2 << 1) | mask_word;
		r[0] = r3;
		for (k = 1; k <= KI; k++)
		{
			r0 = r2;
			r1 = r3;
			r2 = r[k];
			r3 = ((r2 << 1) | mask_word) & ((r0 & r1) << 1) & r0;
			r[k] = r3;
		}
		if (!(r3 & test_bit_64) && (match_count < max_match_count))
		{
			matching_character_index = ((outputting_scodon_base_index + (1 << L) + scodon_index) << 4) + character_index;
			if (matching_character_index <= character_count)
				match[atomicAdd((unsigned int *)&match_count, 1)] = matching_character_index;
		}
	}
}
/**
 * Transfer necessary parameters to CUDA constant memory.
 * This agrep kernel initialization should be called only once for searching the same corpus.
 * Note: for the two array arguments, only the pointer value is stored in the
 * constant symbol (the kernels dereference them as device memory), not the
 * array contents.
 * @param[in] scodon_arg The special codon array.
 * @param[in] character_count_arg Actual number of characters.
 * @param[in] match_arg The match array.
 * @param[in] max_match_count_arg Maximum number of matches of one single query.
 */
extern "C" void initAgrepKernel(const unsigned int *scodon_arg, const unsigned int character_count_arg, const unsigned int *match_arg, const unsigned int max_match_count_arg)
{
cudaMemcpyToSymbol(scodon, &scodon_arg, sizeof(unsigned int *)); // Stores the pointer itself into constant memory.
cudaMemcpyToSymbol(character_count, &character_count_arg, sizeof(unsigned int));
cudaMemcpyToSymbol(match, &match_arg, sizeof(unsigned int *)); // Likewise stores the pointer, not the array.
cudaMemcpyToSymbol(max_match_count, &max_match_count_arg, sizeof(unsigned int));
}
/**
 * Transfer 32-bit mask array and test bit from host to CUDA constant memory.
 * Called per pattern, before launching agrepKernel32.
 * @param[in] mask_array_arg The mask array of a pattern (CHARACTER_CARDINALITY entries).
 * @param[in] test_bit_arg The test bit.
 */
extern "C" void transferMaskArray32(const unsigned int *mask_array_arg, const unsigned int test_bit_arg)
{
cudaMemcpyToSymbol(mask_array_32, mask_array_arg, sizeof(unsigned int) * CHARACTER_CARDINALITY); // Copies the array contents.
cudaMemcpyToSymbol(test_bit_32, &test_bit_arg, sizeof(unsigned int));
}
/**
 * Transfer 64-bit mask array and test bit from host to CUDA constant memory.
 * Called per pattern, before launching agrepKernel64.
 * @param[in] mask_array_arg The mask array of a pattern (CHARACTER_CARDINALITY entries).
 * @param[in] test_bit_arg The test bit.
 */
extern "C" void transferMaskArray64(const unsigned long long *mask_array_arg, const unsigned long long test_bit_arg)
{
cudaMemcpyToSymbol(mask_array_64, mask_array_arg, sizeof(unsigned long long) * CHARACTER_CARDINALITY); // Copies the array contents.
cudaMemcpyToSymbol(test_bit_64, &test_bit_arg, sizeof(unsigned long long));
}
/**
 * Invoke the cuda implementation of agrep kernel.
 * Patterns of up to 32 characters use the 32-bit kernel; longer patterns use
 * the 64-bit kernel. The edit distance is a template argument (it sizes the
 * r[] array at compile time), so the runtime value k is dispatched through a
 * switch; only k in [0, 9] is instantiated.
 * @param[in] m Pattern length.
 * @param[in] k Edit distance (0 to 9; any other value launches nothing).
 * @param[in] block_count Number of thread blocks.
 */
extern "C" void invokeAgrepKernel(const unsigned int m, const unsigned int k, const unsigned int block_count)
{
	unsigned int overlapping_character_count_init = m + k - 1;
	unsigned int overlapping_scodon_count_init = (overlapping_character_count_init + 16 - 1) >> 4; // ceil(characters / 16)
	unsigned int scodon_header_size = (sizeof(unsigned int) << B) * overlapping_scodon_count_init; // Dynamic shared memory: the first overlapping_scodon_count_init special codons of each thread, saved for the previous thread to continue processing.
	unsigned int match_count_init = 0;
	cudaMemcpyToSymbol(overlapping_character_count, &overlapping_character_count_init, sizeof(unsigned int));
	cudaMemcpyToSymbol(overlapping_scodon_count, &overlapping_scodon_count_init, sizeof(unsigned int));
	cudaMemcpyToSymbol(match_count, &match_count_init, sizeof(unsigned int));
	// One case per instantiated edit distance; the macro avoids writing the
	// twenty near-identical launch statements out by hand.
#define AGREP_LAUNCH(WIDTH, KK) \
	case KK: agrepKernel##WIDTH<KK><<<block_count, 1 << B, scodon_header_size>>>(); break;
	if (m <= 32)
	{
		switch (k)
		{
		AGREP_LAUNCH(32, 0)
		AGREP_LAUNCH(32, 1)
		AGREP_LAUNCH(32, 2)
		AGREP_LAUNCH(32, 3)
		AGREP_LAUNCH(32, 4)
		AGREP_LAUNCH(32, 5)
		AGREP_LAUNCH(32, 6)
		AGREP_LAUNCH(32, 7)
		AGREP_LAUNCH(32, 8)
		AGREP_LAUNCH(32, 9)
		default: break; // k > 9 is not instantiated; deliberately a no-op, as in the original.
		}
	}
	else // m > 32
	{
		switch (k)
		{
		AGREP_LAUNCH(64, 0)
		AGREP_LAUNCH(64, 1)
		AGREP_LAUNCH(64, 2)
		AGREP_LAUNCH(64, 3)
		AGREP_LAUNCH(64, 4)
		AGREP_LAUNCH(64, 5)
		AGREP_LAUNCH(64, 6)
		AGREP_LAUNCH(64, 7)
		AGREP_LAUNCH(64, 8)
		AGREP_LAUNCH(64, 9)
		default: break; // k > 9 is not instantiated; deliberately a no-op, as in the original.
		}
	}
#undef AGREP_LAUNCH
}
/**
 * Get the number of matches from CUDA constant memory.
 * Reads back the match_count symbol that the agrep kernels increment with
 * atomicAdd; call after the kernel has finished.
 * @param[out] match_count_arg Number of matches.
 */
extern "C" void getMatchCount(unsigned int *match_count_arg)
{
cudaMemcpyFromSymbol(match_count_arg, match_count, sizeof(unsigned int));
}
|
14,389 |
// Babak Poursartip
// 02/14/2021
// CUDA
//topic: scan
#include <cstdio>
#include <ctime>
#include <iostream>
// ==============================
// In-place scan-like accumulation over d: each active thread adds d[tid] into
// d[tid + stepSize], with the active-thread count shrinking each iteration.
// NOTE(review): threads read d[tid] while thread (tid - stepSize) may be
// writing that same element, with no __syncthreads() between iterations —
// this is a data race and the result is launch-order dependent; verify
// intended semantics before relying on the output.
// NOTE(review): tc is modified both by `tc -= stepSize` in the body and by
// `tc /= 2` in the update expression — confirm this combined shrink rate is
// intentional.
__global__ void sum(int *d)
{
int tds = blockDim.x;
int tid = threadIdx.x;
// tc: total number of threads
for (int tc = tds, stepSize = 1; tc > 0; tc /=2, stepSize *=2) // changes the number of threads by half(tc>>=1)
{
// thread must be allowed to write
if (tid < tc)
{
d[tid+stepSize] += d[tid];
# if __CUDA_ARCH__>=200
printf("%d, %d, %d, %d \n", tds, tid, stepSize, tc);
#endif
}
tc -=stepSize;
}
}
// ==============================
// Host driver: fills an array with 1..16, runs the sum kernel in a single
// block of (count - 1) threads, and prints the resulting array.
int main()
{
	printf(" starts \n");
	const int n = 16;
	const int bytes = n * sizeof(int);
	int host[n];
	for (int i = 0; i < n; ++i)
		host[i] = i + 1;
	int *dev;
	cudaMalloc(&dev, bytes);
	cudaMemcpy(dev, host, bytes, cudaMemcpyHostToDevice);
	sum<<<1, n - 1>>>(dev); // one block, n-1 threads, as in the original
	cudaMemcpy(host, dev, bytes, cudaMemcpyDeviceToHost);
	for (int i = 0; i < n; ++i)
		std::cout << host[i] << " ";
	std::cout << std::endl;
	cudaFree(dev);
	printf(" done \n");
	return 0;
}
|
14,390 | #include "includes.h"
// Each thread accumulates one contiguous chunk of chunkSize pixels into a
// global histogram indexed by pixel value (0..255), using atomicAdd because
// many threads may hit the same bin. histogram must be zeroed by the caller.
__global__ void kernelCalculateHistogram(unsigned int* histogram, unsigned char* rawPixels, long chunkSize, long totalPixels)
{
	int id = blockDim.x * blockIdx.x + threadIdx.x;
	// BUG FIX: the start offset and loop index were `int`, truncating the
	// long product id * chunkSize for images larger than 2^31 pixels; keep
	// the arithmetic in `long` like the parameters.
	long startPosition = (long)id * chunkSize;
	for (long i = startPosition; i < (startPosition + chunkSize); i++) {
		if (i < totalPixels) {
			int pixelValue = (int)rawPixels[i];
			atomicAdd(&histogram[pixelValue], 1);
		}
	}
}
14,391 | #include "includes.h"
// Matrix transpose (nx x ny input, read row-major, written transposed) with
// a 2x unroll along x: each thread handles two elements one blockDim.x apart.
__global__ void naiveGmemUnroll(float *out, float *in, const int nx, const int ny)
{
	unsigned int col = blockIdx.x * blockDim.x * 2 + threadIdx.x;
	unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
	unsigned int src = row * nx + col;   // linear index into the input
	unsigned int dst = col * ny + row;   // transposed index into the output
	// Guard covers both elements: col and col + blockDim.x must be in range.
	if (col + blockDim.x < nx && row < ny)
	{
		out[dst] = in[src];
		out[dst + ny * blockDim.x] = in[src + blockDim.x];
	}
}
14,392 | #include <cstdio>
#include <cstdlib>
#include <math.h>
#include <time.h>
#define MINVAL 0.00
#define MAXVAL 10.0
#define TOL 1e-5
double CPS = 2.9e9;
//////////////////////////// CUDA RELATED ////////////////////////////////////
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Error helper behind CUDA_SAFE_CALL: prints the CUDA error string with its
// source location and (by default) exits with the error code.
// FIX: `file` is now const char* — callers pass the string literal __FILE__,
// and binding a literal to a non-const char* is a deprecated conversion.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
	if (code != cudaSuccess)
	{
		fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
		if (abort) exit(code);
	}
}
// One red/black-free SOR sweep step per launch: each interior grid point is
// relaxed toward the average of its four neighbors, scaled by OMEGA.
// Border rows/columns hold fixed boundary values and are never written.
__global__ void SOR_kernel(float* arr, int len, float OMEGA)
{
	// First get the index
	int idx = (threadIdx.x + (blockDim.x * blockIdx.x)) * len + (threadIdx.y + (blockDim.y * blockIdx.y));
	int x = threadIdx.x + blockDim.x * blockIdx.x;
	int y = threadIdx.y + blockDim.y * blockIdx.y;
	// GENERALIZED: the boundary test was hard-coded to 2047 (len == 2048);
	// using len - 1 keeps identical behavior for 2048 and works for any len.
	if (x > 0 && x < len - 1 && y > 0 && y < len - 1)
	{
		float change = arr[idx] - 0.25 * (arr[idx - len] + arr[idx + len] + arr[idx + 1] + arr[idx - 1]);
		arr[idx] -= change * OMEGA;
	}
}
////////////////////////////// MATRIX /////////////////////////////////////////
float* matrix_create(int len);
int matrix_init(float* mat, int len);
int matrix_zero(float* mat, int len);
int matrix_copy(float* src, float* dst, int len);
void SOR_CPU(float* mat, int len, float OMEGA);
///////////////// Time related //////////////////////////////
//rdtsc related
typedef union {
unsigned long long int64;
struct {unsigned int lo, hi;} int32;
} mcps_tctr;
#define MCPS_RDTSC(cpu_c) __asm__ __volatile__ ("rdtsc" : \
"=a" ((cpu_c).int32.lo), "=d"((cpu_c).int32.hi))
int clock_gettime(clockid_t clk_id, struct timespec *tp);
struct timespec diff(struct timespec start, struct timespec end);
double ts_ms(struct timespec ts);
struct timespec ts_diff(struct timespec start, struct timespec end);
double measure_cps(void);
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
// Driver: runs 2000 SOR sweeps on the GPU (with per-iteration host<->device
// copies), times them, runs the CPU reference, and compares element-wise.
int main(int argc, char *argv[])
{
	int LEN = 2048;
	int size = LEN * LEN * sizeof(float);
	float OMEGA = 1.97;
	// CUDA Timing
	cudaEvent_t start, stop;
	float d_time;
	// CPU timing
	struct timespec time1, time2;
	double h_time;
	float *h_mat, *d_mat, *h_res;
	// set up on host
	measure_cps();
	h_mat = matrix_create(LEN);
	if(!h_mat) return 0;
	if(!matrix_init(h_mat, LEN)) return 0;
	h_res = matrix_create(LEN);
	if(!h_res) return 0;
	if(!matrix_copy(h_mat, h_res, LEN)) return 0; // copy so we can loop memcpy with device easily
	// set up device
	d_mat = NULL;
	CUDA_SAFE_CALL(cudaSetDevice(0));
	CUDA_SAFE_CALL(cudaMalloc((void**)&d_mat, size));
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	dim3 dimBlock(16, 16, 1);
	dim3 dimGrid(128, 128, 1);
	// begin GPU Work
	cudaEventRecord(start, 0);
	for(int i = 0; i < 2000; i++)
	{
		CUDA_SAFE_CALL(cudaMemcpy(d_mat, h_res, size, cudaMemcpyHostToDevice));
		SOR_kernel<<<dimGrid, dimBlock>>>(d_mat, LEN, OMEGA);
		CUDA_SAFE_CALL(cudaPeekAtLastError());
		CUDA_SAFE_CALL(cudaDeviceSynchronize()); // cudaThreadSynchronize() is deprecated
		CUDA_SAFE_CALL(cudaMemcpy(h_res, d_mat, size, cudaMemcpyDeviceToHost));
	}
	cudaEventRecord(stop,0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&d_time, start, stop);
	printf("\nGPU time: %f (msec)\n", d_time);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	// CPU SOR and comparison
	clock_gettime(CLOCK_REALTIME, &time1);
	SOR_CPU(h_mat, LEN, OMEGA);
	clock_gettime(CLOCK_REALTIME, &time2);
	h_time = ts_ms(ts_diff(time1, time2));
	printf("\nCPU time %lf (msec)\n", h_time);
	int i, num_elements;
	num_elements = LEN * LEN;
	for(i = 0; i < num_elements; i++)
	{
		// BUG FIX: the original tested (h_mat - h_res) — the difference of
		// the two *pointers* — so the per-element comparison never ran.
		if(fabs(h_mat[i] - h_res[i]) > (float) TOL)
		{
			printf("\nResult verification failed at element %d\n", i);
			return 0;
		}
	}
	// Free stuff
	CUDA_SAFE_CALL(cudaFree(d_mat));
	free(h_res);
	free(h_mat);
	printf("\nDone\n");
	return 0; // duplicated trailing `return 0;` removed
}
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////// MATRIX IMPLEMENTATIONS ////////////////////////////////////////
// Uniformly distributed float in [min, max], driven by random().
float float_rand(float min, float max)
{
	float unit = (float)random() / RAND_MAX; // in [0, 1]
	return min + unit * (max - min);
}
// Allocate a zero-initialized len x len float matrix.
// Returns NULL when len <= 0 or when allocation fails (with a message).
float* matrix_create(int len)
{
	if (len <= 0)
		return NULL;
	float* arr = (float*) calloc(len * len, sizeof(float));
	if (arr == NULL)
	{
		printf("\n\tFailed to allocate array\n");
		return NULL;
	}
	return arr;
}
// Fill mat (len x len) with uniform random values in [MINVAL, MAXVAL].
// Returns 1 on success, 0 (with a message) when len <= 0.
int matrix_init(float* mat, int len)
{
	if (len <= 0)
	{
		printf("\nError in initializing matrix\n");
		return 0;
	}
	int total = len * len;
	for (int i = 0; i < total; i++)
		mat[i] = float_rand(MINVAL, MAXVAL);
	return 1;
}
// Set every element of mat (len x len) to zero.
// Returns 1 on success, 0 (with a message) when len <= 0.
int matrix_zero(float* mat, int len)
{
	if (len <= 0)
	{
		printf("\nFailed to zero matrix\n");
		return 0;
	}
	int total = len * len;
	for (int i = 0; i < total; i++)
		mat[i] = 0;
	return 1;
}
// Copy the len x len matrix src into dst.
// Returns 1 on success, 0 (with a message) when len <= 0.
int matrix_copy(float* src, float* dst, int len)
{
	if (len <= 0)
	{
		printf("\nFailed to copy matrix\n");
		return 0;
	}
	int total = len * len;
	for (int i = 0; i < total; i++)
		dst[i] = src[i];
	return 1;
}
void SOR_CPU(float* mat, int len, float OMEGA)
{
int i, j, k;
float change = 0;
int q_idx;
for(k = 0; k < 2000; k++)
{
for(i = 0; i < len; i++)
{
for(j = 0; j < len; j++)
{
q_idx = i * len + j;
change = mat[q_idx] - 0.25 * (mat[q_idx-len] + mat[q_idx+len] + mat[q_idx-1] +mat[q_idx+1]);
mat[q_idx] -= change * OMEGA;
}
}
}
}
///////////////////////////// Timing related ///////////////////////////////
// Convert a timespec to milliseconds.
double ts_ms(struct timespec ts)
{
	double total_ns = ((double)ts.tv_sec) * 1.0e9 + (double)ts.tv_nsec;
	return total_ns / 1.0e6;
}
/* ---------------------------------------------------------------------------
| Make the CPU busy, and measure CPS (cycles per second).
|
| Explanation:
| If tests are very fast, they can run so quickly that the SpeedStep control
| (in kernel and/or on-chip) doesn't notice in time, and the first few tests
| might finish while the CPU is still in its sleep state (about 800 MHz,
| judging from my measurements)
| A simple way to get around this is to run some kind of busy-loop that
| forces the OS and/or CPU to notice it needs to go to full clock speed.
| We print out the results of the computation so the loop won't get optimised
| away.
|
| Copy this code into other programs as desired. It provides three entry
| points:
|
| double ts_sec(ts): converts a timespec into seconds
| timespec ts_diff(ts1, ts2): computes interval between two timespecs
| measure_cps(): Does the busy loop and prints out measured CPS (cycles/sec)
--------------------------------------------------------------------------- */
// Interval end - start as a normalized timespec (expects end >= start).
struct timespec ts_diff(struct timespec start, struct timespec end)
{
	struct timespec out;
	out.tv_sec  = end.tv_sec  - start.tv_sec;
	out.tv_nsec = end.tv_nsec - start.tv_nsec;
	if (out.tv_nsec < 0) {
		// borrow one second into the nanosecond field
		out.tv_sec  -= 1;
		out.tv_nsec += 1000000000;
	}
	return out;
}
// Busy-loop calibration: spins the CPU up to full clock speed, then derives
// the cycle rate from rdtsc ticks over CPU time, storing it in the global CPS.
// NOTE(review): ts_ms() returns milliseconds, so CPS as computed here is
// cycles per *millisecond*, while the global's initial value (2.9e9) suggests
// cycles per second — confirm which unit downstream users expect.
double measure_cps()
{
struct timespec cal_start, cal_end;
mcps_tctr tsc_start, tsc_end; // rdtsc samples (64-bit counter via union)
double total_time;
double total_cycles;
/* We perform a chaotic iteration and print the result, to defeat
compiler optimisation */
double chaosC = -1.8464323952913974; double z = 0.0;
long int i, ilim, j;
/* Do it twice and throw away results from the first time; this ensures the
* OS and CPU will notice it's busy and set the clock speed. */
for(j=0; j<2; j++) {
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cal_start);
MCPS_RDTSC(tsc_start);
ilim = 50*1000*1000;
for (i=0; i<ilim; i++)
z = z * z + chaosC;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cal_end);
MCPS_RDTSC(tsc_end);
}
total_time = ts_ms(ts_diff(cal_start, cal_end)); // milliseconds (see note above)
total_cycles = (double)(tsc_end.int64-tsc_start.int64);
CPS = total_cycles / total_time;
printf("z == %f, CPS == %g\n", z, CPS);
return CPS;
}
/* ---------------------------------------------------------------------------
| End of measure_cps code
--------------------------------------------------------------------------- */
// Normalized end - start interval; same computation as ts_diff above.
struct timespec diff(struct timespec start, struct timespec end)
{
	struct timespec d;
	d.tv_sec  = end.tv_sec  - start.tv_sec;
	d.tv_nsec = end.tv_nsec - start.tv_nsec;
	if (d.tv_nsec < 0) {
		// borrow one second into the nanosecond field
		d.tv_sec  -= 1;
		d.tv_nsec += 1000000000;
	}
	return d;
}
|
14,393 | /*
* Name: Simple cuRand based random number generator
* File: simpleRandomNumbers.cu
* Description: This file contains a simple CUDA kernel to generate
* a matrix of distinct random numbers
* Author: kmmankad (kmmankad@gmail.com kmankad@ncsu.edu)
* License: MIT License
*
*/
#include <stdio.h>
// Pull in the curand headers
#include <curand.h>
#include <curand_kernel.h>
// We'll use the time as seed
#include <ctime>
// The all-important CUDA error
// checking macros
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
// X, Y dimensions of the output matrix
#ifndef NumOfRand_X
#define NumOfRand_X 32
#endif
#ifndef NumOfRand_Y
#define NumOfRand_Y 32
#endif
#define NumOfRand (NumOfRand_X * NumOfRand_Y)
// Block Size
#define NUM_THREADS_X 32
#define NUM_THREADS_Y 32
// CUDA Kernel to initialize the random generator 'states':
// each in-range thread seeds its own curandState_t slot.
// NOTE(review): thread_num = thread_x * NUM_THREADS_X + thread_y is not a
// unique flattening once the grid has more than one block (thread_x and
// thread_y can both exceed NUM_THREADS_X - 1 here), so two different threads
// can compute the same thread_num and race on RandStates[thread_num].
// RandGen below uses the same formula, so the two stay consistent — but
// verify the intended launch geometry before changing either.
__global__ void InitRandGen (int RandSeed, curandState_t* RandStates){
int thread_x = blockIdx.x * blockDim.x + threadIdx.x;
int thread_y = blockIdx.y * blockDim.y + threadIdx.y;
int thread_num = thread_x * NUM_THREADS_X + thread_y;
if (thread_num < NumOfRand) {
// Initialization is much faster if sequence number and offset
// are kept at zero, and instead a different seed is used.
// See - https://devtalk.nvidia.com/default/topic/480586/curand-initialization-time/?offset=4
curand_init(RandSeed+thread_num, /* sequence number */ 0, /* sequence offset */ 0, &RandStates[thread_num]);
}
}
// Draw one random value in [0, 99] per state slot.
// NOTE(review): uses the same non-unique thread_num flattening as
// InitRandGen (see the note there); duplicate indices write the same
// GPUNums slot and advance the same state concurrently — verify.
__global__ void RandGen (int* GPUNums, curandState_t* RandStates){
int thread_x = blockIdx.x * blockDim.x + threadIdx.x;
int thread_y = blockIdx.y * blockDim.y + threadIdx.y;
int thread_num = thread_x * NUM_THREADS_X + thread_y;
if (thread_num < NumOfRand){
GPUNums[thread_num] = curand(&RandStates[thread_num]) % 100;
}
}
// Driver: allocates managed buffers, seeds per-slot curand states, generates
// NumOfRand values in [0, 99], and prints the first 40 of them.
int main(){
	// Allocate memory for the array of
	// random numbers that we want
	int* GPUNums;
	// Define a pointer for the cuRandStates
	curandState_t* RandStates;
	// Allocate the memory for the output nums
	CUDA_CALL(cudaMallocManaged((void**)&GPUNums, sizeof(int) * NumOfRand));
	// Allocate memory for the different curandStates on each core
	CUDA_CALL(cudaMallocManaged((void**)&RandStates, sizeof(curandState) * NumOfRand));
	// Launch params
	dim3 BlockSize (NUM_THREADS_X, NUM_THREADS_Y, 1);
	dim3 GridSize((NumOfRand_X/NUM_THREADS_X)+1, (NumOfRand_Y/NUM_THREADS_Y)+1, 1);
	// Launch the Initialization kernel
	InitRandGen<<<GridSize,BlockSize>>>(10, RandStates);
	CUDA_CHECK();
	CUDA_CALL( cudaDeviceSynchronize() );
	// Launch the actual generator kernel
	RandGen<<<GridSize, BlockSize>>> (GPUNums, RandStates);
	CUDA_CHECK();
	CUDA_CALL( cudaDeviceSynchronize());
	// Just print some for examination
	for (int i=0; i<40; i++){
		printf ("%0d ", GPUNums[i]);
		if(i%10 == 9) {
			printf(" \n");
		}
	}
	// FIX: release the managed allocations (the original leaked both).
	CUDA_CALL(cudaFree(GPUNums));
	CUDA_CALL(cudaFree(RandStates));
	return 0;
}
|
14,394 | #include <stdio.h>
// Debug kernel: increments each a[tid] by one and accumulates the updated
// values into b[0]. Intended for a single-block launch (uses threadIdx.x only).
__global__ void sum_test(float *a, float *b) {
	int tid = threadIdx.x;
	b[0] = 0; // every thread stores the same value, so this write is benign
	__syncthreads();
	atomicAdd(&a[tid], 1); // each thread owns a[tid]; no cross-thread conflict
	// BUG FIX: the original `b[0] += a[tid]` was an unsynchronized
	// read-modify-write performed by every thread, silently losing updates.
	// atomicAdd makes the accumulation correct (float atomicAdd needs SM20+).
	atomicAdd(&b[0], a[tid]);
}
// One thread per input element: atomically bump the histogram bin selected
// by the element's value. hist must be zeroed before launch.
__global__ void hist_compute(int *a, int *hist) {
	int gid = threadIdx.x + blockIdx.x * blockDim.x;
	atomicAdd(&hist[a[gid]], 1);
}
// Driver: builds a synthetic pixel array, computes its histogram on the CPU
// and on the GPU, and prints both for comparison.
int main(int argc, char* argv[]) {
	int pixel_num = 5120;
	int a[pixel_num];
	int length = 10;
	for (int i = 0; i < pixel_num; i++) {
		a[i] = i * (i + 1) % length;
	}
	// CPU reference histogram.
	int *hist = new int[length]();
	for (int i = 0; i < pixel_num; i++) {
		hist[a[i]] += 1;
	}
	for (int i = 0; i < length; i++) {
		printf("hist[%d]=%d\n", i, hist[i]);
	}
	int *aGpu, *histGpu;
	int hist2[length];
	cudaMalloc((void**)&aGpu, pixel_num * sizeof(int));
	cudaMalloc((void**)&histGpu, length * sizeof(int));
	// BUG FIX: cudaMalloc does not zero memory — without this memset the
	// device histogram starts from garbage and the GPU counts are wrong.
	cudaMemset(histGpu, 0, length * sizeof(int));
	cudaMemcpy(aGpu, a, pixel_num * sizeof(int), cudaMemcpyHostToDevice);
	hist_compute<<<pixel_num / 512, 512>>>(aGpu, histGpu);
	cudaMemcpy(hist2, histGpu, length * sizeof(int), cudaMemcpyDeviceToHost);
	for (int i = 0; i < length; i++) {
		printf("hist[%d]=%d\n", i, hist2[i]);
	}
	// FIX: release allocations (the original leaked all three).
	cudaFree(aGpu);
	cudaFree(histGpu);
	delete[] hist;
	return 0;
}
|
14,395 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <inttypes.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define BLOCK_WIDTH 32
#define TAILLE 4096
#define gettime(t) clock_gettime(CLOCK_MONOTONIC_RAW, t)
#define get_sub_seconde(t) (1e-9*(double)t.tv_nsec)
/** return time in second
*/
/** Current monotonic time in seconds (0.0 if the clock query fails). */
double get_elapsedtime(void)
{
  struct timespec ts;
  if (gettime(&ts) != 0) {
    return 0;
  }
  return (double)ts.tv_sec + get_sub_seconde(ts);
}
// Fill A and B with the index value (A[k] = B[k] = k) and zero C.
// srand keeps the original fixed seed, though no rand() values are
// consumed in this function.
void init(double* A, double* B, double* C, int size)
{
  srand(2020);
  for (int k = 0; k < size; ++k)
  {
    A[k] = (double)k;
    B[k] = (double)k;
    C[k] = 0.0;
  }
}
// Element-wise vector sum: C[k] = A[k] + B[k] for k in [0, size).
void add(double* A, double* B, double* C, int size)
{
  for (int k = 0; k < size; ++k)
    C[k] = A[k] + B[k];
}
// QUESTION 4
__global__
void AddVecKernel(double* A, double* B, double* C, int N)
{
    // QUESTION 6: flat 1-D global index for this thread.
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    // FIN QUESTION 6
    // QUESTION 7: guard the tail — the grid is rounded up, so some
    // threads fall past the end of the vectors.
    if (idx < N)
        C[idx] = A[idx] + B[idx];
    // FIN QUESTION 7
}
// FIN QUESTION 4
// Vector-addition driver: runs the CUDA kernel, times it with events,
// validates against a CPU reference, then reports CPU-side performance.
int main(int argc, char** argv){
    int N;
    double *A, *B, *C, *C_bis;
    double t0 = 0., t1 = 0., duration = 0.;

    N = (argc < 2)?1000:atoi(argv[1]);
    fprintf(stdout, "Vectors addition\n Size: %d\n", N);

    // Host allocations.
    A = (double*) malloc(sizeof(double) * N);
    B = (double*) malloc(sizeof(double) * N);
    C = (double*) malloc(sizeof(double) * N);
    C_bis = (double*) malloc(sizeof(double) * N);

    init(A, B, C, N);

    // QUESTION 8: events used to time the kernel on the GPU.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // QUESTION 1: device allocations.
    double *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, sizeof(double) * N);
    cudaMalloc(&d_B, sizeof(double) * N);
    cudaMalloc(&d_C, sizeof(double) * N);

    // QUESTION 2: host -> device copies.
    cudaMemcpy(d_A, A, sizeof(double) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, sizeof(double) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, C, sizeof(double) * N, cudaMemcpyHostToDevice);

    // QUESTION 3: round the grid up so every element gets a thread.
    int nbBlocks = N / BLOCK_WIDTH;
    if(N % BLOCK_WIDTH) nbBlocks++;
    dim3 gridSize(nbBlocks);
    dim3 blockSize(BLOCK_WIDTH);

    // QUESTION 4 + 8: launch and time the kernel.
    cudaEventRecord(start);
    AddVecKernel<<<gridSize, blockSize>>>(d_A, d_B, d_C, N);
    cudaEventRecord(stop);

    // QUESTION 5: device -> host copy (blocks until the kernel finishes).
    cudaMemcpy(C, d_C, sizeof(double) * N, cudaMemcpyDeviceToHost);

    // QUESTION 8: elapsed kernel time.
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Vecteur %d\n\tTemps: %f s\n", N, milliseconds/1000);

    // CPU reference for validation.
    t0 = get_elapsedtime();
    add(A, B, C_bis, N);
    t1 = get_elapsedtime();

    for(int i = 0; i < N; ++i)
    {
        if(C[i] != C_bis[i])
        {
            fprintf(stderr, "FATAL ERROR ! [%d : GPU: %f != CPU: %f]\n", i, C[i], C_bis[i]);
            exit(-1);
        }
    }

    // Pretty print
    duration = (t1 - t0);
    uint64_t nb_op = N;
    fprintf(stdout, "Performance results: \n");
    fprintf(stdout, " Time: %lf s\n", duration);
    fprintf(stdout, " MFlops: %.2f\n", (nb_op / duration)*1E-6);

    // BUG FIX: the original called cudaFree on the *host* pointers (A, B, C)
    // after free() — device memory must be released via the d_* pointers.
    cudaEventDestroy(start);   // BUG FIX: events were never destroyed
    cudaEventDestroy(stop);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(A);
    free(B);
    free(C);
    free(C_bis);               // BUG FIX: C_bis was never freed
    return 0;
}
|
14,396 | /* threads and blocks.
* for blocks = 2, threads = 4:
* a = [0,1,2,3 | 0,1,2,3]
* index = threadIdx.x + blockIdx.x * threads
* = [0,1,2,3 | 4,5,6,7]
*/
#include <stdio.h>
#define N 8
#define THREADS_PER_BLOCK 8
#define BLOCKS (N / THREADS_PER_BLOCK)
/* Dot product of two N-vectors within a single block.
 * Each thread writes one pairwise product into block-shared memory;
 * after the barrier, thread 0 reduces the partials and stores *res.
 */
__global__ void dot_product(int *a, int *b, int *res)
{
    /* shared scratch: visible only to threads of this block */
    __shared__ int partial[N];
    int t = threadIdx.x;
    partial[t] = a[t] * b[t];
    /* every product must be written before thread 0 starts reading —
     * without the barrier it could observe garbage from slower threads */
    __syncthreads();
    if (t == 0)
    {
        int total = 0;
        for (int k = 0; k < N; ++k)
            total += partial[k];
        *res = total;
    }
}
/* Fill arr[0..n) with deterministic values (arr[k] = k; rand() disabled). */
void random_ints(int *arr, int n)
{
    for (int k = 0; k < n; ++k)
        arr[k] = k; /*rand();*/
}
/* Print the n elements of arr, comma-separated, terminated by a newline.
 * BUG FIX: the original computed last = n - 1 and read arr[last]
 * unconditionally, indexing out of bounds (arr[-1]) when n == 0. */
void print_arr(int *arr, int n)
{
    if (n <= 0) {
        printf("\n");
        return;
    }
    int i, last;
    for (i = 0, last = n - 1; i < last; i++)
        printf("%i,", arr[i]);
    printf("%i\n", arr[last]);
}
/* Driver: build two N-vectors, compute their dot product on the GPU,
 * and print the scalar result. */
int main(void)
{
    int size = N * sizeof(int);
    int *host_a, *host_b, *host_res;
    int *dev_a, *dev_b, *dev_res;

    cudaMalloc((void**) &dev_a, size);
    cudaMalloc((void**) &dev_b, size);
    cudaMalloc((void**) &dev_res, sizeof(int));

    host_a = (int*) malloc(size);
    host_b = (int*) malloc(size);
    host_res = (int*) malloc(sizeof(int));
    random_ints(host_a, N);
    random_ints(host_b, N);

    /* push both operand vectors to the device */
    cudaMemcpy(dev_a, host_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, host_b, size, cudaMemcpyHostToDevice);

    /* launch the reduction kernel: BLOCKS blocks of THREADS_PER_BLOCK */
    dot_product<<<BLOCKS, THREADS_PER_BLOCK>>>(dev_a, dev_b, dev_res);

    /* pull the scalar back (cudaMemcpy waits for the kernel to finish) */
    cudaMemcpy(host_res, dev_res, sizeof(int), cudaMemcpyDeviceToHost);
    printf("result = %i\n", *host_res);

    free(host_a);
    free(host_b);
    free(host_res);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_res);
    return 0;
}
|
14,397 | #pragma once
#include <curand.h>
#include <curand_kernel.h>
#include <ctime>
#include <stdio.h>
#include <utility>
#include <algorithm>
#include <numeric>
#include <iostream>
#include <chrono>
#include <iomanip>
#include <sstream>
#include <fstream>
typedef double(*FunctionCallback)(double);
namespace parallel {
// One sample per thread: map a pre-generated uniform number into [a, b],
// evaluate func, and fold the result into this thread's min/max slots.
// NOTE(review): func is a function pointer supplied by the host; it must
// refer to device-callable code — confirm at the call site.
__global__ void minMaxThread(int n, double *generatedNumbers, double *devMin, double *devMax, FunctionCallback func, double a, double b)
{
    // BUG FIX: the original used only threadIdx.x, so with more than one
    // block every block re-processed samples [0, blockDim) and the rest
    // of the buffer was never touched. Use the global thread index.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        double x = generatedNumbers[i] * (b - a) + a;
        double value = func(x);
        devMax[i] = ( value > devMax[i] ) ? value : devMax[i];
        devMin[i] = ( value < devMin[i] ) ? value : devMin[i];
    }
}
// Host helper: generate n uniform doubles on the device, then launch
// minMaxThread with enough blocks to cover all n samples.
void minMaxKernelsStart(int n, double *generatedNumbers, double *devMin, double *devMax, FunctionCallback func, double a, double b)
{
    int device_number = 0;
    cudaDeviceProp iProp;
    cudaGetDeviceProperties(&iProp, device_number);

    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, time(NULL));
    curandGenerateUniformDouble(gen, generatedNumbers, n);

    // Ceiling division: enough blocks to cover all n samples.
    minMaxThread<<<(n + iProp.maxThreadsPerBlock - 1) / iProp.maxThreadsPerBlock, iProp.maxThreadsPerBlock>>>
        (n, generatedNumbers, devMin, devMax, func, a, b);
    cudaDeviceSynchronize();

    // BUG FIX: the generator was created on every call but never released,
    // leaking cuRAND resources when this runs in a loop (see minMaxValue).
    curandDestroyGenerator(gen);
}
// Monte-Carlo estimate of min/max of func over [a, b]: processes n samples
// in chunks of S = 2^20 using one reusable device buffer per chunk, then
// reduces the per-slot extrema on the host.
std::pair<double, double> minMaxValue(int n, double a, double b, FunctionCallback func)
{
// Chunk size: each kernel pass evaluates S samples.
const int S = 1<<20; //2^20
double *generatedNumbers, *devMin, *devMax, *hostMin, *hostMax;
int loopSize, arraySize;
// Number of passes needed to cover all n samples (ceiling division).
loopSize = (( n + S - 1) / S);
arraySize = S;
hostMin = (double *)malloc(S * sizeof(double));
hostMax = (double *)malloc(S * sizeof(double));
cudaMalloc((void**)&devMin, S * sizeof(double));
cudaMalloc((void**)&devMax, S * sizeof(double));
cudaMalloc((void**)&generatedNumbers, S * sizeof(double));
// Seed every slot with the min/max identity values before the first pass.
std::fill(hostMin, hostMin + arraySize, std::numeric_limits<double>::max());
std::fill(hostMax, hostMax + arraySize, std::numeric_limits<double>::lowest());
cudaMemcpy(devMin, hostMin, arraySize * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(devMax, hostMax, arraySize * sizeof(double), cudaMemcpyHostToDevice);
// Each pass draws fresh random samples and folds them into devMin/devMax,
// so the slots accumulate extrema across all loopSize passes.
for (int i = 0; i < loopSize; i++)
{
minMaxKernelsStart(arraySize, generatedNumbers, devMin, devMax, func, a, b);
}
cudaMemcpy(hostMin, devMin, arraySize * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(hostMax, devMax, arraySize * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(generatedNumbers);
cudaFree(devMin);
cudaFree(devMax);
// Final host-side reduction of the per-slot extrema to one (min, max) pair.
auto pair = std::make_pair( *std::min_element(hostMin, hostMin + arraySize),
*std::max_element(hostMax, hostMax + arraySize));
free(hostMin);
free(hostMax);
return pair;
}
// Per-thread min/max search: each of `size` threads seeds its own curand
// state and draws `calculationsPerThread` samples from [a, b), folding the
// extrema of f into its devMin/devMax slot.
// NOTE(review): f is a function pointer passed from the host; it must refer
// to device-callable code — confirm at the call site.
__global__ void minMaxThreadV2(unsigned long seed, double a, double b, double *devMin, double *devMax, int size, int calculationsPerThread, FunctionCallback f)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size)
{
// Independent RNG stream per thread: same seed, subsequence = idx.
curandState_t state;
curand_init(seed, idx, 0, &state);
double x, y;
// Start from the previously stored extrema so results accumulate.
double min = devMin[idx];
double max = devMax[idx];
for(int i =0; i< calculationsPerThread; i++)
{
x = curand_uniform_double(&state) * (b - a) + a;
y = f(x);
max = ( y > max ) ? y : max;
min = ( y < min ) ? y : min;
}
devMin[idx] = min;
devMax[idx] = max;
}
}
// Min/max search over n samples using one curand state per thread and
// calculationsPerThread draws per thread, reduced on the host.
std::pair<double, double> minMaxValueV2(int n, double a, double b, FunctionCallback func)
{
    unsigned long cuRand_seed = time(NULL);

    cudaDeviceProp iProp;
    cudaGetDeviceProperties(&iProp, 0);
    int threads = iProp.maxThreadsPerBlock;
    int blocks = iProp.multiProcessorCount;
    int size = threads * blocks;            // total threads in the grid
    int sizeInBytes = size * sizeof(double);

    double *devMin, *devMax, *hostMin, *hostMax;
    // BUG FIX: the original allocated sizeInBytes * sizeof(double) — an
    // 8x over-allocation, since sizeInBytes is already a byte count.
    cudaMalloc((void**)&devMin, sizeInBytes);
    cudaMalloc((void**)&devMax, sizeInBytes);
    hostMin = (double *)malloc(sizeInBytes);
    hostMax = (double *)malloc(sizeInBytes);

    // Seed every slot with the min/max identity values.
    std::fill(hostMin, hostMin + size, std::numeric_limits<double>::max());
    std::fill(hostMax, hostMax + size, std::numeric_limits<double>::lowest());
    cudaMemcpy(devMin, hostMin, sizeInBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devMax, hostMax, sizeInBytes, cudaMemcpyHostToDevice);

    // Round up so all n samples are covered across the grid.
    int calculationsPerThread = (n + size -1) / size;
    minMaxThreadV2<<<blocks, threads>>>(cuRand_seed, a, b, devMin, devMax, size, calculationsPerThread, func);
    cudaDeviceSynchronize();

    cudaMemcpy(hostMin, devMin, sizeInBytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(hostMax, devMax, sizeInBytes, cudaMemcpyDeviceToHost);
    cudaFree(devMin);
    cudaFree(devMax);

    // Final host-side reduction of the per-thread extrema.
    auto pair = std::make_pair( *std::min_element(hostMin, hostMin + size),
                                *std::max_element(hostMax, hostMax + size));
    free(hostMin);
    free(hostMax);
    return pair;
}
// Benchmark harness: runs the parallel MinMax m times over 2^n samples,
// logs each run's wall time to "minMaxPar_<m>_<n>.txt", and prints the mean.
void timeTestMinMaxPar(int m, int n, double a, double b, FunctionCallback f){
std::cout << std::setprecision(5);
std::chrono::duration<double> total = std::chrono::duration<double>::zero();
std::chrono::duration<double> diff;
std::chrono::high_resolution_clock::time_point start;
std::chrono::high_resolution_clock::time_point end;
std::ofstream file;
std::stringstream filename;
// File name encodes the parameters *before* n is expanded to 2^n below.
filename << "minMaxPar_" << m << '_' << n << ".txt";
n = 1 << n;
file.open(filename.str());
if (file.good() == true)
{
std::cout << "Testing parallel MinMax... for size: " << n << std::endl;
for(int i = 1; i <= m; ++i){
start = std::chrono::high_resolution_clock::now();
minMaxValue(n, a, b, f);
end = std::chrono::high_resolution_clock::now();
// Progress indicator, overwritten in place via '\r'.
std::cout << "\r" << i * 100.0 / m << "% ";
std::cout << std::flush;
diff = end - start;
file << diff.count() << std::endl;
total += diff;
}
file.close();
}
std::cout << std::endl;
std::cout << "Parallel MinMax average time: " << total.count()/m << std::endl;
}
}
|
14,398 | #include "cuda_runtime.h"
#include <stdio.h>
// Each thread prints a greeting tagged with its global 1-D index.
__global__ static void sayHello()
{
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    printf("Hello from thread %d!\n", gid);
}
// Read a launch configuration from stdin, launch sayHello, reset the device.
int main()
{
    int grid_size;
    int tpb;
    puts("Please enter the grid size: ");
    // BUG FIX: scanf's return value was ignored; on malformed input the
    // launch would use an uninitialized configuration.
    if (scanf("%d", &grid_size) != 1) {
        fprintf(stderr, "invalid grid size\n");
        return 1;
    }
    puts("\nPlease enter the threads per block: ");
    if (scanf("%d", &tpb) != 1) {
        fprintf(stderr, "invalid threads per block\n");
        return 1;
    }
    puts("\n");
    sayHello<<<grid_size,tpb>>>();
    // Surface launch-configuration errors (e.g. tpb above the device limit);
    // kernel launches do not report these directly.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(launchErr));
        return 1;
    }
    cudaError_t cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
} |
14,399 | #include "kernel.cuh"
// Print a message to stderr and terminate the process with status 1.
void error(char const* str)
{
    fprintf(stderr, "%s\n", str);
    exit(EXIT_FAILURE);
}
// Report a CUDA failure (label, numeric code, message) to stderr.
// Deliberately does not abort, matching the original best-effort behavior —
// the caller continues after a failed call.
void cuda_check(cudaError_t err, char const* str)
{
    if (err == cudaSuccess)
        return;
    fprintf(stderr, "%s: CUDA error %d (%s)\n",
        str, err, cudaGetErrorString(err));
}
// Mandelbrot escape-time kernel: one thread per pixel of an nrows x ncols
// image. Writes the iteration at which |z| escaped (|z| >= 2) into
// result[y_idx * ncols + x_idx], or 0 if the point stayed bounded for
// max_iter iterations. startX/startY pan the view; zoom scales it.
__global__ void fractal(int nrows, int ncols, int max_iter, double startX, double startY, double zoom, int* result) //373 - 326
{
int x_idx = threadIdx.x + blockIdx.x * blockDim.x;
int y_idx = threadIdx.y + blockIdx.y * blockDim.y;
// The grid is rounded up; discard threads outside the image.
if (x_idx >= ncols || y_idx >= nrows)
return;
int l = x_idx + y_idx * ncols;
// Map the pixel to the complex plane, centered and scaled by zoom; the
// constants (3.5, -0.75, 2.0) frame the classic Mandelbrot viewport.
double x = ((x_idx - ncols / 2) * zoom + startX) / ncols * 3.5 - 0.75;
double y = ((y_idx - nrows / 2) * zoom + startY) / nrows * 2.0;
double re = x, im = y;
for (int i = 1; i < max_iter; ++i)
{
// |z|^2 >= 4 means |z| >= 2: the orbit has escaped.
if (re * re + im * im >= 4)
{
result[l] = i;
return;
}
// z = z^2 + c, expanded into real/imaginary parts.
double reTemp = re * re - im * im + x;
im = 2 * re * im + y;
re = reTemp;
}
result[l] = 0;
}
#if __DEBUG
// Legacy variant kept for benchmarking (timings in the trailing comment):
// fixed viewport, indexed by x_idx * ncols + y_idx, iteration count capped
// by the compile-time MAX_ITERATIONS constant.
__global__ void fractal_old(int nrows, int ncols, int* result) //398.86642 - 343.53
{
int x_idx = threadIdx.x + blockIdx.x * blockDim.x;
int y_idx = threadIdx.y + blockIdx.y * blockDim.y;
// NOTE(review): this bounds test compares x against nrows and y against
// ncols — the opposite of the newer fractal() kernel; verify intended.
if (x_idx >= nrows || y_idx >= ncols)
return;
int l = x_idx * ncols + y_idx;
// Fixed mapping onto the [-2.5, 1] x [-1, 1] Mandelbrot window.
double x = (double)x_idx / nrows * 3.5 - 2.5;
double y = (double)y_idx / ncols * 2.0 - 1.0;
double re = x, im = y;
for (int i = 1; i < MAX_ITERATIONS; ++i)
{
// Escape test: |z|^2 >= 4.
if (re * re + im * im >= 4)
{
result[l] = i;
return;
}
// z = z^2 + c.
double reTemp = re * re - im * im + x;
im = 2 * re * im + y;
re = reTemp;
}
result[l] = 0;
}
#endif
// Launch the fractal kernel over an nrows x ncols image and return the
// per-pixel iteration counts in pinned host memory.
// The caller owns the returned buffer and must release it with cudaFreeHost.
int* mendelbrot_kernel(int nrows, int ncols, int max_iter, double startX, double startY, double zoom)
{
    cudaError_t err;
    cudaEvent_t start, end;
    int* d_result, * h_result;
    size_t res_size = nrows * ncols * sizeof(int);
    float time;

    err = cudaMalloc(&d_result, res_size);
    cuda_check(err, "cudaMalloc");
    // Pinned host memory — must be freed with cudaFreeHost, not free().
    err = cudaMallocHost(&h_result, res_size);
    cuda_check(err, "cudaMallocHost");

    cudaEventCreate(&start);
    cudaEventCreate(&end);

    // 32x32 thread blocks; grid rounded up to cover the whole image.
    dim3 dimBlock(32, 32);
    dim3 dimGrid((ncols + 31) / 32, (nrows + 31) / 32);

    cudaEventRecord(start);
    fractal << <dimGrid, dimBlock >> > (nrows, ncols, max_iter, startX, startY, zoom, d_result);
    cudaEventRecord(end);

    err = cudaMemcpy(h_result, d_result, res_size, cudaMemcpyDeviceToHost);
    // BUG FIX: this error label said "cudaMalloc", mislabelling copy failures.
    cuda_check(err, "cudaMemcpy");

    err = cudaEventSynchronize(end);
    cuda_check(err, "sync");
    cudaEventElapsedTime(&time, start, end);
    //printf("Tempo passato = %f\n", time);

    err = cudaFree(d_result);
    cuda_check(err, "cudaFree");
    // BUG FIX: events were created on every call but never destroyed,
    // leaking a pair of event objects per invocation.
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    return h_result;
}
#if __DEBUG
// Debug-only driver (compiled under __DEBUG).
int main()
{
// NOTE(review): kernel_start is not defined in this file — the entry point
// defined above is mendelbrot_kernel, which takes six arguments. Confirm
// whether kernel_start exists elsewhere or this debug call is stale.
// NOTE(review): the returned pinned buffer is never released here
// (would need cudaFreeHost) — acceptable for a debug main, but worth noting.
int* result = kernel_start(12000, 8000);
//int* result = kernel_start();
// for (int j = NCOLS - 1; j >= 0; --j)
// {
// for (int i = 0; i < NROWS; i++)
// {
// printf("%d ", result[i * NCOLS + j]);
// }
// printf("\n");
// }
// for (int j = 0; j < NROWS; ++j)
// {
// for (int i = 0; i < NCOLS; ++i)
// {
// printf("%d ", result[j * NCOLS + i]);
// }
// printf("\n");
// }
return 0;
}
#endif |
14,400 | #include <iostream>
#include <string> // stof, stoi
#include <chrono>
#include <ctime>
#include <fstream>
#include <cmath>
#include <iomanip>
#include <stdio.h>
#include <math.h>
#include <assert.h>
// CUDA libraries
#include <curand.h>
#include <curand_kernel.h>
#define DEBUG 0
// function to integrate: sin(x)/x
// Bookkeeping for a buffer allocation: pointer, element count, and total
// byte size (set by callers as numItems * sizeof(double)).
// Default-constructs to an empty, unallocated buffer.
struct mem_t {
    double* ptr;      // start of the buffer (NULL until allocated)
    size_t numItems;  // number of doubles
    size_t numBytes;  // total allocation size in bytes
    mem_t() : ptr(NULL), numItems(0), numBytes(0) {}
};
// monte carlo ; each CUDA thread
// Monte-Carlo integration kernel for sin(x)/x. Each thread draws numSamples
// uniform points in [a, b] and accumulates f(x) into its own work[] slot
// (work must be sized (bx*by)*gridDim.x and zero-filled before launch —
// cudaMalloc alone does not zero, and the `+=` below would start from
// garbage). Row partials are then folded, and per-block totals are written
// into results[] (sized gridDim.x, also pre-zeroed).
// NOTE(review): id and the fold indices are built from *global* thread
// coordinates, so for more than one block the row/column sums read work[]
// slots written by other blocks with only __syncthreads() (block-scope)
// barriers in between — that is unsynchronized across blocks. Verify the
// intended launch shape (1-D grid of 2-D blocks) and the indexing here.
__global__ void integrate(double a, double b, int numSamples, double* work, double* results, int bx, int by) {
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int id = iy*bx + ix;
curandState_t state;
// Seed per thread with its id so RNG streams differ across threads.
curand_init(id, 0, 0, &state); // curand_init(seed, sequence number, offset, &state)
for(int i=0; i<numSamples; i++) {
// (rand() % (upper - lower + 1)) + lower;
double x = curand_uniform_double(&state);
x *= (b - a + 0.999999);
x += a;
double result = sin(x)/x;
// Branch-free NaN guard: multiplies by 0 when sin(x)/x is NaN (x == 0).
result = result * (isnan(result) == 0);
work[id] += result; // if nan, it will simply add zero ; reduces branches
}
//synchronize threads within block
__syncthreads();
#if DEBUG
if(id==0) {
for(int i=0; i<bx*by; i++) {
if(i%bx==0 && i!=0) printf("\n");
printf("%.2f ", work[i]);
}
printf("\n");
}
__syncthreads();
#endif
// sum up the values in each row
if(threadIdx.x == 0) { // if thread column is left-most
for(int x=1; x<bx; x++) { // sum up all the partial sums in this row
int index = iy*bx + (x + blockIdx.x * blockDim.x);
work[id] += work[index];
}
}
//synchronize threads within block
__syncthreads();
#if DEBUG
if(id==0) {
printf("After row sums computed\n");
for(int i=0; i<bx*by; i++) {
if(i%bx==0 && i!=0) printf("\n");
printf("%.2f ", work[i]);
}
printf("\n");
}
__syncthreads();
#endif
// sum up each row's sum into the results
if(threadIdx.x == 0 && threadIdx.y == 0) {
for(int y=0; y<by; y++) {
int index = (y + blockIdx.y * blockDim.y)*bx + ix;
results[blockIdx.x] += work[index];
#if DEBUG
printf("id=%i, writing to results[%u]=%.2f, added=%.2f\n", id, blockIdx.x, results[blockIdx.x], work[index]);
#endif
}
}
}
// Monte-Carlo integration of sin(x)/x over [a, b] on the GPU.
// Usage: ./integrate lowerLimit upperLimit numSamples numThreads
int main(int argc, char* argv[]) {
    if(argc < 5) {
        printf("./integrate lowerLimit, upperLimit, numSamples, numThreads\n");
        exit(1);
    }
    double a = std::stof(argv[1]); // lower limit
    double b = std::stof(argv[2]); // upper limit
    int numSamples = std::stoi(argv[3]);
    int numThreads = std::stoi(argv[4]);

    std::chrono::time_point<std::chrono::system_clock> start, end;
    start = std::chrono::system_clock::now();

    // Block shape: up to 32x32 (1,024 threads, the hardware block limit).
    int bx = 32;
    int by = 32;
    {
        int minLength = sqrt(numThreads);
        if(minLength <= 32) {
            bx = minLength;
            by = minLength;
        }
    }
    // BUG FIX: ceil(numThreads / (bx*by)) truncated in integer division
    // before ceil ran (a no-op that could even yield 0 blocks); use proper
    // integer ceiling division and clamp to at least one block.
    int numBlocks = (numThreads + bx*by - 1) / (bx*by);
    if(numBlocks < 1) numBlocks = 1;
    dim3 block(bx, by);
    dim3 grid(numBlocks); // 1D grid

    // Per-block partial sums, copied back to the host afterwards.
    mem_t d_results;
    d_results.numItems = numBlocks;
    d_results.numBytes = d_results.numItems * sizeof(double);
    cudaMalloc((void**)&d_results.ptr, d_results.numBytes);

    // Per-thread scratch buffer; never copied back to the host.
    mem_t d_work;
    d_work.numItems = (bx*by) * numBlocks;
    d_work.numBytes = d_work.numItems * sizeof(double);
    cudaMalloc((void**)&d_work.ptr, d_work.numBytes);

    // BUG FIX: cudaMalloc does not zero memory, but the kernel accumulates
    // into both buffers with `+=` — without these memsets every sum starts
    // from garbage and the result is undefined.
    cudaMemset(d_work.ptr, 0, d_work.numBytes);
    cudaMemset(d_results.ptr, 0, d_results.numBytes);

    int samplesPerThread = numSamples / ((bx*by) * numBlocks);
    integrate<<<grid, block>>>(a, b, samplesPerThread, d_work.ptr, d_results.ptr, bx, by);
#if DEBUG
    printf("a=%.2f,b=%.2f,numSamples=%i,numThreads=%i,samplesPerThread=%i,numBlocks=%i\n", a, b, numSamples, numThreads, samplesPerThread, numBlocks);
    printf("block.x=%u, block.y=%u\n", block.x, block.y);
#endif

    // Copy per-block sums back (cudaMemcpy synchronizes with the kernel).
    double* h_results = (double*) malloc(d_results.numBytes);
    cudaMemcpy(h_results, d_results.ptr, d_results.numBytes, cudaMemcpyDeviceToHost);

    // Final reduction over blocks on the host.
    double integral = 0.0;
    for(int i=0; i<numBlocks; i++) {
#if DEBUG
        printf("Block %i: sum=%.2f\n", i, h_results[i]);
#endif
        integral += h_results[i];
    }
    // Monte-Carlo estimate: interval width * sample mean of f.
    integral = abs(b-a) * (integral/(samplesPerThread * (bx*by) * numBlocks));

    cudaFree(d_results.ptr);
    cudaFree(d_work.ptr);
    free(h_results);

    end = std::chrono::system_clock::now();
    std::chrono::duration<double> elapsed_sec = end - start;
    std::cout << std::setprecision(10) << integral << std::endl;
#if DEBUG
    std::cout << numThreads << " threads: " << " numBlocks " << numBlocks << ", Result: " << std::setprecision(10) << integral << "; Elapsed time: " << elapsed_sec.count() << "s" << std::endl;
#endif

    // Record run parameters and the result as a one-row CSV file.
    std::string csvfile_name = std::to_string((int)abs(a)) + "-" + std::to_string((int)abs(b)) + "-" + std::to_string(numSamples) + "-" + std::to_string(numThreads) + ".csv";
    std::ofstream csvfile;
    csvfile.open(csvfile_name);
    csvfile << "a,b,numSamples,integral,elapsed(sec),method,block,numBlocks,numThreads\n";
    csvfile << std::to_string((int)a) << ","
            << std::to_string((int)b) << ","
            << std::to_string(numSamples) << ","
            << std::to_string(integral) << ","
            << elapsed_sec.count() << ","
            << "monte-carlo" << ','
            << "(" << bx << ";" << by << ")" << ","
            << numBlocks << ","
            << std::to_string(numThreads) << "\n";
    csvfile.close();
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.