serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
9,201 | /* jacobi.c - Poisson problem in 3d
*
*/
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
__device__
void jacobi_gpuStopTest(int N, double ***u, double ***v, double ***f, int iter_max, double *res) {
// One Jacobi sweep step for the 3-D Poisson problem: each thread updates a
// single grid point of u from the previous iterate v and accumulates the
// squared update into *res for the convergence test.
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int z = blockIdx.z * blockDim.z + threadIdx.z;
double localResidual = 0.0;
// Only interior points are updated; boundary values stay fixed.
const bool interior = (x > 0) && (y > 0) && (z > 0) &&
                      (x < N - 1) && (y < N - 1) && (z < N - 1);
if (interior) {
    // NOTE(review): the source term is scaled by 1/N^2; a grid spacing of
    // h = 1/(N-1) would give 1/((N-1)*(N-1)) — confirm intended spacing.
    u[x][y][z] = 1./6.*(v[x-1][y][z] + v[x+1][y][z]
                      + v[x][y-1][z] + v[x][y+1][z]
                      + v[x][y][z-1] + v[x][y][z+1]
                      + 1./((N)*(N)) * f[x][y][z]);
    const double diff = u[x][y][z] - v[x][y][z];
    localResidual = diff * diff;
}
// Out-of-range threads add 0.0.  atomicAdd on double requires SM60+.
atomicAdd(res, localResidual);
}
// Kernel wrapper around the device helper; expects one thread per grid
// point (launched over a 3-D grid, not a single thread).
__global__
void jacobi_stopTest(int N, double ***u, double ***v, double ***f, int iter_max, double *res)
{
jacobi_gpuStopTest(N, u, v, f, iter_max, res);
}
9,202 | #include "includes.h"
__global__ void sumMatrix(float *A, float *B, float *C, int nx, int ny) {
// Element-wise C = A + B over an nx * ny row-major matrix; one thread
// per element on a 2-D grid.
const unsigned int col = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int row = threadIdx.y + blockIdx.y * blockDim.y;
if (col >= nx || row >= ny)
    return; // guard the grid tail in both dimensions
const unsigned int i = col + row * nx;
C[i] = A[i] + B[i];
}
9,203 | #define COALESCED_NUM 16
#define blockDimX 128
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 16
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 2048
#define WIDTH_A 2048
__global__ void matmul(float * A, float * B, float * C, int width, int height)
{
// Tiled matrix multiply over 2048-wide row-major matrices (see the
// WIDTH_A/B/C macros).  Each 128x1 thread block computes a 16-row by
// 128-column tile of C; a 16x16 tile of A is staged in shared memory,
// padded to 17 columns to avoid shared-memory bank conflicts.
__shared__ float shared_0[16][17];
float sum[16];
#pragma unroll
for (int r=0; r<16; r=(r+1))
{
sum[r]=0;
}
for (int i=0; i<width; i=(i+16))
{
int it_1;
// The 128 threads cooperatively load the 16x16 A tile; each thread
// stores two elements, covering tile columns 0..7 and 8..15.
// Bug fix: the second store previously wrote column (tidx/16) again
// (same slot as the first store), so columns 8..15 of the tile were
// never initialized and a_8..a_15 read garbage.
shared_0[((tidx%16)+0)][(tidx/16)]=A(((((bidy*16)+tidy)+(tidx/16))+0), (i+(tidx%16)));
shared_0[((tidx%16)+0)][((tidx/16)+8)]=A(((((bidy*16)+tidy)+(tidx/16))+8), (i+(tidx%16)));
__syncthreads();
#pragma unroll
for (it_1=0; it_1<16; it_1=(it_1+1))
{
// One element of B feeds all 16 partial sums for this thread's column.
float b=B((it_1+i), idx);
#pragma unroll
for (int r=0; r<16; r=(r+1))
{
sum[r]+=(shared_0[it_1][r]*b);
}
}
__syncthreads(); // tile fully consumed before the next load overwrites it
}
// Write the 16 accumulated rows of this thread's C column.
#pragma unroll
for (int r=0; r<16; r=(r+1))
{
C((((bidy*16)+tidy)+r), idx)=sum[r];
}
}
|
9,204 | #include <cstdio>
#define BLOCK_SIZE 256
__global__ void mtrxToVec(int* mtrx,int* vec, int* ans,int n){
// ans = M^T * vec over an n x n matrix; grid-stride loop over columns.
// The read mtrx[n*i+idx] is coalesced: adjacent threads touch adjacent words.
int idx=blockDim.x*blockIdx.x+threadIdx.x;
while(idx<n){
int sum=0;
// Bug fix: the reduction must use vec[i], not vec[idx]; the old code
// computed vec[idx] * (column sum), which is not a matrix-vector
// product (it happened to give the same answer on the identity matrix
// used by main).
for(int i=0;i<n;i++)
sum+=mtrx[n*i+idx]*vec[i];
ans[idx]=sum;
idx+=gridDim.x*blockDim.x;
}
return;
}
__global__ void mtrxToVecY(int* mtrx,int* vec, int* ans,int n){
// Same computation as mtrxToVec but indexed along the Y grid/block
// dimension, for comparing launch-dimension effects in the benchmark.
int idy=blockDim.y*blockIdx.y+threadIdx.y;
while(idy<n){
int sum=0;
// Bug fix: use vec[i] (the loop variable), not vec[idy] — see mtrxToVec.
for(int i=0;i<n;i++)
sum+=mtrx[n*i+idy]*vec[i];
ans[idy]=sum;
idy+=gridDim.y*blockDim.y;
}
return;
}
__global__ void transposeMtrxToVec(int* mtrx,int* vec, int* ans,int n){
// ans = M * vec using row-major rows: thread idx walks row idx, so
// adjacent threads are n words apart (deliberately non-coalesced, to
// contrast with mtrxToVec in the benchmark).
int idx=blockDim.x*blockIdx.x+threadIdx.x;
while(idx<n){
int sum=0;
// Bug fix: the old index mtrx[n+idx*i] read mtrx[n] for every thread at
// i==0 and scaled by vec[idx]; a row dot product is mtrx[n*idx+i]*vec[i].
for(int i=0;i<n;i++)
sum+=mtrx[n*idx+i]*vec[i];
ans[idx]=sum;
idx+=gridDim.x*blockDim.x;
}
return;
}
__host__ void cuAssert(cudaError_t error){
// Terminate the program with the CUDA error string on any runtime failure.
if(error == cudaSuccess)
return;
printf("%s\n", cudaGetErrorString(error));
exit(0);
}
__host__ void print(int* ans,int n){
// Dump an n-element int vector, space separated, followed by a newline.
for(int i = 0; i < n; ++i)
printf("%5d ",ans[i]);
printf("\n");
}
// Benchmark driver: times the coalesced (X- and Y-indexed) matrix-vector
// kernels against the non-coalesced variant on an n x n identity matrix,
// using CUDA events for per-launch timing.
__host__ int main(int argc,char* argv[]){
cudaEvent_t start,stop;
cuAssert(cudaEventCreate(&start));
cuAssert(cudaEventCreate(&stop));
float time;
const int n=10000;
size_t size=sizeof(int)*n;
int* vec=(int*)malloc(size);
int* ans=(int*)malloc(size);
int* mtrx=(int*)malloc(size*n);
// Identity matrix and vec[i] = i, so every kernel should return vec itself.
for(int i=0;i<n;i++){
for(int j=0;j<n;j++)
mtrx[i*n+j]=(i==j)?(1):(0);
vec[i]=i;
}
int* cuVec=NULL;
int* cuAns=NULL;
int* cuMtrx=NULL;
cuAssert(cudaMalloc((void**)&cuVec,size));
cuAssert(cudaMalloc((void**)&cuAns,size));
cuAssert(cudaMalloc((void**)&cuMtrx,size*n));
cuAssert(cudaMemcpy(cuMtrx,mtrx,size*n,cudaMemcpyHostToDevice));
cuAssert(cudaMemcpy(cuVec,vec,size,cudaMemcpyHostToDevice));
dim3 grid((n+BLOCK_SIZE-1)/BLOCK_SIZE,1,1);
dim3 block(BLOCK_SIZE,1,1);
//coalesced block 1D indexes
cuAssert(cudaEventRecord(start));
mtrxToVec<<<grid,block>>>(cuMtrx,cuVec,cuAns,n);
cuAssert(cudaEventRecord(stop));
cuAssert(cudaEventSynchronize(stop));
cuAssert(cudaEventElapsedTime(&time,start,stop));
printf("coalesced time X %f\n",time);
cuAssert(cudaGetLastError());
cuAssert(cudaMemcpy(ans,cuAns,size,cudaMemcpyDeviceToHost));
//print(ans,n);
// Same benchmark, but indexing along the grid/block Y dimension.
dim3 gridY(1,(n+BLOCK_SIZE-1)/BLOCK_SIZE,1);
dim3 blockY(1,BLOCK_SIZE,1);
cuAssert(cudaEventRecord(start));
mtrxToVecY<<<gridY,blockY>>>(cuMtrx,cuVec,cuAns,n);
cuAssert(cudaEventRecord(stop));
cuAssert(cudaEventSynchronize(stop));
cuAssert(cudaEventElapsedTime(&time,start,stop));
printf("coalesced time Y %f\n",time);
cuAssert(cudaGetLastError());
//Non coalesced block
cuAssert(cudaEventRecord(start));
transposeMtrxToVec<<<grid,block>>>(cuMtrx,cuVec,cuAns,n);
cuAssert(cudaEventRecord(stop));
cuAssert(cudaEventSynchronize(stop));
cuAssert(cudaEventElapsedTime(&time,start,stop));
printf("non coalesced time %f\n",time);
cuAssert(cudaGetLastError());
// NOTE(review): start/stop events are never destroyed — confirm whether
// cudaEventDestroy was intentionally omitted.
cuAssert(cudaFree(cuVec));
cuAssert(cudaFree(cuAns));
cuAssert(cudaFree(cuMtrx));
free(vec);
free(ans);
free(mtrx);
return 0;
}
|
9,205 | #include <stdio.h>
__global__ void add( int *c)
{
// One block per element: each block stores its own index into its slot.
c[blockIdx.x] = blockIdx.x;
}
#define N 10
int main(void)
{
// Launch N single-thread blocks that each write their block index,
// then copy the result back and print it.
int host_c[N];
int *dev_c;
/* Allocate one int per block on the device */
cudaMalloc( (void**)&dev_c, N*sizeof(int));
add<<<N,1>>>(dev_c );
/* Copy contents of dev_c back to the host (blocking, so no explicit sync) */
cudaMemcpy( host_c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < N; i++)
{
printf( "c[%d] = %d\n",i,host_c[i]);
}
cudaFree(dev_c);
return 0;
}
|
9,206 | #include <stdio.h>
// Intentionally empty kernel: exists only to exercise a kernel launch.
__global__ void nullkernel( void) {
}
int main(void) {
// Launch the no-op kernel (asynchronously), then print from the host.
nullkernel<<<1,1>>>();
printf("Hello World!\n");
return 0;
}
|
9,207 | #include "includes.h"
/**********************************************************
* @author Pulkit Verma
* @email technopreneur[dot]pulkit[at]gmail[dot]com
**********************************************************/
// The program takes two equal size vectors as input and outputs their vector sum
__global__ void vecAdd(float *in1, float *in2, float *out, int len)
{
// Element-wise vector sum: out = in1 + in2 for the first len elements.
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if(gid >= len)
return; // grid-tail guard
out[gid] = in1[gid] + in2[gid];
}
9,208 | #include <stdio.h>
/* Function that normalizes the filter on the max element */
/* Normalize a size*size filter so its largest element becomes 1.
 * No-op when the maximum is not positive (a zero or all-non-positive
 * filter would otherwise be divided by zero, filling it with inf/NaN). */
void gaussNorm(float *data,int size){
float max=0;
for (int i=0;i<size*size;i++)
if(data[i]>max) max = data[i];
/* Bug fix: guard against division by zero. */
if (max == 0) return;
for (int i=0;i<size*size;i++)
data[i]/=max;
}
/* This funtion calculates the 2D gussian filter gaussFilter[size*size]
* It is based on MATLAB's fspecial('gaussian',size,sigma) function's algorithm.
*/
/* Build a size x size Gaussian kernel normalized to unit sum — the same
 * algorithm as MATLAB's fspecial('gaussian',size,sigma).
 * The caller owns the returned malloc'd array. */
float * gaussFilter(int size, float sigma){
/* Half-width of the mesh: coordinates run -half .. +half with unit step. */
const float half = ((float)size-1)/2;
float *filter = (float *)malloc(size*size*sizeof(float));
/* Evaluate exp(-(x^2 + y^2) / (2*sigma^2)) at every mesh point while
 * accumulating the total for the normalization pass. */
float total = 0;
float x = -half;
for (int i = 0; i < size; i++, x++){
float y = -half;
for (int j = 0; j < size; j++, y++){
const float g = exp(- (x*x + y*y)/ (2*sigma*sigma));
filter[i*size + j] = g;
total += g;
}
}
/* Normalize so the filter sums to 1. */
for (int i = 0; i < size*size; i++)
filter[i] = filter[i] / total;
return filter;
}
|
9,209 | /*
* bandwidth-test, used for plotting
* can use pageable or pinned memory
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
#include <cuda_runtime.h>
#define YELLOW "\e[1;33m"
#define RED "\e[1;31m"
#define WHITE "\e[1;00m"
typedef struct {
float mseconds_h2d;
float mseconds_d2h;
float bandwidth_h2d;
float bandwidth_d2h;
bool err;
} Profiling;
typedef enum {
PLOT,
HUMAN
} Format;
typedef enum {
PINNED,
PAGEABLE
} Mode;
struct option long_options[] = {
{"num" , required_argument, 0, 0},
{"mode" , required_argument, 0, 0},
{"format" , required_argument, 0, 0},
{"help" , 0 , 0, 0},
{0 , 0 , 0, 0}
};
float calcBandwidth(size_t size, float mseconds) {
return (size/((float)1024*1024*1024)) / (mseconds / 1000.0f);
}
/*! perform a profiled memory transfer
*
* with pinned or pageable memory using size bytes
*
*/
Profiling profileTransfer(size_t size, Mode mode, Format format) {
// Allocates a host buffer (pinned or pageable) and a device buffer of
// `size` bytes, then times H2D and D2H copies with CUDA events.
// Returns a Profiling record; profile.err stays true on any failure.
cudaEvent_t start, stop;
cudaError_t err;
void *devPtr, *hostPtr;
Profiling profile = {0};
profile.err = true; // assume failure until every step succeeds
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Host-side buffer: page-locked or ordinary pageable memory.
if (mode == PINNED) {
err = cudaMallocHost(&hostPtr, size);
if (err != cudaSuccess) {
fprintf(stderr, "! host pinned memory allocation failed.\n! %s\n",
cudaGetErrorString(err));
return profile;
}
}else if (mode == PAGEABLE) {
hostPtr = malloc(size);
if (hostPtr == NULL) {
printf("! host memory allocation failed!\n");
return profile; // lets continue, though it is most likely not working
}
}else {
return profile; // unknown mode
}
if (format == HUMAN) {
// Bug fix: size is a size_t; it was printed with %d (UB on LP64).
printf(" > allocating %s%zu%s bytes in device memory\n", YELLOW, size, WHITE);
}
// Time the device allocation.
cudaEventRecord(start);
err = cudaMalloc(&devPtr, size);
if (err != cudaSuccess) {
fprintf(stderr, "! cuda error: %s\n", cudaGetErrorString(err));
// Bug fix: the host buffer was leaked on this path.
if (mode == PINNED) cudaFreeHost(hostPtr); else free(hostPtr);
return profile;
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float alloc_ms = 0.0f;
cudaEventElapsedTime(&alloc_ms, start, stop);
if (format == HUMAN) {
// Bug fix: the elapsed-time argument was missing from the printf
// call (undefined behavior — garbage was printed).
printf(" > allocated in %.2f milliseconds\n", alloc_ms);
}
//Host --> Device
cudaEventRecord(start);
cudaMemcpy(devPtr, hostPtr, size, cudaMemcpyDefault);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&profile.mseconds_h2d, start, stop);
profile.bandwidth_h2d = calcBandwidth(size, profile.mseconds_h2d);
if (format == HUMAN) {
printf(" > copied host->device in %.2f milliseconds\n", profile.mseconds_h2d);
printf(" > this equals a bandwidth of %s%.2f%s GB/s\n", RED, profile.bandwidth_h2d, WHITE);
}
// Device --> Host
cudaEventRecord(start);
cudaMemcpy(hostPtr, devPtr, size, cudaMemcpyDefault);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&profile.mseconds_d2h, start, stop);
profile.bandwidth_d2h = calcBandwidth(size, profile.mseconds_d2h);
if (format == HUMAN) {
printf(" > copied device->host in %.2f milliseconds\n", profile.mseconds_d2h);
printf(" > this equals a bandwidth of %.2f GB/s\n", profile.bandwidth_d2h);
}
// Release all resources (the events were previously leaked).
if (mode == PINNED) {
cudaFreeHost(hostPtr);
}else if (mode == PAGEABLE) {
free(hostPtr);
}
cudaFree(devPtr);
cudaEventDestroy(start);
cudaEventDestroy(stop);
profile.err = false;
return profile;
}
/* Print the command-line synopsis. */
void showUsage(void) {
    printf("usage: bandwidth [-p] [-o]\n");
}
// Entry point: parses options, builds a list of transfer sizes (fixed set
// for human output, evenly spaced for plot output), and runs a profiled
// H2D/D2H transfer for each size, writing results to stdout or a file.
int main(int argc, char** argv)
{
int num_samples = 0; // number of samples
Mode mode = PAGEABLE; // Transfer-mode, pinned or pageable
Format format = HUMAN; // Format: human-readable or plotable
bool verbose = false;
char *output = NULL;
FILE *fd = NULL;
// Option parsing loop; long options are mapped by long_options above.
while(1) {
int option_index = 0;
int c = getopt_long(argc, argv, "n:m:f:ho:", long_options, &option_index);
if (c == -1) {
break;
}
switch(c) {
case 'm':
if (optarg) {
if (strcmp(optarg, "pinned") == 0) {
mode = PINNED;
} else if ( strcmp(optarg, "pageable") == 0) {
mode = PAGEABLE;
}
}
break;
case 'f':
if (optarg) {
if (strcmp(optarg, "plot") == 0) {
format = PLOT;
} else if (strcmp(optarg, "human") == 0 ||
strcmp(optarg, "plain") == 0) {
format = HUMAN;
}
}
break;
case 'o': //output file
if (optarg) {
size_t len = strnlen(optarg, 255);
output = (char*) malloc(sizeof(char) * len + 1);
strncpy(output, optarg, len);
output[len] = 0; // null-terminate
}
break;
case 'h':
showUsage();
exit(0);
case 'n': // --num
if (optarg) {
num_samples = atoi(optarg);
}
break;
case 'v': // verbose
// NOTE(review): unreachable — 'v' is not in the getopt option string.
verbose = true;
break;
default:
printf("error\n");
exit(-1);
}
}
if (num_samples != 0 && format == HUMAN) {
fprintf(stderr, "!! can't provide amount of samples for human format.\n");
exit(1);
}
cudaDeviceReset();
size_t *samples = NULL;
if (format == HUMAN) {
// Fixed set of sizes for the human-readable report.
num_samples = 0;
size_t samples_tmp[] = {1, 64, 512, 1024, 1024*1024, 1024*1024*10, 1024*1024*10, 1024*1024*200, 1024*1024*500, 1024*1024*900};
num_samples = sizeof(samples_tmp) / sizeof(samples_tmp[0]);
printf("num: %d\n", num_samples);
samples =(size_t*) malloc(sizeof(size_t) * num_samples);
for(int i=0; i < num_samples; i ++) {
*(samples+i) = samples_tmp[i]; // i know, i know...
} // could've used C++ new op...
}else if (format == PLOT) { // use more data points for plotting
num_samples = num_samples ? num_samples : 128;
samples = (size_t*)malloc(sizeof(size_t) * num_samples);
size_t max_size = 1024*1024*700;
int i=0;
// NOTE(review): this loop can iterate num_samples+1 times and write one
// element past the buffer — confirm and clamp with i < num_samples.
for (size_t size = 1; size < max_size; size += max_size/num_samples, i++) {
samples[i] = size;
}
}
Profiling profile = {0};
if (output != NULL) {
fd = fopen(output, "a+");
if (fd == NULL) {
fprintf(stderr, "!! error opening output-file.\n");
exit(1);
}
}else {
fd = stdout;
}
// One profiled transfer per sample size.
for (int i=0; i < num_samples; i++) {
size_t size = samples[i];
if (format == PLOT) {
if (i % (num_samples/10) == 0 && i != 0) {
fprintf(stderr, "%d[%d]\n", i, size);
}else {
fprintf(stderr, ".");
}
}
// NOTE(review): size is a size_t but printed with %d here and in the
// fprintf below — should be %zu.
if (format == HUMAN) {
printf("* test #%d: %d bytes ( %.2f MB)", i, size, size / (float)(1024*1024));
printf("> allocating %d bytes in host memory\n", size);
}
profile = profileTransfer(size, mode, format);
fprintf(fd, "%d\t%f\t%f\t%f\n", size, profile.bandwidth_h2d, profile.bandwidth_d2h,
(profile.bandwidth_h2d + profile.bandwidth_d2h) / 2.0);
}
if (fd != NULL) {
fclose(fd);
}
// NOTE(review): `output` is never freed — harmless at exit, but worth noting.
free(samples);
return 0;
}
|
9,210 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void unique_index_calc_threadIdx(int *input){
// Prints each element using threadIdx.x alone, so the index is only
// unique (and the read only valid) for single-block launches.
printf("threadIdx: %d, value: %d\n", threadIdx.x, input[threadIdx.x]);
}
__global__ void unique_gid_calculation(int *input){
// Flat global index = block offset + thread index; unique across the grid.
const int local = threadIdx.x;
const int gid = blockIdx.x * blockDim.x + local;
printf("blockIdx.x: %d, threadIdx.x: %d, gid: %d, value: %d\n", blockIdx.x, local, gid, input[gid]);
}
int main(){
// Print 16 host ints, copy them to the device, then have a 4-block x
// 4-thread launch echo each element with its computed global index.
const int array_size = 16;
const int array_bytes = sizeof(int) * array_size;
int h_data[] = {23, 9, 4, 53, 65, 12, 1, 33, 3, 92, 41, 54, 68, 11, 45, 21};
for(int i = 0; i < array_size; i++){
printf("%d ", h_data[i]);
}
printf("\n\n");
int *d_data;
cudaMalloc((void **)&d_data, array_bytes);
cudaMemcpy(d_data, h_data, array_bytes, cudaMemcpyHostToDevice);
dim3 block(4);
dim3 grid(4);
unique_gid_calculation <<<grid, block>>>(d_data);
cudaDeviceSynchronize(); // wait so the kernel's printf output is flushed
cudaDeviceReset();
return 0;
}
9,211 | //pass
//--blockDim=256 --gridDim=256
#include <cuda.h>
//////////////////////////////////////////////////////////////////////////////
//// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
//// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
//// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
//// PARTICULAR PURPOSE.
////
//// Copyright (c) Microsoft Corporation. All rights reserved
//////////////////////////////////////////////////////////////////////////////
//----------------------------------------------------------------------------
// File: Histogram.cpp
//
// Implements histogram in C++ AMP
// Refer README.txt
//----------------------------------------------------------------------------
#define histogram_bin_count 256 /* Bin count */
#define log2_thread_size 5U
#define thread_count 8 /* number of partial histogram per tile */
#define histogram256_tile_size (thread_count * (1U << log2_thread_size))
#define histogram256_tile_static_memory (thread_count * histogram_bin_count)
#define merge_tile_size histogram_bin_count /* Partial result Merge size */
#define partial_histogram256_count (thread_count * (1U << log2_thread_size))
// This function aggregates partial results
// Merges partial histograms: one block per bin (blockIdx.x == bin index).
// Each thread strides over the partial results for its block's bin, then
// the block tree-reduces the per-thread sums in shared memory.
// The #ifndef MUTATION guard is a deliberate bug-injection hook: defining
// MUTATION removes the reduction barrier to create a race.
__global__ void histo_merge_kernel(unsigned int* partial_result, unsigned int* histogram_amp)
{
{
unsigned sum = 0;
// Strided sum over all partial histograms for this block's bin.
for (unsigned i = threadIdx.x;
i < partial_histogram256_count * histogram_bin_count; i += merge_tile_size)
{
sum += partial_result[blockIdx.x + i * histogram_bin_count];
}
__shared__ unsigned s_data[merge_tile_size];
s_data[threadIdx.x] = sum;
// parallel reduce within a tile
for (int stride = merge_tile_size / 2;
stride > 0; stride >>= 1)
{
#ifndef MUTATION
/* BUGINJECT: REMOVE_BARRIER, DOWN */
// Barrier at the top of each iteration orders the previous
// iteration's writes before this iteration's reads.
__syncthreads();
#endif
if (threadIdx.x < stride)
{
s_data[threadIdx.x] += s_data[threadIdx.x + stride];
}
}
// tile sum is updated to result array by zero-th thread
// (safe without a final barrier: the stride-1 write was done by thread 0)
if (threadIdx.x == 0)
{
histogram_amp[blockIdx.x] = s_data[0];
}
}
}
|
9,212 | /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
__global__ void staticReverse(int *d, int n)
{
// Reverse d in place via a statically sized shared buffer.
// NOTE(review): assumes a single block with blockDim.x == n and n <= 64.
__shared__ int tmp[64];
const int i = threadIdx.x;
tmp[i] = d[i];
__syncthreads(); // the whole array must be staged before reversed reads
d[i] = tmp[n - i - 1];
}
__global__ void dynamicReverse(int *d, int n)
{
// Same reversal as staticReverse, but the shared buffer is sized by the
// third kernel-launch parameter (extern __shared__).
extern __shared__ int buf[];
const int i = threadIdx.x;
buf[i] = d[i];
__syncthreads(); // stage everything before any reversed read
d[i] = buf[n - i - 1];
}
// Verifies both reversal kernels against a host-computed expected array.
int main(void)
{
const int n = 64;
// a = input sequence, r = expected reversed result, d = device output
int a[n], r[n], d[n];
for (int i = 0; i < n; i++) {
a[i] = i;
r[i] = n-i-1;
d[i] = 0;
}
int *d_d;
cudaMalloc(&d_d, n * sizeof(int));
// run version with static shared memory
cudaMemcpy(d_d, a, n*sizeof(int), cudaMemcpyHostToDevice);
staticReverse<<<1,n>>>(d_d, n);
cudaMemcpy(d, d_d, n*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
// run dynamic shared memory version (third launch argument = shared bytes)
cudaMemcpy(d_d, a, n*sizeof(int), cudaMemcpyHostToDevice);
dynamicReverse<<<1,n,n*sizeof(int)>>>(d_d, n);
cudaMemcpy(d, d_d, n * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
// NOTE(review): d_d is never freed — confirm the omission is intentional.
}
|
9,213 | #include <stdio.h>
#include <stdlib.h>
__global__ void vecadd_cuda(double* a, double* b, double* c, int width) {
// c = a + b, one thread per element, guarded against the grid tail.
const int gid = blockIdx.x*blockDim.x+threadIdx.x;
if (gid >= width)
return;
c[gid] = a[gid] + b[gid];
}
int main()
{
// Adds two width-element vectors on the GPU and prints every element.
const int width = 10000000;
double *a = (double*) malloc (width * sizeof(double));
double *b = (double*) malloc (width * sizeof(double));
double *c = (double*) malloc (width * sizeof(double));
// a[i] + b[i] == width for every i, which makes the output easy to check.
for(int i = 0; i < width; i++) {
a[i] = i;
b[i] = width-i;
c[i] = 0;
}
int size = width*sizeof(double);
double *d_a, *d_b, *d_c;
cudaMalloc((void **) &d_a, size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_b, size);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_c, size);
int block_size = 1024;
dim3 dimGrid((width-1)/block_size + 1, 1, 1); // ceil-div for the grid tail
dim3 dimBlock(block_size,1,1);
vecadd_cuda<<<dimGrid,dimBlock>>>(d_a, d_b, d_c, width);
// Blocking memcpy also synchronizes with the kernel above.
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
for(int i = 0; i < width; i++)
printf("\n c[%d] = %f",i,c[i]);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Bug fix: the host buffers were leaked.
free(a);
free(b);
free(c);
return 0;
}
|
9,214 | #include "includes.h"
using namespace std;
#define size 10
#define block 10
// Block-level max reduction: each block writes the maximum of its slice
// of `input` into result[blockIdx.x].
__global__ void find_max(int* input, int* result, int n)
{
// NOTE(review): `size` is the file-level macro (10); the launch must use
// blockDim.x <= size or sdata overflows — confirm launch config.
__shared__ int sdata[size];
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int tx = threadIdx.x;
// Identity element for max; also covers threads past the end of input.
int x = -INT_MAX;
if (i<n)
{
x = input[i];
}
sdata[tx] = x;
__syncthreads();
// Tree reduction.  NOTE(review): the halving pattern assumes blockDim.x
// is a power of two; with blockDim.x == 10 some elements are skipped —
// TODO confirm intended block size.
for(unsigned int s = blockDim.x >> 1 ; s>0 ; s>>=1)
{
__syncthreads();
if(tx<s)
{
if(sdata[tx]<sdata[tx+s])
sdata[tx]=sdata[tx+s];
}
}
if (threadIdx.x == 0)
{
result[blockIdx.x] = sdata[0];
}
}
9,215 | #include <iostream>
#include <cstdio>
using namespace std;
__device__ double xmax = 3.14;
double xmax_;
__global__
void fun(int N)
{
// Each thread allocates two N-double arrays from the device heap and
// writes their last elements; used to exercise the heap limit set by main.
double *x = new double[N];
double *y = new double[N];
// Bug fix: the old code only checked x, then wrote through y as well —
// a null dereference once the heap runs out. Device-side new returns
// NULL (it does not throw) on heap exhaustion.
if (x && y) {
x[N-1] = 3.1415926;
y[N-1] = 2.7182818;
}
else
printf("heap overflow\n");
// NOTE(review): the allocations are intentionally never freed; the
// program's purpose is to exhaust the configured device heap.
}
int main()
{
const int N = 1024;
size_t heapsize;
// Raise the device malloc heap limit before the first kernel launch,
// then report the value actually granted.
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 41943040 + 96*1024*1024);
cudaDeviceGetLimit(&heapsize, cudaLimitMallocHeapSize);
cout << "heapsize = " << heapsize << endl;
// 4096 blocks x 256 threads; every thread allocates from the device heap.
fun<<<1024*1024/256,256>>>(N);
cudaDeviceSynchronize();
xmax_ = 5.;
}
|
9,216 | #include <stdio.h>
#define SIZE 400
#define USE_SHARED
typedef int value_type;
// Counts the block's threads with a shared-memory atomic counter, then
// every thread writes the final count into its own slot of dest.
__global__ void atomic_test_shared(value_type *dest)
{
__shared__ int x;
// Every thread stores 0; the pre-barrier race is benign (same value).
x = 0;
__syncthreads();
atomicAdd(&x, 1);
__syncthreads(); // count complete before anyone reads x
dest[threadIdx.x] = x;
}
// Global-memory variant of the counter test.
// NOTE(review): only correct for a single-block launch — __syncthreads()
// does not order the *dest = 0 store or the atomicAdd across blocks.
__global__ void atomic_test_global(int *dest)
{
*dest = 0;
__syncthreads();
atomicAdd(dest, 1);
__syncthreads();
// Thread 0's slot *is* dest[0], so the fan-out below overwrites the
// counter itself with its final value.
dest[threadIdx.x] = dest[0];
}
int main()
{
// Runs one of the atomic counter kernels (selected by USE_SHARED) on a
// single SIZE-thread block and prints the per-thread results.
value_type *data;
cudaMalloc( (void**) &data, SIZE*sizeof(value_type));
#ifdef USE_SHARED
puts("using shared 2\n");
atomic_test_shared<<<1,SIZE>>>(data);
#else
puts("using global\n");
atomic_test_global<<<1,SIZE>>>(data);
#endif
value_type h_data[SIZE];
// Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
// is the drop-in replacement (the blocking memcpy would also sync).
cudaDeviceSynchronize();
cudaMemcpy(h_data, data, SIZE*sizeof(value_type), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for (int i = 0; i < SIZE; ++i)
printf("%d ", h_data[i]);
puts("\n");
// Bug fix: the device buffer was leaked.
cudaFree(data);
return 0;
}
|
9,217 | /*
Authors
- Dibyadarshan Hota 16CO154
- Omkar Prabhu 16CO233
*/
#include <iostream>
#include <stdio.h>
#include <sstream>
#include <string.h>
#include <cuda.h>
#define ll long long
using namespace std;
// ============== Kernel for betweenness calculation ========================
// Brandes-style betweenness centrality on a CSR graph (R = row offsets,
// C = adjacency targets).  Designed for a SINGLE-block launch: sources are
// processed one at a time; for each source a level-synchronous BFS fills
// distances d[] and shortest-path counts sigma[], recording visit order in
// reverse_stack, then thread 0 accumulates dependencies delta[] in reverse
// order into bc[].
__global__
void betweenness_centrality_kernel (int nodes, int *C, int *R, int *d, int *sigma, float *delta, float *bc, int *reverse_stack) {
// Used to store the position where nodes are pushed as a stack
__shared__ int position;
// Used to store the source vertex
__shared__ int s;
//__shared__ int end_pos;
int idx = threadIdx.x;
if (idx == 0) {
// Initializing source
s = 0;
//end_pos = 1;
//reverse_bfs_limit[0] = 0;
}
__syncthreads();
while (s < nodes) {
__syncthreads();
// ============== Vertex parallel method for BFS ========================
//Initialize d and sigma
for(int v=idx; v<nodes; v+=blockDim.x) {
if(v == s) {
d[v] = 0;
sigma[v] = 1;
}
else {
d[v] = INT_MAX;
sigma[v] = 0;
}
delta[v] = 0;
}
__syncthreads();
__shared__ int current_depth;
__shared__ bool done;
// ============== INIT ========================
if(idx == 0) {
done = false;
current_depth = 0;
position = 0;
}
__syncthreads();
// SP Calc
// NOTE(review): the while(!done) condition is re-read without a barrier
// after the per-level updates below — confirm there is no race between
// the depth increment by thread 0 and the loop re-entry of other threads.
while(!done)
{
__syncthreads();
done = true;
__syncthreads();
for(int v=idx; v<nodes; v+=blockDim.x) {
if(d[v] == current_depth) {
// ============== Storing nodes for reverse BFS ========================
// atomicAdd hands each discovered vertex a unique stack slot.
int t = atomicAdd(&position,1);
reverse_stack[t] = v;
// ============== Relaxation step to find minimum distance ========================
for(int r=R[v]; r<R[v+1]; r++) {
int w = C[r];
if(d[w] == INT_MAX) {
d[w] = d[v] + 1;
done = false;
}
if(d[w] == (d[v] + 1)) {
// Multiple predecessors may update w concurrently.
atomicAdd(&sigma[w],sigma[v]);
}
}
}
}
__syncthreads();
if(idx == 0){
current_depth++;
//reverse_bfs_limit[end_pos] = position;
//++end_pos;
}
}
// Parallel Vertex Parallel implementation (uncomment the following lines and comment the ones below)
__syncthreads();
// atomicSub(&end_pos,2);
// for(int itr1 = end_pos; itr1 >= 0; --itr1){
// for(int itr2 = reverse_bfs_limit[itr1] + idx; itr2 < reverse_bfs_limit[itr1+1]; itr2+=blockDim.x){
// // reverse_stack[itr2] is one node
// for(int itr3 = R[reverse_stack[itr2]]; itr3 < R[reverse_stack[itr2] + 1]; ++itr3){
// int consider = C[itr3];
// // C[itr3] other node
// if(d[consider] == d[reverse_stack[itr2]]-1){
// delta[consider] += ( ((float)sigma[consider]/sigma[reverse_stack[itr2]]) * ((float)1 + delta[reverse_stack[itr2]]) );
// }
// }
// if(reverse_stack[itr2] != s){
// bc[reverse_stack[itr2]] += delta[reverse_stack[itr2]];
// }
// }
// __syncthreads();
// }
// Serialized Vertex Parallel implementation. Comment the following for parallel implementation
// Dependency accumulation: walk the stack in reverse BFS order (thread 0
// only, so no synchronization is needed within this phase).
if(idx == 0){
for(int itr1 = nodes - 1; itr1 >= 0; --itr1){
for(int itr2 = R[reverse_stack[itr1]]; itr2 < R[reverse_stack[itr1] + 1]; ++itr2){
int consider = C[itr2];
if(d[consider] == d[reverse_stack[itr1]]-1){
delta[consider] += ( ((float)sigma[consider]/sigma[reverse_stack[itr1]]) * ((float)1 + delta[reverse_stack[itr1]]) );
}
}
if(reverse_stack[itr1] != s){
bc[reverse_stack[itr1]] += delta[reverse_stack[itr1]];
}
}
}
// ============== Incrementing source ========================
__syncthreads();
if (idx == 0) {
s += 1;
}
}
}
int main () {
// Reads a graph (node/edge counts, then one adjacency line per node),
// builds a CSR representation, runs the betweenness-centrality kernel on
// one block of 256 threads, and prints the halved scores.
// Uncomment for reading files in stdin
// freopen("graph", "r", stdin);
int nodes, edges;
cin>>nodes>>edges;
// compressed adjacency list (CSR): V holds row offsets, E the neighbors
int * V = new int[nodes + 1];
int * E = new int[2 * edges];
string line;
int node = 0;
int counter = 0;
getline(cin, line); // consume the remainder of the first input line
for (int i = 0; i < nodes; ++i) {
getline(cin, line);
V[node] = counter;
istringstream is(line);
int tmp;
while (is >> tmp) {
E[counter] = tmp;
counter += 1;
}
++node;
}
V[node] = counter;
// Host-side work arrays.
int *d = new int[nodes];
int *sigma = new int[nodes];
float *delta = new float[nodes];
float *bc = new float[nodes];
// Bug fix: memset(bc,0,sizeof(bc)) zeroed only sizeof(float*) bytes;
// the whole result array must start at zero.
memset(bc, 0, sizeof(float) * nodes);
int *d_d, *d_sigma, *d_V, *d_E, *d_reverse_stack;
float *d_delta, *d_bc;
// Allocating memory via cudaMalloc
cudaMalloc((void**)&d_d, sizeof(int) * nodes);
cudaMalloc((void**)&d_sigma, sizeof(int) * nodes);
cudaMalloc((void**)&d_reverse_stack, sizeof(int) * nodes);
cudaMalloc((void**)&d_V, sizeof(int) * (nodes + 1));
cudaMalloc((void**)&d_E, sizeof(int) * (2*edges));
cudaMalloc((void**)&d_delta, sizeof(float) * nodes);
cudaMalloc((void**)&d_bc, sizeof(float) * nodes);
cudaMemcpy(d_V, V, sizeof(int) * (nodes+1), cudaMemcpyHostToDevice);
cudaMemcpy(d_E, E, sizeof(int) * (2*edges), cudaMemcpyHostToDevice);
cudaMemcpy(d_bc, bc, sizeof(float) * (nodes), cudaMemcpyHostToDevice);
// ============== Kernel call ========================
betweenness_centrality_kernel <<<1, 256>>> (nodes, d_E, d_V, d_d, d_sigma, d_delta, d_bc, d_reverse_stack);
// Blocking copy also synchronizes with the kernel.
cudaMemcpy(bc, d_bc, sizeof(float) * nodes, cudaMemcpyDeviceToHost);
cout<<"Res: \n";
// Each shortest path is counted from both endpoints, hence the /2.
for (int i = 0; i < nodes; i++) {
printf("%f ", bc[i]/2.0);
}
cout<<endl;
// ============== Deallocating memory ========================
cudaFree(d_sigma);
cudaFree(d_d);
cudaFree(d_V);
cudaFree(d_E);
cudaFree(d_delta);
cudaFree(d_bc);
cudaFree(d_reverse_stack);
// Bug fix: these arrays were allocated with new[], so releasing them
// with free() was undefined behavior — use delete[].
delete[] E;
delete[] V;
delete[] d;
delete[] sigma;
delete[] delta;
delete[] bc;
return 0;
}
|
9,218 | #include <stdio.h>
#include <math.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#define DATA_SIZE 10
/* One-sample Z-test on the fixed dataset {1, ..., DATA_SIZE}.
 * hypothesis_value: null-hypothesis mean; alpha: significance level;
 * test_type: 1 = left-tailed, 2 = right-tailed, 3 = two-sided.
 * Computes mean and sample variance on the device with Thrust, then
 * evaluates the normal CDF via erf() on the host.  Returns the p-value
 * for the chosen tail, or 0 for an unknown test_type.
 * NOTE(review): with DATA_SIZE = 10 a t-distribution would normally be
 * used rather than the normal approximation — confirm intent. */
template <class R, class S, class T>
T hypothesis_test(R hypothesis_value, T alpha, S test_type){
thrust::host_vector<T> data(DATA_SIZE);
for (int i=0; i < data.size(); i++){
data[i] = i + 1;
}
thrust::device_vector<T> D1 = data;
thrust::device_vector<T> D2(data.size());
// Sample mean via device reduction.
T mean = thrust::reduce( D1.begin(),
D1.end(),
(T)0,
thrust::plus<T>())/data.size();
// Center the data in place: D1[i] -= mean.
thrust::transform( D1.begin(),
D1.end(),
thrust::make_constant_iterator(mean),
D1.begin(),
thrust::minus<T>());
// Square the centered values into D2.
thrust::transform( D1.begin(),
D1.end(),
D1.begin(),
D2.begin(),
thrust::multiplies<T>());
// Unbiased sample variance (divide by n-1).
T variance = thrust::reduce( D2.begin(),
D2.end(),
(T)0,
thrust::plus<T>())/(data.size()-1);
T standard_deviation = sqrt(variance);
// Z statistic for the sample mean against the hypothesized mean.
T Z = (mean - hypothesis_value)/(standard_deviation/sqrt(data.size()));
// Tail probabilities from the standard normal CDF, Phi(z) = (1+erf(z/sqrt(2)))/2.
T left = (0.5)*(1.0 + erf(Z/sqrt(2.0)));
T right = 1.0 - (0.5)*(1.0 + erf(Z/sqrt(2.0)));
// NOTE(review): abs() here may resolve to the integer overload for some
// hosts/T — consider fabs; confirm it picks the floating overload.
T two_sided = 2.0*(1.0 - (0.5)*(1.0 + erf(abs(Z)/sqrt(2.0))));
if (test_type == 1){
if(left < alpha){
printf("We reject the null hypothesis\n");
return left;
}
else{
printf("We fail to reject the null hypothesis\n");
return left;
}
}
else if(test_type == 2){
if(right < alpha){
printf("We reject the null hypothesis \n");
return right;
}
else{
printf("We fail to reject the null hypothesis \n");
return right;
}
}
else if(test_type == 3){
if(two_sided < alpha){
printf("We reject the null hypothesis \n");
return two_sided;
}
else{
printf("we fail to reject the null hypothesis \n");
return two_sided;
}
}
else return 0;
}
|
9,219 | #include <cuda.h>
#include <stdio.h>
#define CUDA_CHECK_RETURN(value) {\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error %s at line %d in file %s\n",\
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}}
// Fill a[i] with its own global linear index, one element per thread.
// The launch must cover exactly the array length (no bounds parameter).
__global__ void gTest(float* a){
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    a[gid] = (float)gid;
}
// In-place element-wise addition: a[i] += b[i], one element per thread.
__global__ void sum (float* a, float* b)
{
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    a[gid] += b[gid];
}
// Initializes two device arrays with their indices, times the in-place
// sum kernel with CUDA events, and prints the result array (expected:
// a[i] == 2*i). Fixes vs. original: kernel launches are followed by
// cudaGetLastError() (launch-config errors were silently dropped) and
// the calloc result is checked.
int main(int argc, char* argv[]){
    float *da, *db, *ha;
    int num_of_blocks = 1 << 2, threads_per_block = 1 << 2;
    int N = num_of_blocks * threads_per_block;
    float elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Host result buffer, zero-initialized.
    ha = (float*)calloc(N, sizeof(float));
    if (ha == NULL) {
        fprintf(stderr, "calloc failed\n");
        return 1;
    }
    CUDA_CHECK_RETURN(cudaMalloc((void**)&da, N * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&db, N * sizeof(float)));
    // Seed both device arrays with their linear indices.
    gTest<<<dim3(num_of_blocks), dim3(threads_per_block)>>>(da);
    CUDA_CHECK_RETURN(cudaGetLastError());
    gTest<<<dim3(num_of_blocks), dim3(threads_per_block)>>>(db);
    CUDA_CHECK_RETURN(cudaGetLastError());
    // Time the summation kernel between the two events.
    cudaEventRecord(start, 0);
    sum<<<dim3(num_of_blocks), dim3(threads_per_block)>>>(da, db);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    CUDA_CHECK_RETURN(cudaGetLastError());
    cudaEventElapsedTime(&elapsedTime, start, stop);
    fprintf(stderr, "gTest took %g\n", elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    CUDA_CHECK_RETURN(cudaMemcpy(ha, da, N * sizeof(float), cudaMemcpyDeviceToHost));
    for (int i = 0; i < N; i++)
        printf("%g\n", ha[i]);
    free(ha);
    cudaFree(da);
    cudaFree(db);
    return 0;
}
|
9,220 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <iostream>
#include <algorithm>
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
// Builds a histogram of arr[0..size) into devCount via atomic
// increments; each thread walks the array with a grid-stride loop.
// devCount must be pre-zeroed and large enough for every key value.
__global__ void Histohram(int* devCount, int* arr, int size)
{
    const int stride = blockDim.x * gridDim.x;
    for (int pos = blockDim.x * blockIdx.x + threadIdx.x; pos < size; pos += stride)
        atomicAdd(devCount + arr[pos], 1);
}
// Scatter phase of counting sort: devScan holds the inclusive prefix
// sums of the histogram; each occurrence of a key atomically claims the
// next free slot (counting down) and stores the key there.
__global__ void CountSort(int* devScan, int* arr, int* out, int size)
{
    const int stride = blockDim.x * gridDim.x;
    for (int pos = blockDim.x * blockIdx.x + threadIdx.x; pos < size; pos += stride)
    {
        const int key = arr[pos];
        const int slot = atomicAdd(devScan + key, -1);
        out[slot - 1] = key;
    }
}
const int BLOCK_SIZE = 1024;
// Per-block prefix sum in shared memory (up-sweep / down-sweep style).
// On exit, devArr holds the block-local inclusive scan and
// newDevArr[blockIdx.x] holds the block's total, so a recursive pass
// over newDevArr plus KernelBlockShift yields a full-array scan.
// Requires blockDim.x <= BLOCK_SIZE; the caller sizes the grid so every
// idx is in range.
__global__ void KernelBlockScan(int* devArr, int* newDevArr)
{
int blockSize = blockDim.x;
__shared__ int arr[BLOCK_SIZE];
int idx = blockDim.x * blockIdx.x + threadIdx.x;
// Stage this block's slice into shared memory.
arr[threadIdx.x] = devArr[idx];
__syncthreads();
// Up-sweep: build partial sums in place over strides d = 1, 2, 4, ...
int d = 1;
while (d < blockSize)
{
if (2 * d + threadIdx.x * 2 * d - 1 < blockSize)
arr[2 * d + threadIdx.x * 2 * d - 1] += arr[d + threadIdx.x * 2 * d - 1];
d *= 2;
__syncthreads();
}
// The last element now holds the block total; save it and clear the
// root to start the exclusive down-sweep.
int last = 0;
if (threadIdx.x == blockSize - 1)
{
last = arr[threadIdx.x];
arr[threadIdx.x] = 0;
}
d /= 2;
__syncthreads();
// Down-sweep: propagate left sums down the tree, turning arr into the
// exclusive scan of the original slice.
while (d >= 1)
{
if (d * 2 * threadIdx.x + 2 * d - 1 < blockSize)
{
auto t = arr[d * 2 * threadIdx.x + d - 1];
arr[d * 2 * threadIdx.x + d - 1] = arr[d * 2 * threadIdx.x + 2 * d - 1];
arr[d * 2 * threadIdx.x + 2 * d - 1] += t;
}
d /= 2;
__syncthreads();
}
// Shift by one on write-back so devArr ends up holding the INCLUSIVE
// scan: element i receives arr[i + 1]; the last thread writes the
// saved block total both to devArr and to the per-block totals array.
if (threadIdx.x == blockSize - 1)
{
devArr[idx] = last;
newDevArr[blockIdx.x] = last;
}
else
{
devArr[idx] = arr[threadIdx.x + 1];
}
}
// Adds the scanned total of all preceding blocks (newArr[blockIdx-1])
// to every element of this block; block 0 needs no offset.
__global__ void KernelBlockShift(int* devArr, int* newArr)
{
    if (blockIdx.x > 0)
    {
        const int gid = blockDim.x * blockIdx.x + threadIdx.x;
        devArr[gid] += newArr[blockIdx.x - 1];
    }
}
// Returns the larger of two ints.
int Max(int a, int b)
{
    if (a > b)
        return a;
    return b;
}
// Returns the smaller of two ints.
int Min(int a, int b)
{
    if (a < b)
        return a;
    return b;
}
// Recursive device-array prefix sum: scans each BLOCK_SIZE chunk with
// KernelBlockScan, recursively scans the per-block totals, then shifts
// each block by the preceding totals with KernelBlockShift.
// NOTE(review): the size / BLOCK_SIZE arithmetic assumes `size` is a
// power-of-BLOCK_SIZE multiple (the caller uses MAX_NUMBER + 1 = 2^24,
// which satisfies this) — confirm before reusing with other sizes.
void Scan(int* devCount, int size)
{
int blockCount = Max(1, size / BLOCK_SIZE);
int blockSize = Min(size, BLOCK_SIZE);
// Temporary array of per-block totals for the recursive level.
int* newDevCount;
cudaMalloc((void**)&newDevCount, sizeof(int) * blockCount);
KernelBlockScan<<< blockCount, blockSize >>>(devCount, newDevCount);
cudaDeviceSynchronize();
if (size > BLOCK_SIZE)
{
// Scan the block totals, then add each block's prefix back in.
Scan(newDevCount, size / BLOCK_SIZE);
KernelBlockShift<<<size / BLOCK_SIZE, BLOCK_SIZE >>>(devCount, newDevCount);
cudaDeviceSynchronize();
}
cudaFree(newDevCount);
}
using namespace std;
const int MAX_NUMBER = 16777215;
// Debug kernel: every launched thread bumps devInt[0] by one.
__global__ void TestAdd(int* devInt)
{
    atomicAdd(devInt, 1);
}
// GPU counting sort of 24-bit keys. Binary stdin: int32 count followed
// by `count` int32 keys in [0, MAX_NUMBER]; sorted keys are written to
// stdout in the same binary format.
// Fixes vs. original: fread result checked, all host/device allocations
// released, explicit return value.
int main(int argc, const char** argv)
{
    int size;
    // A truncated stream previously left `size` uninitialized.
    if (fread(&size, sizeof(int), 1, stdin) != 1)
    {
        fprintf(stderr, "failed to read element count\n");
        return 1;
    }
    auto hostArray = new int[size];
    fread(hostArray, sizeof(int), size, stdin);
    // Histogram of all key values (one counter per possible key).
    int* devCount;
    CSC(cudaMalloc((void**)&devCount, sizeof(int) * (MAX_NUMBER + 1)));
    CSC(cudaMemset(devCount, 0, sizeof(int) * (MAX_NUMBER + 1)));
    int* devArray;
    CSC(cudaMalloc((void**)&devArray, sizeof(int) * size));
    CSC(cudaMemcpy(devArray, hostArray, sizeof(int) * size, cudaMemcpyHostToDevice));
    Histohram<<<256, 256>>>(devCount, devArray, size);
    cudaDeviceSynchronize();
    // Prefix-sum the histogram to get each key's final slot.
    Scan(devCount, MAX_NUMBER + 1);
    int* outDevArray;
    CSC(cudaMalloc((void**)&outDevArray, sizeof(int) * size));
    CountSort<<<256, 256>>>(devCount, devArray, outDevArray, size);
    cudaDeviceSynchronize();
    CSC(cudaMemcpy(hostArray, outDevArray, sizeof(int) * size, cudaMemcpyDeviceToHost));
    fwrite(hostArray, sizeof(int), size, stdout);
    // The original leaked every allocation.
    delete[] hostArray;
    CSC(cudaFree(devCount));
    CSC(cudaFree(devArray));
    CSC(cudaFree(outDevArray));
    return 0;
}
9,221 | #include "includes.h"
// Per-case softmax evaluation metrics. For each of numCases columns of
// the numOut x numCases probability matrix `probs` (column = case), it
// writes: labelLogProbs = log p(true label), top5Probs = probability
// mass credit for the true label landing in the top `setSize` slots
// (ties split evenly), correctProbs = top-1 credit (1/numEqual when the
// label ties the max, else 0). LOGREG_ERR_THREADS_X is the block width
// macro defined elsewhere in the project; one thread handles one case.
__global__ void kMultiSoftmaxCost(float* probs, float* labels, float* maxProbs, float* labelLogProbs, float* correctProbs, float* top5Probs, const int numCases, const int numOut, const int setSize) {
const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x;
if (tx < numCases) {
const int label = int(labels[tx]);
const float maxp = maxProbs[tx];
const float labelp = probs[label * numCases + tx];
// Fast (reduced-precision) log of the true label's probability.
labelLogProbs[tx] = __logf(labelp);
// Count outputs strictly above / exactly equal to the label's prob;
// exact float equality is intentional here (same values compared).
int numBiggerProbs = 0, numEqualsProbs = 0;
for (int i = 0; i < numOut; ++i) {
numBiggerProbs += probs[i * numCases + tx] > labelp;
numEqualsProbs += probs[i * numCases + tx] == labelp;
}
// Slots of the top set not already taken by strictly larger probs;
// ties share the remaining slots proportionally.
const int slotsLeft = setSize - numBiggerProbs;
top5Probs[tx] = slotsLeft <= 0.0f ? 0.0f : (numEqualsProbs <= slotsLeft ? 1.0f : float(slotsLeft) / numEqualsProbs);
correctProbs[tx] = labelp != maxp ? 0.0f : 1.0f / float(numEqualsProbs);
}
}
9,222 | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Element-wise vector addition on the GPU: c[i] = a[i] + b[i].
// Must be launched so that gridDim.x * blockDim.x == element count:
// there is no length parameter, so the grid must cover the array
// exactly.
__global__ void AddVecGPU(float* c, float* a, float* b)
{
    // Global linear index. The original used threadIdx.x alone, which
    // is only correct for single-block launches.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
int main()
{
    const int N = 1024 * 1024;
    float* a = new float[N];
    float* b = new float[N];
    float* c = new float[N];
    for (int i = 0; i < N; ++i)
    {
        // rand() yields 0..RAND_MAX; scale into [0.0, 1.0].
        a[i] = rand() / (float)RAND_MAX;
        b[i] = -a[i];   // so the exact sum a[i] + b[i] is 0
        c[i] = 0.0;
    }
    // 1. Select the GPU.
    cudaError_t cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess)
    {
        printf("Error\n");
        return 1;
    }
    // 2. Allocate device buffers.
    float* dev_a, * dev_b, * dev_c;
    cudaMalloc((void**)&dev_a, sizeof(float) * N);
    cudaMalloc((void**)&dev_b, sizeof(float) * N);
    cudaMalloc((void**)&dev_c, sizeof(float) * N);
    // 3. Copy host arrays to the device.
    cudaMemcpy(dev_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, c, sizeof(float) * N, cudaMemcpyHostToDevice);
    // 4. Launch the kernel. BUGFIX: the original used <<<1, N>>> with
    // N = 1M, which exceeds the 1024-threads-per-block hardware limit —
    // the launch failed silently and the kernel never ran (the check
    // below then passed only because c stayed all-zero).
    int threadsPerBlock = 256;
    int blocks = N / threadsPerBlock;   // exact: N is a multiple of 256
    AddVecGPU<<<blocks, threadsPerBlock>>>(dev_c, dev_a, dev_b);
    // Wait for the kernel to finish.
    cudaDeviceSynchronize();
    // 5. Copy the result back from GPU to CPU.
    cudaMemcpy(c, dev_c, sizeof(float) * N, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; ++i)
    {
        // printf("c[%d] = %f\n", i, c[i]);
        if (c[i] != 0.0)
            printf("Error\n");
    }
    delete[] a;
    delete[] b;
    delete[] c;
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
}
9,223 | #include "includes.h"
// Among rows belonging to group search_depth + 1 whose deletion level
// is still below that depth, record the maximum deletion level in
// *conflict_node_id. Threads of the block stride over the rows.
__device__ void get_conflict_node_id(short *deleted_rows, int *row_group, const int search_depth, int *conflict_node_id, const int total_dl_matrix_row_num) {
  const int next_depth = search_depth + 1;
  for (int row = threadIdx.x; row < total_dl_matrix_row_num; row += blockDim.x) {
    const bool in_group = (row_group[row] == next_depth);
    if (in_group && deleted_rows[row] < next_depth) {
      atomicMax(conflict_node_id, deleted_rows[row]);
    }
  }
}
// Kernel variant of get_conflict_node_id (int deleted_rows): records in
// *conflict_node_id the maximum deletion level among rows of group
// search_depth + 1 that were deleted before that depth, then barriers.
__global__ void get_conflict_node_id(int *deleted_rows, int *row_group, const int search_depth, int *conflict_node_id, const int total_dl_matrix_row_num) {
  const int next_depth = search_depth + 1;
  for (int row = threadIdx.x; row < total_dl_matrix_row_num; row += blockDim.x) {
    const bool in_group = (row_group[row] == next_depth);
    if (in_group && deleted_rows[row] < next_depth) {
      atomicMax(conflict_node_id, deleted_rows[row]);
    }
  }
  __syncthreads();
}
9,224 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of sum
// CUDA kernel: sum[i] = a[i] + b[i] for i < n, one element per thread.
__global__ void vector_add(double *a, double *b, double *sum, int n)
{
    // Global thread ID; guard the partially-filled last block.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n)
        return;
    sum[tid] = a[tid] + b[tid];
}
// Allocates two random 100000-element vectors, adds them on the GPU and
// prints every term and sum. NOTE(review): no CUDA call is error
// checked here; the blocking cudaMemcpy back to the host is what
// implicitly waits for the kernel to finish.
int main()
{
// Size of vectors
int n = 100000;
// Host input vectors
double *host_p;
double *host_q;
//Host output vector
double *host_sum;
// Device input vectors
double *device_p;
double *device_q;
//Device output vector
double *device_sum;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
host_p = (double*)malloc(bytes);
host_q = (double*)malloc(bytes);
host_sum = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&device_p, bytes);
cudaMalloc(&device_q, bytes);
cudaMalloc(&device_sum, bytes);
int i;
// Initialize vectors on host with random values in [0, 100)
for( i = 0; i < n; i++ ) {
host_p[i] =((float)rand()/(float)RAND_MAX)*100;
host_q[i] =((float)rand()/(float)RAND_MAX)*100;
}
// Copy host vectors to device
cudaMemcpy( device_p, host_p, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( device_q, host_q, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid (ceiling division covers the tail)
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
vector_add<<<gridSize, blockSize>>>(device_p, device_q, device_sum, n);
// Copy array back to host (blocking copy also synchronizes the kernel)
cudaMemcpy( host_sum, device_sum, bytes, cudaMemcpyDeviceToHost );
for(i=0; i<n; i++)
printf("%f+%f=%f\n",host_p[i],host_q[i],host_sum[i]);
// Release device memory
cudaFree(device_p);
cudaFree(device_q);
cudaFree(device_sum);
// Release host memory
free(host_p);
free(host_q);
free(host_sum);
return 0;
}
|
9,225 | #include "includes.h"
// Single-block in-place EXCLUSIVE prefix sum of ind[0..ne): a
// Hillis-Steele inclusive scan in shared memory (scan distance bounded
// by nbn), then shifted by one on write-back. Launch with dynamic
// shared memory of at least blockDim.x floats.
// Fixes vs. original: (1) threads with index >= ne previously returned
// before __syncthreads(), a divergent-barrier hazard when ne <
// blockDim.x; now they stay and only the loads/stores are guarded.
// (2) The in-place "dats[i] += dats[i - q]" step raced — a thread could
// read a partner element already updated in the same step; the partner
// value is now read into a register with a barrier before the write.
// NOTE(review): the accumulator is float while `ind` is unsigned int,
// so values above 2^24 lose precision — preserved from the original,
// confirm this is acceptable.
__global__ void kernel_s(unsigned int * ind, const size_t nbn, const unsigned int ne)
{
    const int tid = threadIdx.x;
    extern __shared__ float dats[];
    const bool active = (tid < ne);
    if (active) dats[tid] = ind[tid];
    __syncthreads();
    for (int q = 1; q < nbn; q *= 2) {
        const bool adds = active && (tid >= q);
        float partner = 0.0f;
        if (adds) partner = dats[tid - q];   // snapshot before anyone writes
        __syncthreads();
        if (adds) dats[tid] += partner;
        __syncthreads();
    }
    // Shift the inclusive scan to an exclusive one on write-back.
    if (active) {
        if (tid == 0) ind[0] = 0;
        else ind[tid] = dats[tid - 1];
    }
}
9,226 | //#include <cudaDefs.h>
//#include <time.h>
//#include <math.h>
//#include <random>
//
////WARNING!!! Do not change TPB and NO_FORCES for this demo !!!
//constexpr unsigned int TPB = 128; //thred per block
//constexpr unsigned int NO_FORCES = 256;
//constexpr unsigned int NO_RAIN_DROPS = 1 << 20;
//
//constexpr unsigned int MEM_BLOCKS_PER_THREAD_BLOCK = 8;
//constexpr unsigned int THREAD_PER_BLOCK = 50;
//
//cudaError_t error = cudaSuccess;
//cudaDeviceProp deviceProp = cudaDeviceProp();
//
//using namespace std;
//
//float3 *createData(const unsigned int length)
//{
// //TODO: Generate float3 vectors. You can use 'make_float3' method.
// float3 *data = static_cast<float3*>(::operator new(sizeof(float3)*length));
///*
// std::random_device rd;
// std::mt19937 gen(rd());
// std::uniform_int_distribution<float> dis(0.0f, 1.0f);
//*/
// for (unsigned int i = 0; i < length; i++)
// {
///*
//
// data[i].x = dis(gen);
// data[i].y = dis(gen);
// data[i].z = dis(gen);
// */
// data[i] = make_float3(1, 1, 1);
//
// }
//
// return data;
//}
//
//void printData(const float3 *data, const unsigned int length)
//{
// if (data == 0) return;
// const float3 *ptr = data;
// for (unsigned int i = 0; i<length; i++, ptr++)
// {
// printf("%5.2f %5.2f %5.2f ", ptr->x, ptr->y, ptr->z);
// }
//}
//
//////////////////////////////////////////////////////////////////////////////////////////////////////
///// <summary> Sums the forces to get the final one using parallel reduction.
///// WARNING!!! The method was written to meet input requirements of our example, i.e. 128 threads and 256 forces </summary>
///// <param name="dForces"> The forces. </param>
///// <param name="noForces"> The number of forces. </param>
///// <param name="dFinalForce"> [in,out] If non-null, the final force. </param>
//////////////////////////////////////////////////////////////////////////////////////////////////////
//__global__ void reduce(const float3 * __restrict__ dForces, const unsigned int noForces, float3* __restrict__ dFinalForce)
//{
// __shared__ float3 sForces[TPB]; //SEE THE WARNING MESSAGE !!!
// unsigned int tid = threadIdx.x;
// unsigned int next = TPB; //SEE THE WARNING MESSAGE !!!
//
//
//
//
// float3* src = (float3*)&sForces[tid];
// float3* src2 = (float3*)&dForces[tid + next];
//
//
// *src = dForces[tid];
//
//
//
// src->x += src2->x;
// src->y += src2->y;
// src->z += src2->z;
//
// __syncthreads();
//
// next >>= 1; // 64
// if (tid >= next) return;
//
// src2 = src + next;
//
// src->x += src2->x;
// src->y += src2->y;
// src->z += src2->z;
//
// __syncthreads();
//
// next >>= 1; // 32
// if (tid >= next) return;
//
// src2 = src + next;
//
// src->x += src2->x;
// src->y += src2->y;
// src->z += src2->z;
//
//
// volatile float3 *vsrc = &sForces[tid];
// volatile float3 *vsrc2 = vsrc + next;
//
// next >>= 1; // 16
// if (tid >= next) return;
//
// vsrc2 = vsrc + next;
//
// vsrc->x += vsrc2->x;
// vsrc->y += vsrc2->y;
// vsrc->z += vsrc2->z;
//
// next >>= 1; // 8
// if (tid >= next) return;
//
// vsrc2 = vsrc + next;
//
// vsrc->x += vsrc2->x;
// vsrc->y += vsrc2->y;
// vsrc->z += vsrc2->z;
//
// next >>= 1; // 4
// if (tid >= next) return;
//
// vsrc2 = vsrc + next;
//
// vsrc->x += vsrc2->x;
// vsrc->y += vsrc2->y;
// vsrc->z += vsrc2->z;
//
// next >>= 1; // 2
// if (tid >= next) return;
//
// vsrc2 = vsrc + next;
//
// vsrc->x += vsrc2->x;
// vsrc->y += vsrc2->y;
// vsrc->z += vsrc2->z;
//
// next >>= 1; // 1
// if (tid >= next) return;
//
// vsrc2 = vsrc + next;
//
// vsrc->x += vsrc2->x;
// vsrc->y += vsrc2->y;
// vsrc->z += vsrc2->z;
//
// if (tid == 0)
// {
// dFinalForce->x = vsrc->x;
// dFinalForce->y = vsrc->y;
// dFinalForce->z = vsrc->z;
// }
//}
//
//////////////////////////////////////////////////////////////////////////////////////////////////////
///// <summary> Adds the FinalForce to every Rain drops position. </summary>
///// <param name="dFinalForce"> The final force. </param>
///// <param name="noRainDrops"> The number of rain drops. </param>
///// <param name="dRainDrops"> [in,out] If non-null, the rain drops positions. </param>
//////////////////////////////////////////////////////////////////////////////////////////////////////
//__global__ void add(const float3* __restrict__ dFinalForce, const unsigned int noRainDrops, float3* __restrict__ dRainDrops)
//{
//
// unsigned int bid = blockIdx.x * MEM_BLOCKS_PER_THREAD_BLOCK + threadIdx.x;
//#pragma unroll MEM_BLOCKS_PER_THREAD_BLOCK
// for (size_t i = 0; i < MEM_BLOCKS_PER_THREAD_BLOCK; i++)
// {
// auto tid = bid + i;
// if (tid >= noRainDrops)
// return;
// dRainDrops[tid].x += dFinalForce->x;
// dRainDrops[tid].y += dFinalForce->y;
// dRainDrops[tid].z += dFinalForce->z;
// }
//}
//
//
//int main(int argc, char *argv[])
//{
// initializeCUDA(deviceProp);
//
// cudaEvent_t startEvent, stopEvent;
// float elapsedTime;
//
// cudaEventCreate(&startEvent);
// cudaEventCreate(&stopEvent);
// cudaEventRecord(startEvent, 0);
//
// float3 *hForces = createData(NO_FORCES);
// float3 *hDrops = createData(NO_RAIN_DROPS);
//
// float3 *dForces = nullptr;
// float3 *dDrops = nullptr;
// float3 *dFinalForce = nullptr;
//
// error = cudaMalloc((void**)&dForces, NO_FORCES * sizeof(float3));
// error = cudaMemcpy(dForces, hForces, NO_FORCES * sizeof(float3), cudaMemcpyHostToDevice);
//
// error = cudaMalloc((void**)&dDrops, NO_RAIN_DROPS * sizeof(float3));
// error = cudaMemcpy(dDrops, hDrops, NO_RAIN_DROPS * sizeof(float3), cudaMemcpyHostToDevice);
//
// error = cudaMalloc((void**)&dFinalForce, sizeof(float3));
//
// KernelSetting ksReduce;
//
// ksReduce.dimGrid = dim3(1, 1,1);
// ksReduce.dimBlock = dim3(TPB, 1, 1);
//
//
// KernelSetting ksAdd;
// ksAdd.dimGrid = dim3(TPB, 1,1);
// ksAdd.dimBlock = dim3(getNumberOfParts(NO_RAIN_DROPS, TPB * MEM_BLOCKS_PER_THREAD_BLOCK), 1, 1);
//
// for (unsigned int i = 0; i<1000; i++)
// {
// reduce<<<ksReduce.dimGrid, ksReduce.dimBlock>>>(dForces, NO_FORCES, dFinalForce);
// add<<<ksAdd.dimGrid, ksAdd.dimBlock>>>(dFinalForce, NO_RAIN_DROPS, dDrops);
// }
//
// checkDeviceMatrix<float>((float*)dFinalForce, sizeof(float3), 1, 3, "%5.2f ", "Final force");
// checkDeviceMatrix<float>((float*)dDrops, sizeof(float3), NO_RAIN_DROPS, 3, "%5.2f ", "Final Rain Drops");
//
// if (hForces)
// free(hForces);
// if (hDrops)
// free(hDrops);
//
// cudaFree(dForces);
// cudaFree(dDrops);
//
// cudaEventRecord(stopEvent, 0);
// cudaEventSynchronize(stopEvent);
//
// cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent);
// cudaEventDestroy(startEvent);
// cudaEventDestroy(stopEvent);
//
// printf("Time to get device properties: %f ms", elapsedTime);
//}
|
9,227 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 1000000
//Kernel to perform the multiplication of array elements
// Kernel: element-wise product c[i] = a[i] * b[i] for the first N
// elements; one element per thread, tail guarded.
__global__ void vect_mul(int*a, int *b, int*c)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= N)
        return;
    c[gid] = a[gid] * b[gid];
}
// CPU reference for the vect_mul kernel: c[i] = a[i] * b[i].
// BUGFIX: the original computed a[i] = b[i] * c[i], which overwrote the
// input vector `a` using the uninitialized output buffer `c`; the
// caller invokes vect_mul_cpu(a, b, c) to mirror the GPU kernel, which
// writes its product into the third argument.
void vect_mul_cpu(int* a, int* b, int* c)
{
    for(int i=0;i<N;i++)
    {
        c[i]=a[i]*b[i];
    }
}
// Fills all N slots of ptr with pseudo-random values in [0, 9999] and
// returns the same pointer for caller convenience.
int* initialize(int* ptr)
{
    for (int idx = 0; idx < N; ++idx)
        ptr[idx] = rand() % 10000;
    return ptr;
}
// Benchmarks element-wise vector multiplication on GPU (CUDA events)
// vs. CPU (clock()) over N random ints and prints both timings.
int main()
{
//Array declaration and initialization for CPU side
int *a = (int*)malloc(N*sizeof(int));
int *b = (int*)malloc(N*sizeof(int));
//Initializing array with random values
a=initialize(a);
b=initialize(b);
int* c = (int*)malloc(N*sizeof(int)); //For cpu execution
int *ptr_c_cpu =(int*)malloc(N*sizeof(int)); //For copying value from gpu to cpu
int *ptr_a_cpu = a;
int *ptr_b_cpu = b;
// CUDA events bracket the kernel launch for GPU timing.
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//Declaring pointer variables for the GPU
int *ptr_a_gpu;
int *ptr_b_gpu;
int *ptr_c_gpu;
//Allocating memory to pointer variables in the GPU
cudaMalloc((void **)&ptr_a_gpu, N * sizeof(int));
cudaMalloc((void **)&ptr_b_gpu, N * sizeof(int));
cudaMalloc((void **)&ptr_c_gpu, N * sizeof(int));
//Copying contents of variable from cpu(host) to gpu(device)
// NOTE(review): ptr_c_cpu is uninitialized here; its copy is harmless
// only because the kernel overwrites every element of ptr_c_gpu.
cudaMemcpy(ptr_a_gpu, ptr_a_cpu, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(ptr_b_gpu, ptr_b_cpu, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(ptr_c_gpu, ptr_c_cpu, N * sizeof(int), cudaMemcpyHostToDevice);
//Calling the kernel to perform execution on gpu
// 1000 blocks x 1000 threads = 1e6 threads, exactly covering N.
cudaEventRecord(start);
vect_mul<<<1000,1000>>>(ptr_a_gpu,ptr_b_gpu,ptr_c_gpu);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
//Copying the result array from gpu(device) to cpu(host)
cudaMemcpy(ptr_c_cpu,ptr_c_gpu,N*sizeof(int),cudaMemcpyDeviceToHost);
float time_taken_gpu = 0.0;
cudaEventElapsedTime(&time_taken_gpu, start, stop);
// CPU baseline, timed with clock().
clock_t t;
t=clock();
vect_mul_cpu(a,b,c); //Calling the function for cpu execution
t=clock()-t;
double time_taken_cpu = ((double)t)/CLOCKS_PER_SEC; //Recorded in seconds
time_taken_cpu*=1000.0; // convert to milliseconds to match the GPU figure
printf("Time taken by CPU:%f ms\n",time_taken_cpu);
printf("Time taken by GPU:%f ms\n",time_taken_gpu);
//Freeing space in host memory
free(ptr_c_cpu);
free(a);
free(b);
free(c);
//Freeing space in gpu
cudaFree(ptr_a_gpu);
cudaFree(ptr_b_gpu);
cudaFree(ptr_c_gpu);
}
|
9,228 | #include <stdio.h>
// Macro for checking errors in CUDA API calls
#define cudaErrorCheck(call) \
do{ \
cudaError_t cuErr = call; \
if(cudaSuccess != cuErr){ \
printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErr));\
exit(0); \
} \
}while(0)
// Size of array
#define N 1048576
// Kernel
// Kernel: c[i] = a[i] + b[i] over N-element arrays, one element per
// thread with a tail guard.
__global__ void add_vectors_cuda(double *a, double *b, double *c)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= N)
        return;
    c[gid] = a[gid] + b[gid];
}
// Main program
// Adds two constant vectors (1.0 + 2.0) on the GPU, verifies every
// element equals 3.0 within tolerance, and prints the launch geometry.
// Error handling follows best practice: every API call is wrapped in
// cudaErrorCheck, and the launch is checked both synchronously
// (cudaGetLastError) and asynchronously (cudaDeviceSynchronize).
int main()
{
// Number of bytes to allocate for N doubles
size_t bytes = N*sizeof(double);
// Allocate memory for arrays A, B, and C on host
double *A = (double*)malloc(bytes);
double *B = (double*)malloc(bytes);
double *C = (double*)malloc(bytes);
// Allocate memory for arrays d_A, d_B, and d_C on device
double *d_A, *d_B, *d_C;
cudaErrorCheck( cudaMalloc(&d_A, bytes) );
cudaErrorCheck( cudaMalloc(&d_B, bytes) );
cudaErrorCheck( cudaMalloc(&d_C, bytes) );
// Fill host arrays A, B, and C
for(int i=0; i<N; i++)
{
A[i] = 1.0;
B[i] = 2.0;
C[i] = 0.0;
}
// Copy data from host arrays A and B to device arrays d_A and d_B
cudaErrorCheck( cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice) );
cudaErrorCheck( cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice) );
// Set execution configuration parameters
// thr_per_blk: number of CUDA threads per grid block
// blk_in_grid: number of blocks in grid (ceiling division covers the tail)
int thr_per_blk = 256;
int blk_in_grid = ceil( float(N) / thr_per_blk );
// Launch kernel
add_vectors_cuda<<< blk_in_grid, thr_per_blk >>>(d_A, d_B, d_C);
// Check for errors in kernel launch (e.g. invalid execution configuration paramters)
cudaError_t cuErrSync = cudaGetLastError();
// Check for errors on the GPU after control is returned to CPU
cudaError_t cuErrAsync = cudaDeviceSynchronize();
if (cuErrSync != cudaSuccess) { printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErrSync)); exit(0); }
if (cuErrAsync != cudaSuccess) { printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErrAsync)); exit(0); }
// Copy data from device array d_C to host array C
cudaErrorCheck( cudaMemcpy(C, d_C, bytes, cudaMemcpyDeviceToHost) );
// Verify results (tolerance is generous; 1.0 + 2.0 is exact in binary)
double tolerance = 1.0e-14;
for(int i=0; i<N; i++)
{
if( fabs(C[i] - 3.0) > tolerance )
{
printf("Error: value of C[%d] = %f instead of 3.0\n", i, C[i]);
exit(-1);
}
}
// Free CPU memory
free(A);
free(B);
free(C);
// Free GPU memory
cudaErrorCheck( cudaFree(d_A) );
cudaErrorCheck( cudaFree(d_B) );
cudaErrorCheck( cudaFree(d_C) );
printf("\n---------------------------\n");
printf("__SUCCESS__\n");
printf("---------------------------\n");
printf("N = %d\n", N);
printf("Threads Per Block = %d\n", thr_per_blk);
printf("Blocks In Grid = %d\n", blk_in_grid);
printf("---------------------------\n\n");
return 0;
}
|
9,229 | #include <stdio.h>
#define BSIZE 256
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// Transposes the row-major nRows x nCols matrix devData into
// devTranData (nCols x nRows). Designed for a single block of BSIZE
// threads: thread t handles rows t, t + BSIZE, t + 2*BSIZE, ...
__global__ void transpose(int nRows, int nCols, float* devData, float* devTranData) {
    for (int row = threadIdx.x; row < nRows; row += BSIZE) {
        const float* src = devData + row * nCols;
        for (int col = 0; col < nCols; ++col)
            devTranData[col * nRows + row] = src[col];
    }
}
// Host wrapper: copies `data` (row-major nRows x nCols) to the device,
// transposes it with a single-block kernel, and copies the result into
// `transposedData`. All device buffers are allocated and freed here.
void transposeData(int nRows, int nCols, float* data, float* transposedData) {
    float* devData;
    CHECK(cudaMalloc((float**)&devData, sizeof(float)*nRows*nCols));
    CHECK(cudaMemcpy(devData, data, sizeof(float)*nRows*nCols, cudaMemcpyHostToDevice));
    float* devTranData;
    CHECK(cudaMalloc((float**)&devTranData, sizeof(float)*nRows*nCols));
    transpose<<<1, BSIZE>>>(nRows, nCols, devData, devTranData);
    // The launch itself can fail (e.g. invalid configuration); the
    // original never checked it, so such failures would surface later
    // as a confusing memcpy error.
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(transposedData, devTranData, sizeof(float)*nRows*nCols, cudaMemcpyDeviceToHost));
    CHECK(cudaFree(devData));
    CHECK(cudaFree(devTranData));
}
9,230 | #include <fstream>
#include <sstream>
#include <iostream>
#include <cuda.h>
#include <cmath>
#include <vector>
using namespace std;
const int T_P_B = 256;
const float PI = 3.14159265358979f;
/*
module purge
module load gcc/4.9.0
module load cmake/3.9.1
module load cuda
qsub -I -q coc-ice -l nodes=1:ppn=2:gpus=1,walltime=12:00:00,pmem=2gb
./p3 forward Tower256.txt out256.txt
*/
// Minimal complex-number value type usable from both host and device
// code (real + imag floats, value semantics).
class gComplex {
public:
// Default: zero. Single-float: pure real. Two-float: real + imag.
__device__ __host__ gComplex() : real(0.0f), imag(0.0f) {}
__device__ __host__ gComplex(float r, float i) : real(r), imag(i) {}
__device__ __host__ gComplex(float r) : real(r), imag(0.0f) {}
// Component-wise complex addition.
__device__ __host__ gComplex operator+(const gComplex& b) const {
gComplex res;
res.real = this->real + b.real;
res.imag = this->imag + b.imag;
return res;
}
// Component-wise complex subtraction.
__device__ __host__ gComplex operator-(const gComplex& b) const {
gComplex res;
res.real = this->real - b.real;
res.imag = this->imag - b.imag;
return res;
}
// Complex multiplication: (ac - bd) + (ad + bc)i.
__device__ __host__ gComplex operator*(const gComplex& b) const {
gComplex res;
res.real = this->real * b.real - this->imag * b.imag;
res.imag = this->real * b.imag + this->imag * b.real;
return res;
}
// Magnitude |z| = sqrt(re^2 + im^2), returned as a pure-real gComplex.
__device__ __host__ gComplex mag() const {
gComplex res;
res.real = sqrt((this->real * this->real) + (this->imag * this->imag));
res.imag = 0;
return res;
}
// Phase angle of z, quadrant-corrected from atan(im/re), returned as a
// pure-real gComplex. Zero input maps to angle 0.
__device__ __host__ gComplex angle() const {
gComplex res;
// (0, 0)
if (this->real == 0 && this->imag == 0) {
res.real = 0;
res.imag = 0;
return res;
}
float temp = this->imag / this->real;
res.real = atan(temp);
if (this->real < 0 && this->imag >= 0) { // 2nd quadrant
res.real = res.real + PI;
} else if (this->real < 0 && this->imag < 0) { // 3rd quadrant
res.real = res.real - PI;
} else if (this->real > 0 && this->imag < 0) { // 4th quadrant
res.real = -1 * res.real;
}
res.imag = 0;
return res;
}
// Complex conjugate: re - im*i.
__device__ __host__ gComplex conj() const {
gComplex res;
res.real = this->real;
res.imag = -1 * this->imag;
return res;
}
float real;
float imag;
};
// Streams a gComplex: components with magnitude below 1e-10 are snapped
// to zero first; pure reals print as a bare number, everything else as
// "(real,imag)".
std::ostream& operator<<(std::ostream& os, const gComplex& rhs) {
    gComplex value(rhs);
    if (fabsf(value.imag) < 1e-10) value.imag = 0.0f;
    if (fabsf(value.real) < 1e-10) value.real = 0.0f;
    if (value.imag == 0) {
        os << value.real;
    } else {
        os << "(" << value.real << "," << value.imag << ")";
    }
    return os;
}
// Loads a text image file ("width height" header followed by w*h real
// samples) into a flat array of gComplex and writes results back out.
class InputImage {
public:
// Reads the whole file into `data`; exits the process if the file
// cannot be opened.
InputImage(const char* filename) {
std::ifstream ifs(filename);
if(!ifs) {
std::cout << "Can't open image file " << filename << std::endl;
exit(1);
}
ifs >> w >> h;
data = new gComplex[w * h];
for(int r = 0; r < h; ++r) {
for(int c = 0; c < w; ++c) {
// Input samples are real-valued; imag is implicitly zero.
float real;
ifs >> real;
data[r * w + c] = gComplex(real);
}
}
}
int get_width() const {
return w;
}
int get_height() const {
return h;
}
//returns a pointer to the image data. Note the return is a 1D
//array which represents a 2D image. The data for row 1 is
//immediately following the data for row 0 in the 1D array
gComplex* get_image_data() const {
return data;
}
//use this to save output from forward DFT
//(writes full complex values via operator<<; parameters shadow the
//members on purpose so any buffer can be saved)
void save_image_data(const char* filename, gComplex* d, int w, int h) {
std::ofstream ofs(filename);
if(!ofs) {
std::cout << "Can't create output image " << filename << std::endl;
return;
}
ofs << w << " " << h << std::endl;
for(int r = 0; r < h; ++r) {
for(int c = 0; c < w; ++c) {
ofs << d[r * w + c] << " ";
}
ofs << std::endl;
}
}
//use this to save output from reverse DFT (real parts only)
void save_image_data_real(const char* filename, gComplex* d, int w, int h) {
std::ofstream ofs(filename);
if(!ofs) {
std::cout << "Can't create output image " << filename << std::endl;
return;
}
ofs << w << " " << h << std::endl;
for (int r = 0; r < h; ++r) {
for (int c = 0; c < w; ++c) {
ofs << d[r * w + c].real << " ";
}
ofs << std::endl;
}
}
private:
int w;          // image width in samples
int h;          // image height in samples
gComplex* data; // row-major w*h sample buffer (owned, never freed)
};
// In-place transpose of a square matrix stored row-major in arr.
// NOTE(review): the side length is recovered as sqrt(size), so `size`
// must be a perfect square.
void transpose(gComplex* arr, int size) {
    const int n = sqrt(size);
    for (int row = 0; row < n; ++row)
        for (int col = row + 1; col < n; ++col)
            swap(arr[n * row + col], arr[n * col + row]);
}
// Row-wise 1D DFT: thread idx computes output bin `col` of row `row`
// for an N-point transform along each row of h, storing into H[idx].
// BUGFIX: the original accumulated directly into H[idx], which the
// caller allocates with cudaMalloc and never zeroes — the sums started
// from garbage. Accumulate in a register and store once instead.
__global__ void cudaDFT1D(gComplex* H, gComplex* h, int N) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int col = idx % N;
    int row = idx / N;
    int start = row * N;
    gComplex acc;   // default-constructed to (0, 0)
    for (int k = 0; k < N; k++) {
        // Twiddle factor W = exp(-2*pi*i * col * k / N).
        gComplex W(cos(2 * PI * col * k / float(N)), -sin(2 * PI * col * k / float(N)));
        acc = acc + W * h[start + k];
    }
    H[idx] = acc;
}
// Column-wise 1D DFT: thread idx computes output bin `row` of column
// `col` for an N-point transform down each column of h.
// NOTE(review): the idx -> (row, col) decomposition uses the same N for
// both axes, so this assumes a square image — confirm for non-square
// inputs.
// BUGFIX: accumulate in a register instead of into H[idx], which the
// caller cudaMalloc's without zeroing.
__global__ void cudaDFT1DCol(gComplex* H, gComplex* h, int N) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int col = idx % N;
    int row = idx / N;
    int start = col;
    gComplex acc;   // default-constructed to (0, 0)
    for (int k = 0; k < N; k++) {
        // Twiddle factor W = exp(-2*pi*i * row * k / N).
        gComplex W(cos(2 * PI * row * k / float(N)), -sin(2 * PI * row * k / float(N)));
        acc = acc + W * h[start + N*k];
    }
    H[idx] = acc;
}
// 2D forward DFT of the width x height image in `h`: row transforms,
// then column transforms, then writes the complex result to
// `outputName`. Requires width * height to be a multiple of T_P_B
// (the launches use size / T_P_B blocks with no remainder handling).
// BUGFIX: the DFT kernels accumulate into their output buffers, which
// cudaMalloc leaves uninitialized — zero them before the launches.
void cudaDFT2D(InputImage inputImage, gComplex* h, int width, int height, string outputName) {
    int size = width * height;
    gComplex* d_H;
    gComplex* d_h;
    cudaMalloc((void **) &d_h, size * sizeof(gComplex));
    cudaMalloc((void **) &d_H, size * sizeof(gComplex));
    cudaMemset(d_H, 0, size * sizeof(gComplex));   // all-zero bits == 0.0f
    cudaMemcpy(d_h, h, size * sizeof(gComplex), cudaMemcpyHostToDevice);
    cudaDFT1D<<<size / T_P_B, T_P_B>>>(d_H, d_h, width);
    cudaDeviceSynchronize();
    gComplex* d_H2;
    cudaMalloc((void **) &d_H2, size * sizeof(gComplex));
    cudaMemset(d_H2, 0, size * sizeof(gComplex));
    cudaDFT1DCol<<<size / T_P_B, T_P_B>>>(d_H2, d_H, height);
    cudaDeviceSynchronize();
    cudaMemcpy(h, d_H2, size * sizeof(gComplex), cudaMemcpyDeviceToHost);
    inputImage.save_image_data(outputName.c_str(), h, width, height);
    cudaFree(d_h);
    cudaFree(d_H);
    cudaFree(d_H2);
}
// Usage: <prog> <direction> <input.txt> <output.txt>
// Only the forward 2D DFT path is implemented; argv[1] is read but not
// otherwise used. Fix vs. original: missing arguments previously caused
// out-of-bounds argv access.
int main(int argc, char** argv) {
    if (argc < 4) {
        std::cout << "usage: " << argv[0] << " forward <input> <output>" << std::endl;
        return 1;
    }
    string forward = argv[1];
    string inputName = argv[2];
    string outputName = argv[3];
    InputImage inputImage(inputName.c_str());
    int width = inputImage.get_width(); // N
    int height = inputImage.get_height();
    gComplex* h = inputImage.get_image_data();
    cudaDFT2D(inputImage, h, width, height, outputName);
    return 0;
}
|
9,231 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
// Prints a failed CUDA call's message with its source location and
// terminates the process; no-op on cudaSuccess.
static void HandleError( cudaError_t err,
                         const char *file,
                         int line ) {
    if (err == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
            file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define imin(a,b) (a<b?a:b)
//number of elements to be processed
const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N+threadsPerBlock-1) / threadsPerBlock );
//handle when there millions of elements for vectors to be processed
// Vector addition c = a + b via a grid-stride loop, so any launch
// configuration covers arrays larger than the thread count. The printf
// is debug output showing which index each thread will process next.
__global__ void sum_array_gpu_long(int *a,int *b,int *c,int size)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid<size){
c[tid] = a[tid] + b[tid];
//tid += blockDim.x * gridDim.x;
// Debug: report the next index this thread will handle, if any.
if((tid+blockDim.x*gridDim.x)<size){
printf("max: %d, tid: %d, added_value: %d\n", blockDim.x*gridDim.x, tid, tid+blockDim.x*gridDim.x);
}
// Advance by the total number of threads in the grid.
tid += blockDim.x * gridDim.x;
}
}
// Dot product of a and b over N elements: each thread accumulates a
// grid-stride partial sum, the block reduces those in shared memory,
// and each block writes one partial result to c[blockIdx.x] (the host
// finishes the final sum).
__global__ void dot(float *a,float *b, float *c){
//shared memory between threads in one block, compiler will create a copy of the shared variables for each block
__shared__ float cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
// Grid-stride accumulation of this thread's products.
float temp = 0;
while(tid<N){
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
//set the cache values
cache[cacheIndex] = temp;
//synchronize threads in this block
//this call guarantees that every thread in the block has completed instructions prior
//to the __syncthreads()
__syncthreads();
//parallel tree reduction to sum all elements of the cache
//for reductions, threadsPerBlock must be a power of 2
int i = blockDim.x/2;
while(i != 0){
if(cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
// barrier is outside the if, so every thread reaches it
__syncthreads();
i /= 2;
}
// Thread 0 publishes the block's partial sum.
if(cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
// Computes the dot product of a[i] = i and b[i] = 2i on the GPU
// (per-block partials, finished on the CPU) and checks it against the
// closed-form sum-of-squares formula.
int main(int argc, char *argv[])
{
float *a, *b, c, *partial_c;
float *dev_a, *dev_b, *dev_partial_c;
// allocate memory on the CPU side
a = (float*)malloc( N*sizeof(float) );
b = (float*)malloc( N*sizeof(float) );
partial_c = (float*)malloc( blocksPerGrid*sizeof(float) );
// allocate the memory on the GPU
HANDLE_ERROR( cudaMalloc( (void**)&dev_a,N*sizeof(float) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_b,N*sizeof(float) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_partial_c,blocksPerGrid*sizeof(float) ) );
// fill in the host memory with data: a[i] = i, b[i] = 2i
for (int i=0; i<N; i++) {
a[i] = i;
b[i] = i*2;
}
// copy the arrays ‘a’ and ‘b’ to the GPU
HANDLE_ERROR( cudaMemcpy( dev_a, a, N*sizeof(float),cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b, b, N*sizeof(float),cudaMemcpyHostToDevice ) );
dot<<<blocksPerGrid,threadsPerBlock>>>( dev_a, dev_b,dev_partial_c );
// copy the array 'c' back from the GPU to the CPU
// (blocking copy also waits for the kernel to finish)
HANDLE_ERROR( cudaMemcpy( partial_c, dev_partial_c,blocksPerGrid*sizeof(float),cudaMemcpyDeviceToHost ) );
// finish up on the CPU side: sum the per-block partials
c = 0;
for (int i=0; i<blocksPerGrid; i++) {
c += partial_c[i];
}
// closed form: sum i*(2i) = 2 * sum i^2 = 2 * n(n+1)(2n+1)/6
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
printf("Does GPU value %.6g = %.6g?\n", c,2 * sum_squares( (float)(N - 1) ) );
// free memory on the GPU side
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_partial_c );
// free memory on the CPU side
free( a );
free( b );
free( partial_c );
return 0;
}
|
9,232 | #include<stdio.h>
#include <stdlib.h>
// Raises each matrix element to the power (row index + 1):
// res[i][j] = mat[i][j]^(i+1). Launched as <<<rows, cols>>>, so
// blockIdx.x is the row and threadIdx.x is the column.
__global__ void replaceMat(int *mat, int *res){
    int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
    // Use exact integer multiplication instead of powf(): float has only a
    // 24-bit mantissa, so powf() silently rounds results >= 2^24, and its
    // last-ulp rounding can make the truncation back to int off by one even
    // for small exact powers.
    int base = mat[i*n+j];
    int p = 1;
    for (int e = 0; e <= i; e++)
        p *= base;   // may still overflow int for large inputs, as before
    res[i*n+j] = p;
}
// Reads an m x n integer matrix, raises each row's elements to
// (row index + 1) on the GPU, and prints the result.
int main(){
    int *a,*t,m,n,i,j,*da,*dt;
    printf("Enter m: ");
    scanf("%d",&m);
    printf("Enter n: ");
    scanf("%d",&n);
    int size = sizeof(int)*m*n;
    a = (int *)malloc(size);
    t = (int *)malloc(size);
    printf("Enter the matrix:\n");
    for(i=0;i<m*n;i++)
        scanf("%d",&a[i]);
    cudaMalloc((void **)&da,size);
    cudaMalloc((void **)&dt,size);
    cudaMemcpy(da,a,size,cudaMemcpyHostToDevice);
    // One block per row, one thread per column.
    replaceMat<<<m,n>>>(da,dt);
    // Blocking copy: also synchronizes with the kernel.
    cudaMemcpy(t,dt,size,cudaMemcpyDeviceToHost);
    printf("Result:\n");
    for(i=0;i<m;i++){
        for(j=0;j<n;j++)
            printf("%d ",t[i*n+j]);
        printf("\n");
    }
    cudaFree(da);
    cudaFree(dt);
    // Release host buffers (the original leaked both).
    free(a);
    free(t);
    return 0;
}
|
9,233 | #include <time.h>
#include "stdio.h"
#include "cuda_runtime.h"
#define BlockNum 256
#define ThreadNum 1024
#define Len 4
// Bitwise CRC-32 (reflected polynomial 0xEDB88320) of a NUL-terminated
// byte string; callable from both host and device code.
__host__ __device__ unsigned int crc32(unsigned char *message)
{
    unsigned int crc = 0xFFFFFFFF;
    for (int pos = 0; message[pos] != 0; pos++)
    {
        crc = crc ^ message[pos];
        // Process the byte one bit at a time (LSB first).
        for (int bit = 0; bit < 8; bit++)
        {
            unsigned int mask = -(crc & 1);
            crc = (crc >> 1) ^ (0xEDB88320 & mask);
        }
    }
    return ~crc;
}
// Brute-force search on the CPU for a len-byte input whose CRC-32 equals
// `target`: treats buf as a little-endian counter and increments it until
// the CRC matches. Requires len <= Len.
__host__ void crc32Host(int len, unsigned int target)
{
    // One extra slot keeps the buffer NUL-terminated for crc32(), which
    // scans until it sees a zero byte. The original buf[Len] read out of
    // bounds once all len bytes became non-zero.
    unsigned char buf[Len + 1];
    for(int i=0;i<=len;i++)
    {
        buf[i]=0;
    }
    unsigned int crc=0;
    while(target!=crc)
    {
        buf[0]++;
        // Propagate carries: a byte that reached 255 rolls over to 0 and
        // bumps its neighbour (wrapping modulo len).
        for(int i=0;i<len;i++)
        {
            if (buf[i]>=255)
            {
                buf[(i+1)%len]++;
                buf[i]=0;
            }
        }
        crc=crc32(buf);
        if(crc == target)
        {
            printf("Input Found in CPU=");
            for (int i = 0; i < Len; ++i)
            {
                printf("%c",buf[i]);
            }
            printf("\n");
            break;
        }
    }
}
// Brute-force CRC-32 search on the GPU: the 256^len candidate space is
// split evenly over all BlockNum*ThreadNum threads; each thread scans its
// own contiguous sub-range of little-endian counters.
__global__ void crc32Device(int len, unsigned int target)
{
    unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int size = BlockNum*ThreadNum;
    // Total number of len-byte candidates = 256^len.
    unsigned long long spacesearch=1;
    for(int i=0;i<len;i++)
    {
        spacesearch *=256;
    }
    // %llu is the correct conversion for unsigned long long (the original
    // %ld invoked undefined behavior).
    if(idx==0) printf("spacesearc=%llu,Size=%d\n",spacesearch,size );
    __syncthreads();
    // Extra slot keeps the buffer NUL-terminated for crc32(); the original
    // buf[Len] read past the array once every byte was non-zero.
    unsigned char buf[Len + 1];
    for(int i=0;i<=len;i++)
    {
        buf[i]=0;
    }
    unsigned int crc=0;
    // Seed this thread's counter at the start of its sub-range,
    // little-endian byte order.
    unsigned int index=idx*((spacesearch/size)+1);
    for(int i=0;i<Len;i++)
    {
        buf[i]=(unsigned char)((index)&0xff);
        index=(index) >>8;
    }
    for(int i=0;i<((spacesearch/size)+1);i++)
    {
        // Propagate pending carries before hashing.
        for(int j=0;j<len;j++)
        {
            if (buf[j]>=255)
            {
                buf[(j+1)%len]++;
                buf[j]=0;
            }
        }
        crc=crc32(buf);
        buf[0]++;
        if(crc == target)
        {
            printf("Input Found in GPU=");
            for (int i = 0; i < Len; ++i)
            {
                printf("%c",buf[i]);
            }
            printf("\n");
        }
    }
    __syncthreads();
}
// Computes the CRC-32 of "bbbb" as the search target, then times the GPU
// brute-force search with CUDA events.
int main()
{
    // Extra NUL keeps crc32() (which scans to a zero byte) inside the
    // buffer; the original boi[Len] had no terminator.
    unsigned char boi[Len + 1]={0};
    for(int i=0;i<Len;i++)
        boi[i]='b';
    unsigned int test =crc32(boi);
    printf("%x\n",test );
    // Set the Device Number
    cudaSetDevice(0);
    // NOTE: the original called cudaMalloc on the addresses of local ints
    // (overwriting host stack variables with device pointers); both search
    // functions take their arguments by value, so no device allocation is
    // needed at all.
    // Setting CUDA timer events
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Host function in CPU (disabled by default)
    cudaEventRecord(start,0);
    // crc32Host( Len,test);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float miliseconds_cpu = 0;
    cudaEventElapsedTime(&miliseconds_cpu,start,stop);
    // printf("Elapsed Time for the CPU computation is :%f\n",miliseconds_cpu/1000);
    // Device function in GPU: events are reusable, so re-creating them (as
    // the original did) only leaked the first pair.
    float miliseconds_gpu = 0;
    cudaEventRecord(start,0);
    crc32Device<<<BlockNum,ThreadNum>>>(Len, test);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&miliseconds_gpu,start,stop);
    printf("Elapsed Time for the GPU computation is :%f\n",miliseconds_gpu/1000);
    //printf("GPU speedup over CPU is :%f\nx",miliseconds_cpu/miliseconds_gpu);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaDeviceReset();
    return 0;
}
9,234 | /*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 1.1
* copyright (c) 2022, Universitat Politècnica de València (UPV), PRHLT Research Centre
* Date: March 2022
* Author: PRHLT Research Centre, UPV, (rparedes@prhlt.upv.es), (jon@prhlt.upv.es)
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cuda.h>
// Element-wise select: C[i] = condition[i] ? A[i] : B[i] for i in [0, size).
__global__ void gpu_where(float *condition, float *A, float *B, float *C, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size)
        return;   // grid tail guard
    C[i] = ((bool) condition[i]) ? A[i] : B[i];
}
// Backward pass of `where`: accumulate the incoming gradient D into
// whichever input was selected by `condition` in the forward pass.
__global__ void gpu_where_back(float *condition, float *PD_A, float *PD_B, float *D, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size)
        return;   // grid tail guard
    float *grad = ((bool) condition[i]) ? PD_A : PD_B;
    grad[i] += D[i];
}
9,235 | #include "includes.h"
// XOR-encrypts pSize bytes viewed as ints:
// pEncryptedData[i] = pDataPointer[i] ^ pRandomData[i].
__global__ void generate_encrypted(int *pDataPointer , int *pRandomData , int *pEncryptedData , long long int pSize)
{
    long long int index = blockIdx.x * blockDim.x + threadIdx.x;
    // Strict '<': pSize/sizeof(int) is the element count, so valid indices
    // are [0, count). The original '<=' wrote one element out of bounds.
    if( index < (long long int)(pSize / sizeof(int)) )
    {
        pEncryptedData[index] = pDataPointer[index] ^ pRandomData[index];
    }
}
9,236 | #include <stdio.h>
// Doubles each of the n elements of v into w. Accepts 1D or 2D grids:
// blocks are linearized as blockIdx.y * gridDim.x + blockIdx.x.
__global__ void vectorMultiplyBy2(float *v, float *w, size_t n)
{
    // 64-bit index: the original 32-bit int overflowed for large launches,
    // ignored blockIdx.y entirely, and had no bounds guard.
    size_t block = (size_t)blockIdx.y * gridDim.x + blockIdx.x;
    size_t i = block * blockDim.x + threadIdx.x;
    if (i < n)
        w[i] = v[i] * 2;
}
// Doubles a 2^30-element vector on the GPU and verifies against a CPU
// reference with memcmp.
int main() {
    size_t N = 1024 * 1024 * 1024;
    size_t size = N * sizeof(float);
    float *a = (float *) malloc(size);
    float *b = (float *) malloc(size);
    float *b_check = (float *) malloc(size);
    // 12 GB of host memory total -- fail loudly instead of segfaulting.
    if (!a || !b || !b_check) {
        printf("Host allocation failed.\n");
        return 1;
    }
    for (size_t i = 0; i < N; i++) {
        a[i] = i;
    }
    for (size_t i = 0; i < N; i++) {
        b_check[i] = a[i] * 2;
    }
    float *ha;
    cudaMalloc((void **) &ha, size);
    float *hb;
    cudaMalloc((void **) &hb, size);
    cudaMemcpy(ha, a, size, cudaMemcpyHostToDevice);
    int tInB = 1024;
    dim3 threadsInBlock(tInB);
    // N is a multiple of tInB, so a 1D grid of N / tInB blocks covers the
    // vector exactly. The original 32768 x 32768 2D grid did not match the
    // kernel's 1D indexing and left most of the vector untouched.
    int numberOfBlocks = (int)(N / tInB);
    printf("Number of blocks is %d\n", numberOfBlocks);
    dim3 nBlocks(numberOfBlocks);
    vectorMultiplyBy2<<<nBlocks, threadsInBlock>>>(ha, hb, N);
    // Surface launch-configuration errors.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Blocking copy: synchronizes with the kernel.
    cudaMemcpy(b, hb, size, cudaMemcpyDeviceToHost);
    int cmp = memcmp(b, b_check, size);
    if (cmp == 0) {
        printf("Arrays are equal.\n");
    } else {
        printf("Arrays are not equal.\n");
    }
    // Release device and host memory (the original leaked everything).
    cudaFree(ha);
    cudaFree(hb);
    free(a);
    free(b);
    free(b_check);
    return 0;
}
|
9,237 | #define BLOCK_SIZE 128
#define DATALEN_PER_BLOCK (BLOCK_SIZE * 2)
#include <stdio.h>
/**
this works, because
last warp is like simd ? when is simd or not?
volatile because each instruct deps on last result, why others not need ?
**/
// Unrolled final reduction of the last 64 shared-memory elements, executed
// by threads 0..31 of the block. `volatile` forces every load/store to go
// to shared memory in program order instead of being cached in registers.
// NOTE(review): this idiom relies on pre-Volta implicit warp-synchronous
// execution; on Volta+ (independent thread scheduling) a __syncwarp()
// between steps is required for guaranteed correctness -- confirm the
// target architecture.
__device__ void warpReduce(volatile float *sdata, int tid)
{
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
// Block-level sum reduction: each block sums DATALEN_PER_BLOCK consecutive
// elements of `a` (each thread adds two during the load) and writes its
// partial sum to r[blockIdx.x]. Assumes blockDim.x == BLOCK_SIZE.
__global__ void reductionKernel(float *a, float *r)
{
    int blocksz = blockDim.x;
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int i1 = DATALEN_PER_BLOCK * bid + tid;
    int i2 = i1 + blocksz;
    __shared__ float shared_data[BLOCK_SIZE];
    // First reduction step happens during the load: two loads, one add.
    shared_data[tid] = a[i1] + a[i2];
    __syncthreads();
    // Tree reduction in shared memory down to 64 elements.
    for (int i = blocksz / 2; i > 32; i >>= 1) {
        if (tid < i) {
            shared_data[tid] += shared_data[tid + i];
        }
        __syncthreads();
    }
    if (tid < 32)
        warpReduce(shared_data, tid);
    // Only thread 0 publishes the result. The original let every thread
    // write r[bid]: threads >= 32 skipped warpReduce and could store a
    // stale shared_data[0] (a data race).
    if (tid == 0)
        r[bid] = shared_data[0];
}
// Host-side sum of `len` floats via repeated in-place GPU block reductions
// until at most DATALEN_PER_BLOCK partials remain; the remainder is summed
// on the CPU. Full coverage assumes len is a power of DATALEN_PER_BLOCK
// times DATALEN_PER_BLOCK. The outer k-loop re-runs the whole reduction
// 20 times (benchmark repetition); only the last run's result is read.
float reduction(float *a, size_t len)
{
    int data_len_per_block = DATALEN_PER_BLOCK;
    float *da = nullptr;
    // Staging buffer for the final partials (at most data_len_per_block).
    float *r = new float[data_len_per_block];
    size_t tlen = len;
    cudaMalloc(&da, sizeof(float) * len);
    for (int k = 0; k < 20; k++) {
        len = tlen;
        cudaMemcpy(da, a, sizeof(float) * len, cudaMemcpyHostToDevice);
        while (len > data_len_per_block) {
            len /= data_len_per_block;
            dim3 threads(BLOCK_SIZE);
            dim3 grids(len);
            reductionKernel<<<grids, threads>>>(da, da);
        }
    }
    // Blocking copy: synchronizes with the kernels.
    cudaMemcpy(r, da, len * sizeof(float), cudaMemcpyDeviceToHost);
    // Sum the remaining partials on the host.
    for (size_t i = 1; i < len; i++)
        r[0] += r[i];
    float result = r[0];   // the original truncated the sum through an int
    cudaFree(da);          // the original passed &da / &dr (host addresses)
    delete[] r;            // new[] must pair with delete[], not free()
    return result;
}
// Fills a vector of `len` ones and checks the GPU reduction prints `len`.
int main(int argc, char *argv[])
{
    int len = 8192;
    if (argc > 1)
        len = atoi(argv[1]);
    printf("len %d\n", len);
    float *a = new float[len];
    // All-ones input makes the expected sum simply `len`.
    for (int i = 0; i < len; i++)
        a[i] = 1.0;
    float r = reduction(a, len);
    printf("%.2f\n", r);
    delete[] a;   // the original leaked the input buffer
    return 0;
}
9,238 | #include "includes.h"
using namespace std;
// In-place element-wise addition: a[t] += b[t], one thread per element.
// Intended for a single-block launch (only threadIdx.x is used).
__global__ void AddIntsCuda(int *a, int *b)
{
    int t = threadIdx.x;
    a[t] = a[t] + b[t];
}
9,239 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
// forward propogation
/*
def forward(X, W, v):
Z_trans = relu(W@X.T) # mat-mat
Z = Z_trans.T # trans
yhat = Z@v # mat-vec
return Z, yhat
*/
/* Parameter Setup */
#define N 4 // # of input samples
#define D 2 // # of input neurons
#define K 3 // # of hidden neurons
// X: input matrix (n * d)
#define X_HEIGHT N
#define X_WIDTH D
#define X_N X_HEIGHT * X_WIDTH
// Z: ifmap matrix (n * k)
#define Z_HEIGHT N
#define Z_WIDTH K
#define Z_N Z_HEIGHT * Z_WIDTH
// W: layer 1 weights (k * d)
#define W_HEIGHT K
#define W_WIDTH D
#define W_N W_HEIGHT * W_WIDTH
// v: layer 2 weights
#define V_HEIGHT K
#define V_WIDTH 1
#define V_N V_HEIGHT * V_WIDTH
#define BLOCK_SIZE 32
#define MAX_ERR 1e-6
// Row-major matrix product d_C = d_A @ d_B, where d_A is
// (d_a_height x d_a_width) and d_B is (d_a_width x d_b_width).
// One thread per output element: threadIdx.x -> row, threadIdx.y -> col.
__global__ void matrix_mul(double *d_C, double *d_A, double *d_B, int d_a_height, int d_a_width, int d_b_width) {
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= d_a_height || col >= d_b_width)
        return;   // grid tail guard
    // Dot product of row of A with column of B.
    double acc = 0.0;
    for (int k = 0; k < d_a_width; k++)
        acc += d_A[row * d_a_width + k] * d_B[k * d_b_width + col];
    d_C[row * d_b_width + col] = acc;
}
// Fused d_C = relu(d_A @ d_B): same layout as matrix_mul, but negative dot
// products are clamped to zero before being stored.
__global__ void relu_matrix_mul(double *d_C, double *d_A, double *d_B, int d_a_height, int d_a_width, int d_b_width) {
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= d_a_height || col >= d_b_width)
        return;   // grid tail guard
    double acc = 0.0;
    for (int k = 0; k < d_a_width; k++)
        acc += d_A[row * d_a_width + k] * d_B[k * d_b_width + col];
    // ReLU applied at the store.
    d_C[row * d_b_width + col] = (acc > 0) ? acc : 0;
}
// Out-of-place row-major transpose: d_out[cid][rid] = d_in[rid][cid],
// where d_in has d_out_width rows of d_in_width columns and d_out has
// d_in_width rows of d_out_width columns.
// NOTE(review): threadIdx.x drives rid, so the writes to d_out are
// contiguous but the reads of d_in are strided; a shared-memory tile would
// make both sides coalesced.
__global__ void matrix_transpose(double *d_out, double *d_in, int d_in_width, int d_out_width) {
int cid = blockIdx.y * blockDim.y + threadIdx.y;
int rid = blockIdx.x * blockDim.x + threadIdx.x;
if(cid < d_in_width && rid < d_out_width){
d_out[cid * d_out_width + rid] = d_in[rid * d_in_width + cid];
}
}
// Forward pass of a tiny 2-layer network on the GPU:
//   Z_T = relu(W @ X^T), Z = Z_T^T, yhat = Z @ v
// X is (N x D), W is (K x D), v is (K x 1); results are printed per sample.
int main(){
double *h_X, *h_W, *h_v;
double *h_Z, *h_Z_T, *h_yhat;
double *d_X, *d_X_T, *d_W, *d_v;
double *d_Z, *d_Z_T, *d_yhat;
// double *h_ref; // compute verified results
// Allocate host memory
h_X = (double*)malloc(sizeof(double) * X_N);
h_W = (double*)malloc(sizeof(double) * W_N);
h_v = (double*)malloc(sizeof(double) * V_N);
h_Z_T = (double*)malloc(sizeof(double) * Z_N);
h_Z = (double*)malloc(sizeof(double) * Z_N);
h_yhat = (double*)malloc(sizeof(double) * N);
// h_ref = (double*)malloc(sizeof(double) * N);
// Initialize host arrays with a fixed pattern (indices 1 and 3 of X are
// negated so the ReLU path is exercised)
/*** TEST 1 ***/
for(int i = 0; i < X_N; i++){
if(i == 1 || i == 3){
h_X[i] = (double)(-i-1);
} else{
h_X[i] = (double)(i+1);
}
}
for(int i = 0; i < W_N; i++){
h_W[i] = double(i+1);
}
for(int i = 0; i < V_HEIGHT; i++){
h_v[i] = (double)(i+1);
}
/*** TEST 2 ***/
// rand((unsigned int)time(NULL));
// for (int i = 0; i< A_N; i++){
// h_A[i] = (double)rand()/(double)(RAND_MAX);
// }
// for (int i = 0; i< B_N; i++){
// h_B[i] = (double)rand()/(double)(RAND_MAX);
// }
// Allocate device memory
cudaMalloc((void**)&d_X, sizeof(double) * X_N);
cudaMalloc((void**)&d_X_T, sizeof(double) * X_N);
cudaMalloc((void**)&d_Z, sizeof(double) * Z_N);
cudaMalloc((void**)&d_Z_T, sizeof(double) * Z_N);
cudaMalloc((void**)&d_W, sizeof(double) * W_N);
cudaMalloc((void**)&d_v, sizeof(double) * V_N);
cudaMalloc((void**)&d_yhat, sizeof(double) * N);
// Transfer data from host to device memory
cudaMemcpy(d_X, h_X, sizeof(double) * X_N, cudaMemcpyHostToDevice);
cudaMemcpy(d_W, h_W, sizeof(double) * W_N, cudaMemcpyHostToDevice);
cudaMemcpy(d_v, h_v, sizeof(double) * V_N, cudaMemcpyHostToDevice);
// Executing kernels: transpose X, Z_T = relu(W @ X_T), transpose back,
// then yhat = Z @ v. Each grid over-provisions by one block per axis.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// X_HEIGHT (N) corresponding to OUT_WIDTH, X_WIDTH (D) corresponding to IN_WIDTH
dim3 dimGrid1(N / BLOCK_SIZE + 1,D / BLOCK_SIZE + 1);
matrix_transpose<<<dimGrid1,dimBlock>>>(d_X_T, d_X, D, N);
dim3 dimGrid2(K / BLOCK_SIZE + 1, N / BLOCK_SIZE + 1);
relu_matrix_mul<<<dimGrid2,dimBlock>>>(d_Z_T, d_W, d_X_T, K, D, N);
dim3 dimGrid3(K / BLOCK_SIZE + 1, N / BLOCK_SIZE + 1);
matrix_transpose<<<dimGrid3,dimBlock>>>(d_Z, d_Z_T, N, K);
dim3 dimGrid4(N / BLOCK_SIZE + 1, 1 / BLOCK_SIZE + 1);
matrix_mul<<<dimGrid4,dimBlock>>>(d_yhat, d_Z, d_v, N, K, 1);
// Transfer data back to host memory (blocking copy also synchronizes with
// the kernel chain above)
cudaMemcpy(h_yhat, d_yhat, sizeof(double) * N, cudaMemcpyDeviceToHost);
// Verification (reference computation currently disabled; results are
// only printed, not asserted)
for(int i = 0; i < N; i++){
for(int j = 0; j < 1; j++){
// double sum = 0.0;
// for(int k = 0; k < A_WIDTH; k++){
// sum += h_A[i*A_WIDTH+k] * h_B[k*B_WIDTH + j];
// }
// h_ref[i * C_WIDTH + j] = sum;
// assert(fabs(h_ref[i*C_WIDTH + j] - h_C[i * C_WIDTH + j]) < MAX_ERR);
printf("h_yhat[%d][%d] = %f\n", i, j, h_yhat[i * 1 + j]);
// printf("h_ref[%d][%d] = %f\n", i, j, h_ref[i * C_WIDTH + j]);
}
}
printf("PASSED\n");
// Deallocate device memory
cudaFree(d_X);
cudaFree(d_X_T);
cudaFree(d_W);
cudaFree(d_v);
cudaFree(d_Z);
cudaFree(d_Z_T);
cudaFree(d_yhat);
// Deallocate host memory
free(h_X);
free(h_W);
free(h_v);
free(h_Z);
free(h_Z_T);
free(h_yhat);
}
|
9,240 | #include <cstdlib>
#include <cstdio>
#include <ctime>
#include <iostream>
using namespace std;
const int ALIVE = 1;
const int DEAD = 0;
/* prints the table passed into the the function and its generation */
/* Prints `table` (an N2 x N2 row-major grid) preceded by its generation
 * number. */
void printTable(int* table, int gen, int N2){
    cout << "Generation " << gen << ":\n";
    for (int row = 0; row < N2; row++) {
        for (int col = 0; col < N2; col++) {
            cout << table[N2 * row + col] << " ";
        }
        cout << "\n";
    }
}
/* modifies the nextGen table to represent the next generation of The Game of Life */
/* Computes one Game-of-Life step: one thread per interior column j, each
 * sweeping every interior row i. The outer border of the N2 x N2 grid is
 * never written (it is DEAD padding, so neighbor reads stay in bounds).
 * Rules as implemented: a DEAD cell with exactly 3 neighbors becomes
 * ALIVE; an ALIVE cell with <= 1 or >= 4 neighbors dies; otherwise the
 * cell is copied unchanged. */
__global__
void nextGeneration(int* table, int* nextGen, int N2) {
int j = blockDim.x * blockIdx.x + threadIdx.x + 1;
for (int i = 1; i < N2-1; i++){
if (j < N2-1) {
int localChange = 0;
int neighbors = 0;
// Sum the 8 surrounding cells (each is 0 or 1).
neighbors += table[N2 * (i-1) + j];
neighbors += table[N2 * (i-1) + (j-1)];
neighbors += table[N2 * (i-1) + (j+1)];
neighbors += table[N2 * (i+1) + j];
neighbors += table[N2 * (i+1) + (j-1)];
neighbors += table[N2 * (i+1) + (j+1)];
neighbors += table[N2 * i + (j+1)];
neighbors += table[N2 * i + (j-1)];
// Birth rule.
if (table[N2 * i + j] == DEAD && neighbors == 3) {
nextGen[N2 * i + j] = ALIVE;
localChange = 1;
}
// Death by under/over-population.
if (neighbors <= 1 || neighbors >= 4) {
if (table[N2 * i + j] == ALIVE) {
nextGen[N2 * i + j] = DEAD;
localChange = 1;
}
}
/* this is used to make sure the two tables stay up to date with each other over the generations since they are being swapped after each iteration */
if(localChange == 0) {
nextGen[N2 * i + j] = table[N2 * i + j];
}
}
}
}
/* initializes a table according to the size provided by the user with each element being randomized to be alive or dead */
/* Fills an N2 x N2 grid: the outer border is forced DEAD (padding so the
 * update kernel never reads out of bounds); each interior cell is ALIVE or
 * DEAD with probability 1/2. */
void initTable(int* table, int N2){
    for (int row = 0; row < N2; row++) {
        for (int col = 0; col < N2; col++) {
            bool border = (row == 0 || col == 0 || row == N2 - 1 || col == N2 - 1);
            if (border) {
                table[N2 * row + col] = DEAD;
            } else {
                table[N2 * row + col] = (rand() % 2 < 1) ? ALIVE : DEAD;
            }
        }
    }
}
/* Runs the Game of Life on the GPU for <maxGen> generations on an
 * <N> x <N> grid (plus a DEAD border), timing the loop with clock(). */
int main(int argc, char *argv[]){
    clock_t starttime, endtime;
    /* Validate CLI arguments before dereferencing them (the original
     * crashed with a null dereference when run without both arguments). */
    if (argc < 3) {
        printf("Usage: %s <grid size N> <max generations>\n", argv[0]);
        return 1;
    }
    int N = atoi(argv[1]);
    int maxGen = atoi(argv[2]);
    srand(time(NULL));
    //freopen("output2.txt", "w", stdout);
    const int N2 = N + 2;   /* +2 for the DEAD border padding */
    int *table;
    int *nextGen;
    /* Managed memory: accessible from both initTable (host) and the
     * kernel (device). */
    cudaMallocManaged((void **)&table, N2 * N2 * sizeof(int));
    cudaMallocManaged((void **)&nextGen, N2 * N2 * sizeof(int));
    initTable(table, N2);
    /* copying the initial values of the table into the nextGen table */
    for (int i = 0; i < N2; i++) {
        for (int j = 0; j < N2; j++) {
            nextGen[N2 * i + j] = table[N2 * i + j];
        }
    }
    int blockSize = 128;
    int numBlocks = (N + blockSize - 1) / blockSize;   /* ceil-div over columns */
    starttime = clock();
    /* main game loop: compute the next generation on the GPU, sync, then
     * swap the two buffers */
    for(int i = 0; i < maxGen; i++){
        //printTable(table, i, N2);
        nextGeneration <<<numBlocks, blockSize>>> (table, nextGen, N2);
        cudaDeviceSynchronize();
        swap(table, nextGen);
    }
    endtime = clock();
    printf("Time taken = %lf seconds\n", ((double) endtime - starttime) / CLOCKS_PER_SEC);
    cudaFree(table);
    cudaFree(nextGen);
    return 0;
}
|
9,241 | #include "includes.h"
// In-place element-wise vector add: array_A[i] += array_B[i] for the
// first _size elements.
__global__ void kernelSuma_Vectores(float* array_A, float* array_B, int _size){
    int gid = blockIdx.x*blockDim.x+threadIdx.x;
    if (gid >= _size)
        return;   // grid tail guard
    array_A[gid] += array_B[gid];
}
9,242 | #include <stdio.h>
#define N 1
#define TPB 256
// Prints a greeting together with each thread's global linear id.
__global__ void helloWorldKernel()
{
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    printf("Hello World! My threadId is %2d\n", tid);
}
int main()
{
    // Launch N blocks of TPB threads; each thread prints its id.
    helloWorldKernel<<<N, TPB>>>();
    // Device-side printf output is flushed when the host synchronizes.
    cudaDeviceSynchronize();
    return 0;
}
9,243 | /*author: Zeke Elkins
*date: 3/27/2014
*description: a simple hello world program */
#include <iostream>
using namespace std;
// Smoke-test entry point: print a greeting and exit successfully.
int main(void) {
    cout << "Hello World";
    cout << endl;
    return 0;
}
|
9,244 | #include <cuda.h>
#include <stdio.h>
float * boats;
// Aborts the process with a diagnostic if any previous CUDA call failed.
void checkError()
{
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    // print the CUDA error message and exit
    printf("CUDA error: %s\n", cudaGetErrorString(status));
    exit(-1);
}
// Baseline kernel for the timing comparison: computes 1/sqrt(c) via the
// accurate path (the 1.0 double literal promotes the whole expression to
// double precision). `a` and `b` are assigned but never used -- presumably
// leftovers from a larger experiment.
__global__ void normalMAKernel(float * d)
{
float a = 234.3;
float b = 4672.323;
float c = 392053.2345;
d[0] = 1.0/sqrt(c);
}
// Fast-path kernel: same computation using the single-precision
// round-to-nearest reciprocal-square-root intrinsic, to be timed against
// normalMAKernel. `a` and `b` are unused, matching the baseline kernel.
__global__ void speedyMAKernel(float * d)
{
float a = 234.3;
float b = 4672.323;
float c = 392053.2345;
d[0] = __frsqrt_rn(c);
}
// Times the baseline and fast rsqrt kernels back to back with CUDA events.
int main(int argc, char *args[])
{
    cudaSetDevice(1);
    printf("I ran a\n");
    cudaEvent_t normalStart,normalStop, fastStart,fastStop;
    cudaEventCreate(&normalStart);
    cudaEventCreate(&normalStop);
    cudaEventCreate(&fastStart);
    cudaEventCreate(&fastStop);
    cudaError_t err = cudaMalloc(&boats, 100 * sizeof(float));
    if ( err != cudaSuccess ) return 0;
    cudaEventRecord(normalStart);
    normalMAKernel<<<214748,1>>>(boats);
    speedyMAKernel<<<214748,1>>>(boats);
    checkError();
    cudaDeviceSynchronize();
    cudaEventRecord(normalStop);
    // The stop event must have completed before its timestamp can be read;
    // without this, cudaEventElapsedTime can return cudaErrorNotReady and
    // leave `milliseconds` unset.
    cudaEventSynchronize(normalStop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds,normalStart,normalStop);
    printf("It took %f",milliseconds);
    // Release GPU resources (the original leaked the buffer and all four
    // events; fastStart/fastStop were never recorded at all).
    cudaFree(boats);
    cudaEventDestroy(normalStart);
    cudaEventDestroy(normalStop);
    cudaEventDestroy(fastStart);
    cudaEventDestroy(fastStop);
    return 0;
}
9,245 | #define CUDART_NAN_F __int_as_float(0x7fffffff)
#include "math.h"
using namespace std;
// For each candidate row x of `list` (listLength rows of dataLength floats
// each, row-major), sets matches[x] = 1 iff every non-NaN entry of
// dataEvent equals the corresponding entry of that row; NaN entries in
// dataEvent act as wildcards. One thread per row.
__global__ void impalaFindSmem(const float* list, const float* dataEvent, const int listLength, const int dataLength, int* matches) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
//int y = blockIdx.y * blockDim.y + threadIdx.y;
// Assume a match until a mismatch is found.
int temp = 1;
//TODO: Implement shared memory
//__shared__ float dataEventShared[1024];
//dataEventShared = dataEvent;
if(x < listLength) {
// For each element in dataEvent
for(int y=0; y<dataLength ; ++y){
// Compare to entry in list and set temp to 0 if mismatch, except if NaN
if(!isnan(dataEvent[y]) && (list[dataLength*x+y] != dataEvent[y])){
temp = 0;
}
}
matches[x] = temp;
}
}
9,246 | #include <stdio.h>
#include<sys/time.h>
// Squares d_in into d_out, one element per thread. Every one of the 32
// switch cases executes the identical statement; presumably this is a
// deliberate branching / warp-divergence microbenchmark rather than an
// oversight -- TODO confirm intent before simplifying. The `mod` parameter
// is unused. Note: only threadIdx.x is used for the index, so with
// multiple blocks every block rewrites the same first blockDim.x elements.
__global__ void square(float * d_out, float * d_in,int mod){
int idx = threadIdx.x;
float f = d_in[idx];
switch(idx%32){
case 0: d_out[idx] = f*f;break;
case 1: d_out[idx] = f*f;break;
case 2: d_out[idx] = f*f;break;
case 3: d_out[idx] = f*f;break;
case 4: d_out[idx] = f*f;break;
case 5: d_out[idx] = f*f;break;
case 6: d_out[idx] = f*f;break;
case 7: d_out[idx] = f*f;break;
case 8: d_out[idx] = f*f;break;
case 9: d_out[idx] = f*f;break;
case 10: d_out[idx] = f*f;break;
case 11: d_out[idx] = f*f;break;
case 12: d_out[idx] = f*f;break;
case 13: d_out[idx] = f*f;break;
case 14: d_out[idx] = f*f;break;
case 15: d_out[idx] = f*f;break;
case 16: d_out[idx] = f*f;break;
case 17: d_out[idx] = f*f;break;
case 18: d_out[idx] = f*f;break;
case 19: d_out[idx] = f*f;break;
case 20: d_out[idx] = f*f;break;
case 21: d_out[idx] = f*f;break;
case 22: d_out[idx] = f*f;break;
case 23: d_out[idx] = f*f;break;
case 24: d_out[idx] = f*f;break;
case 25: d_out[idx] = f*f;break;
case 26: d_out[idx] = f*f;break;
case 27: d_out[idx] = f*f;break;
case 28: d_out[idx] = f*f;break;
case 29: d_out[idx] = f*f;break;
case 30: d_out[idx] = f*f;break;
case 31: d_out[idx] = f*f;break;
}
}
// Benchmarks the `square` kernel: for each grid size 1..99, launches it
// ten million times and prints the wall-clock time per grid size.
int main(int argc, char ** argv) {
    const int ARRAY_SIZE = 64;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    // generate the input array on the host
    float h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];
    // declare GPU memory pointers
    float * d_in;
    float * d_out;
    // allocate GPU memory
    cudaMalloc((void**) &d_in, ARRAY_BYTES);
    cudaMalloc((void**) &d_out, ARRAY_BYTES);
    // transfer the array to the GPU
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    for(int i=1;i<100;i++){
        struct timeval tv1, tv2;
        gettimeofday(&tv1, NULL);
        for(long long int j=0;j<10000000;j++)
            square<<<i, ARRAY_SIZE>>>(d_out, d_in,32);
        // Kernel launches are asynchronous: without this sync the loop
        // above only measured launch overhead, not kernel execution time.
        cudaDeviceSynchronize();
        gettimeofday(&tv2, NULL);
        printf ("%d\t%f\n",i,
                (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
                (double) (tv2.tv_sec - tv1.tv_sec));
    }
    // copy back the result array to the CPU
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
9,247 | /*
Author: Su, Ming Yi
Date: 11/16/2018
Goal: Learn to use cuda to sum up two vectors
How to compile it:
nvcc -O -o example_2 example_2.cu
How to run it:
./example_2
*/
#include "stdio.h"
#define N 10
// Element-wise vector add: c[b] = a[b] + b[b], one block per element
// (launched <<<N,1>>>), so blockIdx.x is the element index. The char*
// parameter `d` is accepted but never used. The device-side printf is for
// debugging; its output appears only after the host synchronizes.
__global__ void add(int *a, int *b, int *c, char *d)
{
int tID = blockIdx.x;
if(tID < N)
{
c[tID] = a[tID] + b[tID];
printf("c[%d] = %d\n", tID, c[tID]);
}
}
// Adds two N-element vectors on the GPU and prints the sums.
int main()
{
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    // The kernel never dereferences its char* argument; pass an explicit
    // null instead of the original's uninitialized pointer.
    char *dev_d = NULL;
    cudaMalloc((void **) &dev_a, N*sizeof(int));
    cudaMalloc((void **) &dev_b, N*sizeof(int));
    cudaMalloc((void **) &dev_c, N*sizeof(int));
    for(int i=0;i<N;i++)
    {
        a[i] = i;
        b[i] = i;
    }
    cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
    // One block per element.
    add<<<N,1>>>(dev_a, dev_b, dev_c, dev_d);
    // Blocking copy: synchronizes with the kernel.
    cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
    for (int i=0;i<N;i++)
    {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    // Release device memory (the original leaked all three buffers).
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
9,248 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Prints `message` plus the CUDA error string and exits if the most
// recent CUDA call failed.
void check_error (const char* message) {
    cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
__global__ void curvi (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determing the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_i= (int)(blockDim.z);
int i0 = (int)(blockIdx.z)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.z);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (i>=2 & k>=2 & i<=N-3 & k<=N-3) {
for (int j=2; j<=N-3; j++) {
double _t_27_;
double _t_105_;
double _t_8_;
double _t_87_;
double _t_102_;
double _t_84_;
double _t_24_;
double _t_5_;
double _t_104_;
double _t_103_;
double _t_101_;
double _t_123_;
double _t_122_;
double _t_120_;
double _t_141_;
double _t_140_;
double _t_138_;
double _t_86_;
double _t_85_;
double _t_83_;
double _t_110_;
double _t_108_;
double _t_115_;
double _t_113_;
double _t_288_;
double _t_286_;
double _t_129_;
double _t_127_;
double _t_134_;
double _t_132_;
double _t_294_;
double _t_292_;
double _t_147_;
double _t_145_;
double _t_152_;
double _t_150_;
double _t_299_;
double _t_297_;
double _t_92_;
double _t_90_;
double _t_97_;
double _t_95_;
double _t_283_;
double _t_281_;
double _t_106_;
double _t_66_;
double _t_100_;
double _t_47_;
double _t_88_;
double _t_82_;
double _t_44_;
double _t_63_;
double _t_81_;
double _t_111_;
double _t_32_;
double _t_109_;
double _t_13_;
double _t_93_;
double _t_91_;
double _t_11_;
double _t_30_;
double _t_112_;
double _t_71_;
double _t_107_;
double _t_52_;
double _t_94_;
double _t_89_;
double _t_50_;
double _t_69_;
double _t_116_;
double _t_38_;
double _t_114_;
double _t_19_;
double _t_98_;
double _t_96_;
double _t_17_;
double _t_36_;
double _t_117_;
double _t_77_;
double _t_58_;
double _t_99_;
double _t_80_;
double _t_56_;
double _t_75_;
double _t_124_;
double _t_28_;
double _t_121_;
double _t_9_;
double _t_142_;
double _t_139_;
double _t_125_;
double _t_67_;
double _t_119_;
double _t_118_;
double _t_48_;
double _t_143_;
double _t_137_;
double _t_130_;
double _t_33_;
double _t_128_;
double _t_14_;
double _t_148_;
double _t_146_;
double _t_131_;
double _t_72_;
double _t_126_;
double _t_53_;
double _t_149_;
double _t_144_;
double _t_135_;
double _t_39_;
double _t_133_;
double _t_20_;
double _t_153_;
double _t_151_;
double _t_136_;
double _t_78_;
double _t_59_;
double _t_154_;
double _t_79_;
double r1ic0jc0kc0 = r1[i][j][k];
double _t_26_;
double _t_25_;
double _t_178_;
double _t_23_;
double _t_22_;
double _t_21_;
double _t_1_;
double _t_176_;
double _t_37_;
double _t_35_;
double _t_34_;
double _t_31_;
double _t_173_;
double _t_29_;
double _t_171_;
double _t_46_;
double _t_45_;
double _t_191_;
double _t_43_;
double _t_42_;
double _t_189_;
double _t_41_;
double _t_40_;
double _t_57_;
double _t_55_;
double _t_54_;
double _t_51_;
double _t_186_;
double _t_49_;
double _t_184_;
double _t_65_;
double _t_64_;
double _t_203_;
double _t_62_;
double _t_61_;
double _t_201_;
double _t_60_;
double _t_76_;
double _t_74_;
double _t_73_;
double _t_70_;
double _t_198_;
double _t_68_;
double _t_196_;
double _t_0_;
double _t_7_;
double _t_6_;
double _t_166_;
double _t_4_;
double _t_3_;
double _t_2_;
double _t_164_;
double _t_18_;
double _t_16_;
double _t_15_;
double _t_12_;
double _t_161_;
double _t_10_;
double _t_159_;
double _t_162_;
double _t_225_;
double _t_160_;
double _t_213_;
double _t_174_;
double _t_172_;
double _t_211_;
double _t_223_;
double _t_163_;
double _t_250_;
double _t_158_;
double _t_238_;
double _t_157_;
double _t_156_;
double _t_175_;
double _t_170_;
double _t_169_;
double _t_236_;
double _t_248_;
double _t_167_;
double _t_230_;
double _t_165_;
double _t_218_;
double _t_179_;
double _t_177_;
double _t_216_;
double _t_228_;
double _t_168_;
double _t_255_;
double _t_243_;
double _t_180_;
double _t_155_;
double _t_241_;
double _t_253_;
double _t_187_;
double _t_226_;
double _t_185_;
double _t_214_;
double _t_199_;
double _t_197_;
double _t_188_;
double _t_251_;
double _t_183_;
double _t_239_;
double _t_182_;
double _t_181_;
double _t_200_;
double _t_195_;
double _t_194_;
double _t_192_;
double _t_231_;
double _t_190_;
double _t_219_;
double _t_204_;
double _t_202_;
double _t_193_;
double _t_256_;
double _t_244_;
double _t_205_;
double _t_229_;
double _t_227_;
double _t_207_;
double _t_224_;
double _t_222_;
double _t_221_;
double _t_220_;
double _t_266_;
double _t_264_;
double _t_254_;
double _t_252_;
double _t_232_;
double _t_249_;
double _t_247_;
double _t_246_;
double _t_245_;
double _t_277_;
double _t_275_;
double _t_242_;
double _t_240_;
double _t_237_;
double _t_235_;
double _t_234_;
double _t_272_;
double _t_270_;
double _t_233_;
double _t_206_;
double _t_217_;
double _t_215_;
double _t_212_;
double _t_210_;
double _t_209_;
double _t_261_;
double _t_259_;
double _t_208_;
double _t_262_;
double _t_289_;
double _t_267_;
double _t_287_;
double _t_284_;
double _t_282_;
double _t_260_;
double _t_265_;
double _t_263_;
double _t_300_;
double _t_268_;
double _t_298_;
double _t_258_;
double _t_295_;
double _t_293_;
double _t_257_;
double _t_285_;
double _t_278_;
double _t_290_;
double _t_276_;
double _t_280_;
double _t_273_;
double _t_271_;
double _t_274_;
double _t_301_;
double _t_291_;
double _t_279_;
double _t_269_;
double _t_296_;
_t_27_ = -u1[i-2][j][k-2];
_t_105_ = -u1[i-2][j][k-2];
_t_105_ += u1[i-2][j][k+2];
_t_8_ = -u1[i-2][j][k+2];
_t_27_ += u1[i+2][j][k-2];
_t_87_ = -u1[i+2][j][k-2];
_t_8_ += u1[i+2][j][k+2];
_t_87_ += u1[i+2][j][k+2];
_t_102_ = c2 * _t_105_;
_t_84_ = c2 * _t_87_;
_t_24_ = c2 * _t_27_;
_t_5_ = c2 * _t_8_;
_t_104_ = 2.0 * mu[i-2][j][k];
_t_104_ += la[i-2][j][k];
_t_103_ = _t_104_ * met2[i-2][j][k];
_t_101_ = _t_103_ * met1[i-2][j][k];
_t_123_ = 2.0 * mu[i+1][j][k];
_t_123_ += la[i+1][j][k];
_t_122_ = _t_123_ * met2[i+1][j][k];
_t_120_ = _t_122_ * met1[i+1][j][k];
_t_141_ = 2.0 * mu[i-1][j][k];
_t_141_ += la[i-1][j][k];
_t_140_ = _t_141_ * met2[i-1][j][k];
_t_138_ = _t_140_ * met1[i-1][j][k];
_t_86_ = 2.0 * mu[i+2][j][k];
_t_86_ += la[i+2][j][k];
_t_85_ = _t_86_ * met2[i+2][j][k];
_t_83_ = _t_85_ * met1[i+2][j][k];
_t_110_ = la[i-2][j][k] * met3[i-2][j][k];
_t_108_ = _t_110_ * met1[i-2][j][k];
_t_115_ = la[i-2][j][k] * met4[i-2][j][k];
_t_113_ = _t_115_ * met1[i-2][j][k];
_t_288_ = la[i-2][j][k] * met1[i-2][j][k];
_t_286_ = _t_288_ * met1[i-2][j][k];
_t_129_ = la[i+1][j][k] * met3[i+1][j][k];
_t_127_ = _t_129_ * met1[i+1][j][k];
_t_134_ = la[i+1][j][k] * met4[i+1][j][k];
_t_132_ = _t_134_ * met1[i+1][j][k];
_t_294_ = la[i+1][j][k] * met1[i+1][j][k];
_t_292_ = _t_294_ * met1[i+1][j][k];
_t_147_ = la[i-1][j][k] * met3[i-1][j][k];
_t_145_ = _t_147_ * met1[i-1][j][k];
_t_152_ = la[i-1][j][k] * met4[i-1][j][k];
_t_150_ = _t_152_ * met1[i-1][j][k];
_t_299_ = la[i-1][j][k] * met1[i-1][j][k];
_t_297_ = _t_299_ * met1[i-1][j][k];
_t_92_ = la[i+2][j][k] * met3[i+2][j][k];
_t_90_ = _t_92_ * met1[i+2][j][k];
_t_97_ = la[i+2][j][k] * met4[i+2][j][k];
_t_95_ = _t_97_ * met1[i+2][j][k];
_t_283_ = la[i+2][j][k] * met1[i+2][j][k];
_t_281_ = _t_283_ * met1[i+2][j][k];
_t_106_ = -u1[i-2][j][k-1];
_t_66_ = -u1[i-2][j][k-1];
_t_106_ += u1[i-2][j][k+1];
_t_102_ += c1 * _t_106_;
_t_100_ = _t_101_ * _t_102_;
_t_47_ = -u1[i-2][j][k+1];
_t_66_ += u1[i+2][j][k-1];
_t_88_ = -u1[i+2][j][k-1];
_t_47_ += u1[i+2][j][k+1];
_t_88_ += u1[i+2][j][k+1];
_t_84_ += c1 * _t_88_;
_t_82_ = _t_83_ * _t_84_;
_t_44_ = c2 * _t_47_;
_t_63_ = c2 * _t_66_;
_t_81_ = _t_100_ * strx[i];
_t_81_ += _t_82_ * strx[i];
_t_111_ = -u2[i-2][j][k-2];
_t_32_ = -u2[i-2][j][k-2];
_t_111_ += u2[i-2][j][k+2];
_t_109_ = c2 * _t_111_;
_t_13_ = -u2[i-2][j][k+2];
_t_32_ += u2[i+2][j][k-2];
_t_93_ = -u2[i+2][j][k-2];
_t_13_ += u2[i+2][j][k+2];
_t_93_ += u2[i+2][j][k+2];
_t_91_ = c2 * _t_93_;
_t_11_ = c2 * _t_13_;
_t_30_ = c2 * _t_32_;
_t_112_ = -u2[i-2][j][k-1];
_t_71_ = -u2[i-2][j][k-1];
_t_112_ += u2[i-2][j][k+1];
_t_109_ += c1 * _t_112_;
_t_107_ = _t_108_ * _t_109_;
_t_81_ += _t_107_ * stry[j];
_t_52_ = -u2[i-2][j][k+1];
_t_71_ += u2[i+2][j][k-1];
_t_94_ = -u2[i+2][j][k-1];
_t_52_ += u2[i+2][j][k+1];
_t_94_ += u2[i+2][j][k+1];
_t_91_ += c1 * _t_94_;
_t_89_ = _t_90_ * _t_91_;
_t_81_ += _t_89_ * stry[j];
_t_50_ = c2 * _t_52_;
_t_69_ = c2 * _t_71_;
_t_116_ = -u3[i-2][j][k-2];
_t_38_ = -u3[i-2][j][k-2];
_t_116_ += u3[i-2][j][k+2];
_t_114_ = c2 * _t_116_;
_t_19_ = -u3[i-2][j][k+2];
_t_38_ += u3[i+2][j][k-2];
_t_98_ = -u3[i+2][j][k-2];
_t_19_ += u3[i+2][j][k+2];
_t_98_ += u3[i+2][j][k+2];
_t_96_ = c2 * _t_98_;
_t_17_ = c2 * _t_19_;
_t_36_ = c2 * _t_38_;
_t_117_ = -u3[i-2][j][k-1];
_t_77_ = -u3[i-2][j][k-1];
_t_117_ += u3[i-2][j][k+1];
_t_114_ += c1 * _t_117_;
_t_81_ += _t_113_ * _t_114_;
_t_58_ = -u3[i-2][j][k+1];
_t_77_ += u3[i+2][j][k-1];
_t_99_ = -u3[i+2][j][k-1];
_t_58_ += u3[i+2][j][k+1];
_t_99_ += u3[i+2][j][k+1];
_t_96_ += c1 * _t_99_;
_t_81_ += _t_95_ * _t_96_;
_t_80_ = c2 * _t_81_;
_t_56_ = c2 * _t_58_;
_t_75_ = c2 * _t_77_;
_t_124_ = -u1[i+1][j][k-2];
_t_28_ = u1[i+1][j][k-2];
_t_124_ += u1[i+1][j][k+2];
_t_121_ = c2 * _t_124_;
_t_9_ = u1[i+1][j][k+2];
_t_28_ -= u1[i-1][j][k-2];
_t_24_ += c1 * _t_28_;
_t_142_ = -u1[i-1][j][k-2];
_t_9_ -= u1[i-1][j][k+2];
_t_5_ += c1 * _t_9_;
_t_142_ += u1[i-1][j][k+2];
_t_139_ = c2 * _t_142_;
_t_125_ = -u1[i+1][j][k-1];
_t_67_ = u1[i+1][j][k-1];
_t_125_ += u1[i+1][j][k+1];
_t_121_ += c1 * _t_125_;
_t_119_ = _t_120_ * _t_121_;
_t_118_ = _t_119_ * strx[i];
_t_48_ = u1[i+1][j][k+1];
_t_67_ -= u1[i-1][j][k-1];
_t_63_ += c1 * _t_67_;
_t_143_ = -u1[i-1][j][k-1];
_t_48_ -= u1[i-1][j][k+1];
_t_44_ += c1 * _t_48_;
_t_143_ += u1[i-1][j][k+1];
_t_139_ += c1 * _t_143_;
_t_137_ = _t_138_ * _t_139_;
_t_118_ += _t_137_ * strx[i];
_t_130_ = -u2[i+1][j][k-2];
_t_33_ = u2[i+1][j][k-2];
_t_130_ += u2[i+1][j][k+2];
_t_128_ = c2 * _t_130_;
_t_14_ = u2[i+1][j][k+2];
_t_33_ -= u2[i-1][j][k-2];
_t_30_ += c1 * _t_33_;
_t_148_ = -u2[i-1][j][k-2];
_t_14_ -= u2[i-1][j][k+2];
_t_11_ += c1 * _t_14_;
_t_148_ += u2[i-1][j][k+2];
_t_146_ = c2 * _t_148_;
_t_131_ = -u2[i+1][j][k-1];
_t_72_ = u2[i+1][j][k-1];
_t_131_ += u2[i+1][j][k+1];
_t_128_ += c1 * _t_131_;
_t_126_ = _t_127_ * _t_128_;
_t_118_ += _t_126_ * stry[j];
_t_53_ = u2[i+1][j][k+1];
_t_72_ -= u2[i-1][j][k-1];
_t_69_ += c1 * _t_72_;
_t_149_ = -u2[i-1][j][k-1];
_t_53_ -= u2[i-1][j][k+1];
_t_50_ += c1 * _t_53_;
_t_149_ += u2[i-1][j][k+1];
_t_146_ += c1 * _t_149_;
_t_144_ = _t_145_ * _t_146_;
_t_118_ += _t_144_ * stry[j];
_t_135_ = -u3[i+1][j][k-2];
_t_39_ = u3[i+1][j][k-2];
_t_135_ += u3[i+1][j][k+2];
_t_133_ = c2 * _t_135_;
_t_20_ = u3[i+1][j][k+2];
_t_39_ -= u3[i-1][j][k-2];
_t_36_ += c1 * _t_39_;
_t_153_ = -u3[i-1][j][k-2];
_t_20_ -= u3[i-1][j][k+2];
_t_17_ += c1 * _t_20_;
_t_153_ += u3[i-1][j][k+2];
_t_151_ = c2 * _t_153_;
_t_136_ = -u3[i+1][j][k-1];
_t_78_ = u3[i+1][j][k-1];
_t_136_ += u3[i+1][j][k+1];
_t_133_ += c1 * _t_136_;
_t_118_ += _t_132_ * _t_133_;
_t_59_ = u3[i+1][j][k+1];
_t_78_ -= u3[i-1][j][k-1];
_t_75_ += c1 * _t_78_;
_t_154_ = -u3[i-1][j][k-1];
_t_59_ -= u3[i-1][j][k+1];
_t_56_ += c1 * _t_59_;
_t_154_ += u3[i-1][j][k+1];
_t_151_ += c1 * _t_154_;
_t_118_ += _t_150_ * _t_151_;
_t_80_ += c1 * _t_118_;
_t_79_ = _t_80_ * stry[j];
r1ic0jc0kc0 += _t_79_;
_t_26_ = 2.0 * mu[i][j][k-2];
_t_26_ += la[i][j][k-2];
_t_25_ = _t_26_ * met2[i][j][k-2];
_t_178_ = la[i][j][k-2] * met2[i][j][k-2];
_t_23_ = _t_25_ * met1[i][j][k-2];
_t_22_ = _t_23_ * _t_24_;
_t_21_ = _t_22_ * strx[i];
_t_1_ = _t_21_ * stry[j];
_t_176_ = _t_178_ * met1[i][j][k-2];
_t_37_ = mu[i][j][k-2] * met4[i][j][k-2];
_t_35_ = _t_37_ * met1[i][j][k-2];
_t_34_ = _t_35_ * _t_36_;
_t_1_ += _t_34_ * stry[j];
_t_31_ = mu[i][j][k-2] * met3[i][j][k-2];
_t_173_ = mu[i][j][k-2] * met3[i][j][k-2];
_t_29_ = _t_31_ * met1[i][j][k-2];
_t_1_ += _t_29_ * _t_30_;
_t_171_ = _t_173_ * met1[i][j][k-2];
_t_46_ = 2.0 * mu[i][j][k+1];
_t_46_ += la[i][j][k+1];
_t_45_ = _t_46_ * met2[i][j][k+1];
_t_191_ = la[i][j][k+1] * met2[i][j][k+1];
_t_43_ = _t_45_ * met1[i][j][k+1];
_t_42_ = _t_43_ * _t_44_;
_t_189_ = _t_191_ * met1[i][j][k+1];
_t_41_ = _t_42_ * strx[i+2];
_t_40_ = _t_41_ * stry[j];
_t_57_ = mu[i][j][k+1] * met4[i][j][k+1];
_t_55_ = _t_57_ * met1[i][j][k+1];
_t_54_ = _t_55_ * _t_56_;
_t_40_ += _t_54_ * stry[j];
_t_51_ = mu[i][j][k+1] * met3[i][j][k+1];
_t_186_ = mu[i][j][k+1] * met3[i][j][k+1];
_t_49_ = _t_51_ * met1[i][j][k+1];
_t_40_ += _t_49_ * _t_50_;
_t_184_ = _t_186_ * met1[i][j][k+1];
_t_65_ = 2.0 * mu[i][j][k-1];
_t_65_ += la[i][j][k-1];
_t_64_ = _t_65_ * met2[i][j][k-1];
_t_203_ = la[i][j][k-1] * met2[i][j][k-1];
_t_62_ = _t_64_ * met1[i][j][k-1];
_t_61_ = _t_62_ * _t_63_;
_t_201_ = _t_203_ * met1[i][j][k-1];
_t_60_ = _t_61_ * strx[i-2];
_t_40_ += _t_60_ * stry[j];
_t_76_ = mu[i][j][k-1] * met4[i][j][k-1];
_t_74_ = _t_76_ * met1[i][j][k-1];
_t_73_ = _t_74_ * _t_75_;
_t_40_ += _t_73_ * stry[j];
_t_70_ = mu[i][j][k-1] * met3[i][j][k-1];
_t_198_ = mu[i][j][k-1] * met3[i][j][k-1];
_t_68_ = _t_70_ * met1[i][j][k-1];
_t_40_ += _t_68_ * _t_69_;
_t_196_ = _t_198_ * met1[i][j][k-1];
_t_0_ = c1 * _t_40_;
_t_7_ = 2.0 * mu[i][j][k+2];
_t_7_ += la[i][j][k+2];
_t_6_ = _t_7_ * met2[i][j][k+2];
_t_166_ = la[i][j][k+2] * met2[i][j][k+2];
_t_4_ = _t_6_ * met1[i][j][k+2];
_t_3_ = _t_4_ * _t_5_;
_t_2_ = _t_3_ * strx[i];
_t_1_ += _t_2_ * stry[j];
_t_164_ = _t_166_ * met1[i][j][k+2];
_t_18_ = mu[i][j][k+2] * met4[i][j][k+2];
_t_16_ = _t_18_ * met1[i][j][k+2];
_t_15_ = _t_16_ * _t_17_;
_t_1_ += _t_15_ * stry[j];
_t_12_ = mu[i][j][k+2] * met3[i][j][k+2];
_t_161_ = mu[i][j][k+2] * met3[i][j][k+2];
_t_10_ = _t_12_ * met1[i][j][k+2];
_t_1_ += _t_10_ * _t_11_;
_t_0_ += c2 * _t_1_;
r1ic0jc0kc0 += _t_0_;
_t_159_ = _t_161_ * met1[i][j][k+2];
_t_162_ = -u1[i][j-2][k+2];
_t_225_ = u1[i][j-2][k+2];
_t_162_ += u1[i][j+2][k+2];
_t_160_ = c2 * _t_162_;
_t_213_ = u1[i][j+2][k+2];
_t_174_ = -u1[i][j-2][k-2];
_t_225_ -= u1[i][j-2][k-2];
_t_174_ += u1[i][j+2][k-2];
_t_213_ -= u1[i][j+2][k-2];
_t_172_ = c2 * _t_174_;
_t_211_ = c2 * _t_213_;
_t_223_ = c2 * _t_225_;
_t_163_ = -u1[i][j-1][k+2];
_t_250_ = u1[i][j-1][k+2];
_t_163_ += u1[i][j+1][k+2];
_t_160_ += c1 * _t_163_;
_t_158_ = _t_159_ * _t_160_;
_t_238_ = u1[i][j+1][k+2];
_t_157_ = _t_158_ * stry[j+2];
_t_156_ = _t_157_ * strx[i];
_t_175_ = -u1[i][j-1][k-2];
_t_250_ -= u1[i][j-1][k-2];
_t_175_ += u1[i][j+1][k-2];
_t_172_ += c1 * _t_175_;
_t_238_ -= u1[i][j+1][k-2];
_t_170_ = _t_171_ * _t_172_;
_t_169_ = _t_170_ * stry[j];
_t_156_ += _t_169_ * strx[i];
_t_236_ = c2 * _t_238_;
_t_248_ = c2 * _t_250_;
_t_167_ = -u2[i][j-2][k+2];
_t_230_ = u2[i][j-2][k+2];
_t_167_ += u2[i][j+2][k+2];
_t_165_ = c2 * _t_167_;
_t_218_ = u2[i][j+2][k+2];
_t_179_ = -u2[i][j-2][k-2];
_t_230_ -= u2[i][j-2][k-2];
_t_179_ += u2[i][j+2][k-2];
_t_218_ -= u2[i][j+2][k-2];
_t_177_ = c2 * _t_179_;
_t_216_ = c2 * _t_218_;
_t_228_ = c2 * _t_230_;
_t_168_ = -u2[i][j-1][k+2];
_t_255_ = u2[i][j-1][k+2];
_t_168_ += u2[i][j+1][k+2];
_t_165_ += c1 * _t_168_;
_t_156_ += _t_164_ * _t_165_;
_t_243_ = u2[i][j+1][k+2];
_t_180_ = -u2[i][j-1][k-2];
_t_255_ -= u2[i][j-1][k-2];
_t_180_ += u2[i][j+1][k-2];
_t_177_ += c1 * _t_180_;
_t_156_ += _t_176_ * _t_177_;
_t_243_ -= u2[i][j+1][k-2];
_t_155_ = c2 * _t_156_;
_t_241_ = c2 * _t_243_;
_t_253_ = c2 * _t_255_;
_t_187_ = -u1[i][j-2][k+1];
_t_226_ = u1[i][j-2][k+1];
_t_187_ += u1[i][j+2][k+1];
_t_185_ = c2 * _t_187_;
_t_214_ = u1[i][j+2][k+1];
_t_199_ = -u1[i][j-2][k-1];
_t_226_ -= u1[i][j-2][k-1];
_t_223_ += c1 * _t_226_;
_t_199_ += u1[i][j+2][k-1];
_t_214_ -= u1[i][j+2][k-1];
_t_211_ += c1 * _t_214_;
_t_197_ = c2 * _t_199_;
_t_188_ = -u1[i][j-1][k+1];
_t_251_ = u1[i][j-1][k+1];
_t_188_ += u1[i][j+1][k+1];
_t_185_ += c1 * _t_188_;
_t_183_ = _t_184_ * _t_185_;
_t_239_ = u1[i][j+1][k+1];
_t_182_ = _t_183_ * stry[j-2];
_t_181_ = _t_182_ * strx[i];
_t_200_ = -u1[i][j-1][k-1];
_t_251_ -= u1[i][j-1][k-1];
_t_248_ += c1 * _t_251_;
_t_200_ += u1[i][j+1][k-1];
_t_197_ += c1 * _t_200_;
_t_239_ -= u1[i][j+1][k-1];
_t_236_ += c1 * _t_239_;
_t_195_ = _t_196_ * _t_197_;
_t_194_ = _t_195_ * stry[j];
_t_181_ += _t_194_ * strx[i];
_t_192_ = -u2[i][j-2][k+1];
_t_231_ = u2[i][j-2][k+1];
_t_192_ += u2[i][j+2][k+1];
_t_190_ = c2 * _t_192_;
_t_219_ = u2[i][j+2][k+1];
_t_204_ = -u2[i][j-2][k-1];
_t_231_ -= u2[i][j-2][k-1];
_t_228_ += c1 * _t_231_;
_t_204_ += u2[i][j+2][k-1];
_t_219_ -= u2[i][j+2][k-1];
_t_216_ += c1 * _t_219_;
_t_202_ = c2 * _t_204_;
_t_193_ = -u2[i][j-1][k+1];
_t_256_ = u2[i][j-1][k+1];
_t_193_ += u2[i][j+1][k+1];
_t_190_ += c1 * _t_193_;
_t_181_ += _t_189_ * _t_190_;
_t_244_ = u2[i][j+1][k+1];
_t_205_ = -u2[i][j-1][k-1];
_t_256_ -= u2[i][j-1][k-1];
_t_253_ += c1 * _t_256_;
_t_205_ += u2[i][j+1][k-1];
_t_202_ += c1 * _t_205_;
_t_181_ += _t_201_ * _t_202_;
_t_155_ += c1 * _t_181_;
r1ic0jc0kc0 += _t_155_;
_t_244_ -= u2[i][j+1][k-1];
_t_241_ += c1 * _t_244_;
_t_229_ = mu[i][j-2][k] * met2[i][j-2][k];
_t_227_ = _t_229_ * met1[i][j-2][k];
_t_207_ = _t_227_ * _t_228_;
_t_224_ = mu[i][j-2][k] * met3[i][j-2][k];
_t_222_ = _t_224_ * met1[i][j-2][k];
_t_221_ = _t_222_ * _t_223_;
_t_220_ = _t_221_ * stry[j];
_t_207_ += _t_220_ * strx[i];
_t_266_ = mu[i][j-2][k] * met1[i][j-2][k];
_t_264_ = _t_266_ * met1[i][j-2][k];
_t_254_ = mu[i][j-1][k] * met2[i][j-1][k];
_t_252_ = _t_254_ * met1[i][j-1][k];
_t_232_ = _t_252_ * _t_253_;
_t_249_ = mu[i][j-1][k] * met3[i][j-1][k];
_t_247_ = _t_249_ * met1[i][j-1][k];
_t_246_ = _t_247_ * _t_248_;
_t_245_ = _t_246_ * stry[j];
_t_232_ += _t_245_ * strx[i];
_t_277_ = mu[i][j-1][k] * met1[i][j-1][k];
_t_275_ = _t_277_ * met1[i][j-1][k];
_t_242_ = mu[i][j+1][k] * met2[i][j+1][k];
_t_240_ = _t_242_ * met1[i][j+1][k];
_t_232_ += _t_240_ * _t_241_;
_t_237_ = mu[i][j+1][k] * met3[i][j+1][k];
_t_235_ = _t_237_ * met1[i][j+1][k];
_t_234_ = _t_235_ * _t_236_;
_t_272_ = mu[i][j+1][k] * met1[i][j+1][k];
_t_270_ = _t_272_ * met1[i][j+1][k];
_t_233_ = _t_234_ * stry[j-1];
_t_232_ += _t_233_ * strx[i];
_t_206_ = c1 * _t_232_;
_t_217_ = mu[i][j+2][k] * met2[i][j+2][k];
_t_215_ = _t_217_ * met1[i][j+2][k];
_t_207_ += _t_215_ * _t_216_;
_t_212_ = mu[i][j+2][k] * met3[i][j+2][k];
_t_210_ = _t_212_ * met1[i][j+2][k];
_t_209_ = _t_210_ * _t_211_;
_t_261_ = mu[i][j+2][k] * met1[i][j+2][k];
_t_259_ = _t_261_ * met1[i][j+2][k];
_t_208_ = _t_209_ * stry[j+1];
_t_207_ += _t_208_ * strx[i];
_t_206_ += c2 * _t_207_;
r1ic0jc0kc0 += _t_206_;
_t_262_ = -u2[i-2][j+2][k];
_t_289_ = u2[i-2][j+2][k];
_t_289_ -= u2[i-2][j-2][k];
_t_267_ = -u2[i-2][j-2][k];
_t_287_ = c2 * _t_289_;
_t_267_ += u2[i+2][j-2][k];
_t_284_ = -u2[i+2][j-2][k];
_t_262_ += u2[i+2][j+2][k];
_t_284_ += u2[i+2][j+2][k];
_t_282_ = c2 * _t_284_;
_t_260_ = c2 * _t_262_;
_t_265_ = c2 * _t_267_;
_t_263_ = -u2[i-1][j+2][k];
_t_300_ = u2[i-1][j+2][k];
_t_300_ -= u2[i-1][j-2][k];
_t_268_ = -u2[i-1][j-2][k];
_t_298_ = c2 * _t_300_;
_t_268_ += u2[i+1][j-2][k];
_t_265_ += c1 * _t_268_;
_t_258_ = _t_264_ * _t_265_;
_t_295_ = -u2[i+1][j-2][k];
_t_263_ += u2[i+1][j+2][k];
_t_260_ += c1 * _t_263_;
_t_258_ += _t_259_ * _t_260_;
_t_295_ += u2[i+1][j+2][k];
_t_293_ = c2 * _t_295_;
_t_257_ = c2 * _t_258_;
_t_285_ = -u2[i+2][j-1][k];
_t_278_ = u2[i+2][j-1][k];
_t_278_ -= u2[i-2][j-1][k];
_t_290_ = -u2[i-2][j-1][k];
_t_276_ = c2 * _t_278_;
_t_290_ += u2[i-2][j+1][k];
_t_287_ += c1 * _t_290_;
_t_280_ = _t_286_ * _t_287_;
_t_273_ = -u2[i-2][j+1][k];
_t_273_ += u2[i+2][j+1][k];
_t_285_ += u2[i+2][j+1][k];
_t_282_ += c1 * _t_285_;
_t_280_ += _t_281_ * _t_282_;
_t_257_ += c2 * _t_280_;
_t_271_ = c2 * _t_273_;
_t_274_ = -u2[i-1][j+1][k];
_t_301_ = u2[i-1][j+1][k];
_t_301_ -= u2[i-1][j-1][k];
_t_298_ += c1 * _t_301_;
_t_291_ = _t_297_ * _t_298_;
_t_279_ = -u2[i-1][j-1][k];
_t_279_ += u2[i+1][j-1][k];
_t_276_ += c1 * _t_279_;
_t_269_ = _t_275_ * _t_276_;
_t_296_ = -u2[i+1][j-1][k];
_t_274_ += u2[i+1][j+1][k];
_t_271_ += c1 * _t_274_;
_t_269_ += _t_270_ * _t_271_;
_t_257_ += c1 * _t_269_;
_t_296_ += u2[i+1][j+1][k];
_t_293_ += c1 * _t_296_;
_t_291_ += _t_292_ * _t_293_;
_t_257_ += c1 * _t_291_;
r1ic0jc0kc0 += _t_257_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
}
// Allocate a device buffer of 'count' doubles and copy 'host' into it.
// 'errmsg' is reported by check_error if the allocation fails.
static double* copy_to_device (const double *host, long count, const char *errmsg) {
    double *dev;
    cudaMalloc (&dev, sizeof(double)*count);
    check_error (errmsg);
    cudaMemcpy (dev, host, sizeof(double)*count, cudaMemcpyHostToDevice);
    return dev;
}
// Host driver: stage every input array on the device, run the 'curvi'
// stencil kernel once over the N^3 domain, copy r1 back to the host, and
// release all device memory (the original version leaked every buffer).
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
    long vol = (long)N*N*N;   // element count of each full 3-D volume
    double *r1   = copy_to_device (h_r1,   vol, "Failed to allocate device memory for r1\n");
    double *u1   = copy_to_device (h_u1,   vol, "Failed to allocate device memory for u1\n");
    double *u2   = copy_to_device (h_u2,   vol, "Failed to allocate device memory for u2\n");
    double *u3   = copy_to_device (h_u3,   vol, "Failed to allocate device memory for u3\n");
    double *mu   = copy_to_device (h_mu,   vol, "Failed to allocate device memory for mu\n");
    double *la   = copy_to_device (h_la,   vol, "Failed to allocate device memory for la\n");
    double *met1 = copy_to_device (h_met1, vol, "Failed to allocate device memory for met1\n");
    double *met2 = copy_to_device (h_met2, vol, "Failed to allocate device memory for met2\n");
    double *met3 = copy_to_device (h_met3, vol, "Failed to allocate device memory for met3\n");
    double *met4 = copy_to_device (h_met4, vol, "Failed to allocate device memory for met4\n");
    // Stretching arrays are 1-D (length N), not full volumes.
    double *strx = copy_to_device (h_strx, N, "Failed to allocate device memory for strx\n");
    double *stry = copy_to_device (h_stry, N, "Failed to allocate device memory for stry\n");
    // NOTE: 'ceil' here is the project's two-argument ceiling-division macro
    // defined earlier in this file, not math.h ceil.
    dim3 blockconfig (16, 1, 8);
    dim3 gridconfig (ceil(N, blockconfig.x), 1, ceil(N, blockconfig.z));
    curvi <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
    check_error ("curvi kernel launch failed\n");
    // Blocking copy of the result; implicitly synchronizes with the kernel.
    cudaMemcpy (h_r1, r1, sizeof(double)*vol, cudaMemcpyDeviceToHost);
    // Bug fix: release device memory; the original returned without freeing.
    cudaFree (r1);
    cudaFree (u1);
    cudaFree (u2);
    cudaFree (u3);
    cudaFree (mu);
    cudaFree (la);
    cudaFree (met1);
    cudaFree (met2);
    cudaFree (met3);
    cudaFree (met4);
    cudaFree (strx);
    cudaFree (stry);
}
|
9,249 | #include "includes.h"
// Element-wise square of two input arrays: a2[i] = a[i]^2, b2[i] = b[i]^2.
// Semantics inferred from the kernel name and parameter layout — the
// original body was an empty TODO.
// Grid-stride loop, so any launch configuration covers all 'length' elements.
__global__ void kernelPowerTwo(const float *a, const float *b, const unsigned int length, float *a2, float *b2)
{
    const unsigned int stride = gridDim.x * blockDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < length; i += stride) {
        a2[i] = a[i] * a[i];
        b2[i] = b[i] * b[i];
    }
}
9,250 | #include <iostream>
#include <vector>
//#include <cstdio> - uncomment for printf in kernels
//#include <cuda_runtime.h>
//cuda is included automatically when compiling with nvcc
typedef double REAL_T;
//-----------------------------------------------------------------------------
// Stopwatch built on a pair of CUDA events. Usage: start(stream), stop(),
// then elapsed() for the interval in milliseconds.
class CUDAEventTimer {
public:
    CUDAEventTimer() {
        cudaEventCreate(&start_);
        cudaEventCreate(&stop_);
    }
    ~CUDAEventTimer() {
        cudaEventDestroy(start_);
        cudaEventDestroy(stop_);
    }
    // Record the start event on 'stream' (default stream when omitted);
    // the same stream is reused by stop().
    void start(cudaStream_t stream = 0) {
        stream_ = stream;
        cudaEventRecord(start_, stream_);
    }
    // Record the stop event and block the host until it has completed.
    void stop() {
        cudaEventRecord(stop_, stream_);
        cudaEventSynchronize(stop_);
    }
    // Milliseconds between the recorded start and stop events.
    float elapsed() {
        float ms = 0;
        cudaEventElapsedTime(&ms, start_, stop_);
        return ms;
    }
private:
    cudaEvent_t start_, stop_;
    cudaStream_t stream_;
};
// Number of blocks needed so that blocks * threads_per_block covers
// 'length' elements: ceiling division.
int compute_blocks(int length, int threads_per_block) {
    return (length + threads_per_block - 1) / threads_per_block;
}
// Per-dimension block counts for a 3-D launch, packed into a dim3.
dim3 compute_blocks(int xsize, int ysize, int zsize,
                    int threads_per_block_x,
                    int threads_per_block_y,
                    int threads_per_block_z) {
    const int bx = compute_blocks(xsize, threads_per_block_x);
    const int by = compute_blocks(ysize, threads_per_block_y);
    const int bz = compute_blocks(zsize, threads_per_block_z);
    return dim3(bx, by, bz);
}
//-----------------------------------------------------------------------------
// Per-cell update applied by the compute kernel: cos(v) * exp(v).
__device__ REAL_T cell_op(REAL_T v) {
    const REAL_T c = cos(v);
    const REAL_T e = exp(v);
    return c * e;
}
//-----------------------------------------------------------------------------
// Apply cell_op to the one grid cell owned by this thread, guarded against
// out-of-range threads. The *_offset parameters are accepted but currently
// unused (the "+ offset" terms are disabled in the original).
__global__ void cuda_kernel(REAL_T* grid,
                            dim3 size,
                            int x_offset,
                            int y_offset,
                            int z_offset) {
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const int z = blockIdx.z * blockDim.z + threadIdx.z;
    const bool inside = (x < size.x) && (y < size.y) && (z < size.z);
    if (inside) {
        // Row-major flattening: x fastest, then y, then z.
        const int cell = x + size.x * (y + size.y * z);
        grid[cell] = cell_op(grid[cell]);
    }
}
typedef long long int CYCLES;
// Busy-wait on the device for approximately 'cycles' clock64 ticks,
// simulating a kernel of fixed duration.
__global__ void cuda_kernel_cycles(CYCLES cycles){
    const CYCLES begin = clock64();
    for (;;) {
        if (clock64() - begin >= cycles) break;
    }
}
//-----------------------------------------------------------------------------
// Benchmark: compare ONE full-grid kernel launch against MANY single-block
// launches covering the same domain, and report the slowdown.
// Args: xsize ysize zsize threads_per_block [kernel duration(ms)].
// When a duration is given, a busy-wait kernel of that length replaces the
// compute kernel.
int main(int argc, char** argv) {
if(argc < 5) {
std::cout << "usage: " << argv[0]
<< " xsize ysize zsize threads_per_block [kernel duration(ms)]\n";
return 1;
}
const int XSIZE = atoi(argv[1]);
const int YSIZE = atoi(argv[2]);
const int ZSIZE = atoi(argv[3]);
const int CUDA_THREADS_PER_BLOCK = atoi(argv[4]);
const size_t TOTAL_SIZE = XSIZE * YSIZE * ZSIZE;
const size_t TOTAL_BYTE_SIZE = TOTAL_SIZE * sizeof(REAL_T);
bool use_cycles = false;
int time_ms = 0;
CYCLES cycles = 0;
// Optional 5th argument switches to the fixed-duration busy-wait kernel.
if( argc > 5 ) {
time_ms = atoi(argv[5]);
use_cycles = true;
}
// get clock rate in kHz
cudaDeviceProp props;
if( cudaGetDeviceProperties(&props, 0) != cudaSuccess ) return -1;
const unsigned int CLOCK_RATE_Hz = props.clockRate * 1000;
std::cout << "Clock rate (GHz): "
<< CLOCK_RATE_Hz / double(1024 * 1024 * 1024)
<< std::endl;
// Convert the requested wall-clock duration into device clock cycles.
cycles = CLOCK_RATE_Hz * (time_ms / 1000.0);
// 3D grid setup
std::vector< REAL_T > h_grid(TOTAL_SIZE, REAL_T(0));
REAL_T* d_grid = 0;
// NOTE(review): allocates twice the needed bytes (2*TOTAL_BYTE_SIZE) —
// possibly deliberate head-room, but looks like a typo; confirm before use.
if( cudaMalloc(&d_grid, 2*TOTAL_BYTE_SIZE) != cudaSuccess ) return -2;
if( cudaMemcpy(d_grid, &h_grid[0], TOTAL_BYTE_SIZE, cudaMemcpyHostToDevice)
!= cudaSuccess ) return -3;
// launch configuration
// Cubic thread blocks: threads_per_block^3 threads per block.
const dim3 CUDA_THREADS = dim3(CUDA_THREADS_PER_BLOCK,
CUDA_THREADS_PER_BLOCK,
CUDA_THREADS_PER_BLOCK);
const dim3 CUDA_BLOCKS = compute_blocks(XSIZE, YSIZE, ZSIZE,
CUDA_THREADS_PER_BLOCK,
CUDA_THREADS_PER_BLOCK,
CUDA_THREADS_PER_BLOCK);
int x_offset = 0;
int y_offset = 0;
int z_offset = 0;
const dim3 GRID_SIZE = dim3(XSIZE, YSIZE, ZSIZE);
cudaDeviceSynchronize();
// launch one kernel encompassing the entire grid...
// NOTE(review): this printout labels the data dimensions "Grid size" and
// the *block count* "Block size" — the labels don't match the variables.
std::cout << "Launching kernel:\n"
<< " Grid size: "
<< GRID_SIZE.x << ", " << GRID_SIZE.y << ", " << GRID_SIZE.z << std::endl
<< " Block size: "
<< CUDA_BLOCKS.x << ", " << CUDA_BLOCKS.y << ", " << CUDA_BLOCKS.z << std::endl;
CUDAEventTimer timer;
timer.start();
if( use_cycles ) {
cuda_kernel_cycles<<< CUDA_BLOCKS, CUDA_THREADS >>>(cycles);
} else {
cuda_kernel<<< CUDA_BLOCKS, CUDA_THREADS >>>(d_grid,
GRID_SIZE,
x_offset,
y_offset,
z_offset);
}
timer.stop();
// elapsed() is in milliseconds (cudaEventElapsedTime).
const float single_elapsed = timer.elapsed();
std::cout << "Single kernel launch: " << single_elapsed << std::endl;
// ...and multiple time the same kernel on the same grid
std::cout << "Launching kernel:\n"
<< " Grid size: "
<< GRID_SIZE.x << ", " << GRID_SIZE.y << ", " << GRID_SIZE.z << std::endl
<< " Block size: 1, 1, 1" << std::endl;
cudaDeviceSynchronize();
timer.start();
// One single-block launch per block of the original grid.
// NOTE(review): the offsets are computed here but cuda_kernel ignores them
// (its "+ offset" terms are commented out), so every 1-block launch
// processes the same corner of the grid — confirm whether that is intended
// for a pure launch-overhead benchmark.
for( int k = 0; k != CUDA_BLOCKS.z; ++k ) {
z_offset = k * CUDA_THREADS.z;
for( int j = 0; j != CUDA_BLOCKS.y; ++j ) {
y_offset = j * CUDA_THREADS.y;
for( int i = 0; i != CUDA_BLOCKS.x; ++i ) {
x_offset = i * CUDA_THREADS.x;
if( use_cycles ) {
cuda_kernel_cycles<<< 1, CUDA_THREADS >>>(cycles);
} else {
cuda_kernel<<< 1, CUDA_THREADS >>>(d_grid, GRID_SIZE,
x_offset, y_offset, z_offset);
}
}
}
}
timer.stop();
const float multiple_elapsed = timer.elapsed();
std::cout << "Multiple kernel launches: " << multiple_elapsed << std::endl;
std::cout << "Multiple/Single %: " << 100 * multiple_elapsed / single_elapsed << std::endl;
// cleanup
cudaFree(d_grid);
return 0;
}
9,251 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Print an n x n row-major matrix to stdout, one row per line,
// preceded by a blank line.
void printmatrix(float * mat, int n){
    printf("\n");
    for (int row = 0; row < n; row++){
        for (int col = 0; col < n; col++){
            printf("%f ", mat[row*n + col]);
        }
        printf("\n");
    }
}
// Write an n x n row-major matrix to 'outfile', one row per line.
void writematrix(FILE *outfile, float * mat, int n){
    for (int row = 0; row < n; row++){
        for (int col = 0; col < n; col++){
            fprintf(outfile, "%f ", mat[row*n + col]);
        }
        fprintf(outfile, "\n");
    }
}
// One Jacobi half-sweep: every non-fixed interior cell of the n x n grid
// becomes the average of its four neighbors, read from dmatrix and written
// to dmatrix2. Launched with n blocks of n threads: blockIdx.x = row,
// threadIdx.x = column. Cells flagged in fix_points (== 1.0) are boundary
// conditions and are never overwritten.
__global__ void jacobi_iter(float *dmatrix, float *dmatrix2, float *fix_points, int n){
    int x = blockIdx.x;
    int y = threadIdx.x;
    // Bug fix: the original indexed out of bounds on the domain border —
    // e.g. dmatrix[(x-1)*n + y] for x == 0 and dmatrix[x*n - 1 + y] for
    // y == 0 (which wraps into the previous row). Only update the interior.
    if (x <= 0 || x >= n - 1 || y <= 0 || y >= n - 1) return;
    if (fix_points[x*n + y] != 1.0){
        dmatrix2[x*n + y] = dmatrix[x*n + 1 + y]*0.25;
        dmatrix2[x*n + y] += dmatrix[x*n - 1 + y]*0.25;
        dmatrix2[x*n + y] += dmatrix[(x-1)*n + y]*0.25;
        dmatrix2[x*n + y] += dmatrix[(x+1)*n + y]*0.25;
    }
}
// Per-block max-reduction of the element-wise difference between mat1 and
// mat2; block b's result lands in reduced_mat[b]. Requires the dynamic
// shared-memory size (blockDim.x * sizeof(float)) as the third launch arg.
__global__ void max_diff_reduce(float *mat1, float *mat2, float *reduced_mat) {
    extern __shared__ float sdata[];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    // Bug fix: the host-side convergence loop uses fabs(matrix[i]-matrix2[i]);
    // the signed difference used here made negative deltas invisible to the
    // max, so the reduction could report convergence too early.
    sdata[tid] = fabsf(mat1[i] - mat2[i]);
    __syncthreads();
    // Tree reduction in shared memory.
    // NOTE(review): assumes blockDim.x is a power of two; for other sizes the
    // elements between s and blockDim.x of the first pass are dropped.
    for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
        if (tid < s) {
            sdata[tid] = fmaxf(sdata[tid], sdata[tid + s]);
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) {
        reduced_mat[blockIdx.x] = sdata[0];
    }
}
// Driver: read a spec file (header "n delta", then fixed-value rectangles
// "val urow ucol drow dcol"), run ping-pong Jacobi iterations on the GPU
// until the largest per-cell change drops below delta, and write the
// converged grid to the output file.
int main(int argc, char **argv){
    if (argc < 3){
        fprintf(stderr, "usage: %s specfile outfile\n", argv[0]);
        return 1;
    }
    char * filename;
    filename = argv[1];
    FILE *spec = fopen(filename, "r");
    FILE *outfile = fopen(argv[2], "w");
    if (spec == NULL || outfile == NULL){
        fprintf(stderr, "cannot open spec/output file\n");
        return 1;
    }
    int n, i, j, iter; //size of square matrix
    float delta; //for ending iterations
    int check;
    check = fscanf(spec, "%d %f\n", &n, &delta);
    // Bug fix: the original ignored the fscanf result; a malformed header
    // left n and delta uninitialized.
    if (check != 2){
        fprintf(stderr, "malformed spec file header\n");
        return 1;
    }
    int msize = n*n*sizeof(float);
    float *matrix = (float *)malloc(msize); //host matrix
    float *matrix2 = (float *)malloc(msize); //host matrix
    float *fix_points = (float *)malloc(msize); //host matrix
    // Bug fix: the original zeroed only fix_points, so every non-fixed cell
    // of 'matrix' started from malloc garbage. Start the whole grid at 0.
    for (i = 0; i < n*n; i++){
        matrix[i] = 0.0f;
        matrix2[i] = 0.0f;
        fix_points[i] = 0.0f;
    }
    float *reduced_mat = (float *)malloc(n*sizeof(float));
    float *dmatrix; // kernel matrix for alternating
    cudaMalloc((void**) &dmatrix, msize);
    float *dmatrix2; // kernel matrix for alternating
    cudaMalloc((void**) &dmatrix2, msize);
    float *dfix_points; // kernel matrix fixed points
    cudaMalloc((void**) &dfix_points, msize);
    float *dreduced_mat;
    cudaMalloc((void**) &dreduced_mat, n*sizeof(float));
    // Fixed-value rectangles: val applied from (urow,ucol) to (drow,dcol).
    int urow, ucol, drow, dcol;
    float val;
    while (fscanf(spec, "\n%f %d %d %d %d\n", &val, &urow, &ucol, &drow, &dcol) == 5){
        printf("%f %d %d %d %d\n", val, urow, ucol, drow, dcol);
        for (i=urow; i<=drow; i++){
            for(j=ucol; j<=dcol; j++){
                matrix[i*n + j] = val;
                fix_points[i*n + j] = 1.0;
            }
        }
    }
    fclose(spec);
    cudaMemcpy(dmatrix, matrix, msize, cudaMemcpyHostToDevice);
    cudaMemcpy(dmatrix2, matrix, msize, cudaMemcpyHostToDevice);
    cudaMemcpy(dfix_points, fix_points, msize, cudaMemcpyHostToDevice);
    // One block per row, one thread per column.
    dim3 dimGrid(n,1);
    dim3 dimBlock(n,1,1);
    iter = 0;
    float maxdiff = 3; // anything > delta so the loop runs at least once
    while(maxdiff > delta){
        iter += 1;
        // Two half-steps ping-pong between the device buffers.
        jacobi_iter<<<dimGrid, dimBlock>>>(dmatrix, dmatrix2, dfix_points, n);
        cudaDeviceSynchronize(); // replaces deprecated cudaThreadSynchronize
        cudaMemcpy(matrix, dmatrix2, msize, cudaMemcpyDeviceToHost);
        jacobi_iter<<<dimGrid, dimBlock>>>(dmatrix2, dmatrix, dfix_points, n);
        cudaDeviceSynchronize();
        cudaMemcpy(matrix2, dmatrix, msize, cudaMemcpyDeviceToHost);
        // Host-side convergence test: largest absolute per-cell change.
        maxdiff = fabs(matrix[0] - matrix2[0]);
        for(i=1; i<n*n; i++){
            maxdiff = fmax((float) maxdiff, (float) fabs((matrix[i]-matrix2[i])));
        }
        printf("\niter: %d maxdiff: %f delta: %f\n", iter, maxdiff, delta);
    }
    // matrix2 already holds the most recent device state; the original's two
    // extra device-to-host copies into 'matrix' were dead code and are gone.
    writematrix(outfile, matrix2, n);
    fclose(outfile);
    // Bug fix: release device and host memory (the original leaked it all).
    cudaFree(dmatrix);
    cudaFree(dmatrix2);
    cudaFree(dfix_points);
    cudaFree(dreduced_mat);
    free(matrix);
    free(matrix2);
    free(fix_points);
    free(reduced_mat);
    return 0;
}
|
9,252 | #include <iostream>
// Simulation extents: an 8x8 2-D seed embedded in a 4-D (w,z,y,x) grid,
// padded by STEPS cells per side so the pattern cannot outgrow the domain.
constexpr int LEN_INITAL = 8;
constexpr int STEPS = 6;
constexpr int LEN_W = 1 + 2 * STEPS;
constexpr int LEN_Z = LEN_W;
constexpr int LEN_Y = LEN_INITAL + 2 * STEPS;
constexpr int LEN_X = LEN_INITAL + 2 * STEPS;
constexpr int LEN_TOTAL = LEN_W * LEN_Z * LEN_Y * LEN_X;
// Strides for flattening (w, z, y, x) into a 1-D index.
constexpr int SIZE_W = LEN_Z * LEN_Y * LEN_X;
constexpr int SIZE_Z = LEN_Y * LEN_X;
constexpr int SIZE_Y = LEN_X;
constexpr int NUM_THREADS = 512;
// Bug fix: LEN_TOTAL (67600) is not a multiple of NUM_THREADS, and the old
// truncating division (LEN_TOTAL / NUM_THREADS) left the last
// LEN_TOTAL % NUM_THREADS cells without a thread, so they were never
// simulated. Round up; the kernel's `idx < LEN_TOTAL` guard absorbs the
// overshoot.
constexpr int NUM_BLOCKS = (LEN_TOTAL + NUM_THREADS - 1) / NUM_THREADS;
// Flatten a (w, z, y, x) coordinate into a 1-D array offset using the
// global stride constants (host-side twin of coord_to_idx_dev).
inline int coord_to_idx(const int w, const int z, const int y, const int x) {
    int idx = x;
    idx += y * SIZE_Y;
    idx += z * SIZE_Z;
    idx += w * SIZE_W;
    return idx;
}
// Dump the (y, x) plane of the grid at fixed (w, z) to stdout,
// one row per line.
inline void print_slice(const int* grid, const int w, const int z) {
    for (int y = 0; y < LEN_Y; ++y) {
        for (int x = 0; x < LEN_X; ++x)
            std::cout << grid[coord_to_idx(w, z, y, x)] << " ";
        std::cout << "\n";
    }
}
// Device-side twin of coord_to_idx: flatten (w, z, y, x) into a 1-D offset.
__device__
int coord_to_idx_dev(const int w, const int z, const int y, const int x) {
    return x + SIZE_Y * y + SIZE_Z * z + SIZE_W * w;
}
// One generation of 4-D (w, z, y, x) Game of Life. Each thread owns one
// cell: it counts the active cells in the surrounding 3^4 window (clamped
// at the domain edges), then applies the standard rules — a live cell
// survives with 2 or 3 live neighbors, a dead cell is born with exactly 3 —
// writing the next state to grid_next.
__global__
void step(const int* grid, int* grid_next) {
// Find out where we are.
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < LEN_TOTAL) {
// Decompose the flat index back into (w, z, y, x) via the stride
// constants — the inverse of coord_to_idx_dev.
int left = idx;
int w = idx / SIZE_W;
left = idx - w * SIZE_W;
int z = left / SIZE_Z;
left = left - z * SIZE_Z;
int y = left / SIZE_Y;
int x = left - y * SIZE_Y;
// TODO: for loop here?
const int active = grid[idx];
// Count active neighbors.
int active_neighbors = 0;
// Neighbor window bounds, clamped to the grid: min inclusive,
// max exclusive (hence w+2, z+2, ...).
int min_nw = max(0, w - 1);
int max_nw = min(LEN_W, w + 2);
int min_nz = max(0, z - 1);
int max_nz = min(LEN_Z, z + 2);
int min_ny = max(0, y - 1);
int max_ny = min(LEN_Y, y + 2);
int min_nx = max(0, x - 1);
int max_nx = min(LEN_X, x + 2);
for (int nw = min_nw; nw < max_nw; ++nw) {
for (int nz = min_nz; nz < max_nz; ++nz) {
for (int ny = min_ny; ny < max_ny; ++ny) {
for (int nx = min_nx; nx < max_nx; ++nx) {
active_neighbors += grid[coord_to_idx_dev(nw, nz, ny, nx)];
}
}
}
}
// The window above included the cell itself; remove it from the count.
active_neighbors -= active;
// Rules
int active_next = active;
if (active == 1 && (active_neighbors < 2 || active_neighbors > 3)) {
active_next = 0;
}
else if (active == 0 && active_neighbors == 3) {
active_next = 1;
}
//active_next = idx;
grid_next[idx] = active_next;
}
}
// Driver: seed an 8x8 2-D pattern in the centre of the padded 4-D grid,
// run STEPS generations on the GPU, and print the final live-cell count.
int main() {
    // Initialize grid.
    int* grid;
    cudaMallocManaged(&grid, LEN_TOTAL * sizeof(int));
    // Bug fix: the original zeroed only LEN_TOTAL *bytes*, leaving three
    // quarters of the int grid uninitialized.
    cudaMemset(grid, 0, LEN_TOTAL * sizeof(int));
    int initial_grid[LEN_INITAL][LEN_INITAL] = {
        {1, 1, 0, 0, 1, 0, 1, 0},
        {1, 1, 1, 0, 1, 0, 1, 1},
        {0, 0, 1, 1, 1, 0, 0, 1},
        {0, 1, 0, 0, 0, 0, 1, 1},
        {0, 1, 0, 0, 1, 1, 1, 1},
        {1, 1, 1, 1, 1, 0, 0, 0},
        {1, 1, 1, 1, 1, 1, 1, 0},
        {1, 0, 1, 1, 0, 1, 0, 1}
    };
    // Copy the seed into the centre slice (w = z = STEPS) of the 4-D grid.
    for (int row = 0; row < LEN_INITAL; ++row) {
        for (int col = 0; col < LEN_INITAL; ++col) {
            grid[coord_to_idx(STEPS, STEPS, STEPS + row, STEPS + col)] = initial_grid[row][col];
        }
    }
    //print_slice(grid, STEPS, STEPS);
    int* grid_next;
    cudaMallocManaged(&grid_next, LEN_TOTAL * sizeof(int));
    // Zero grid_next too, so any cell missed by the launch reads as dead
    // rather than as uninitialized managed memory.
    cudaMemset(grid_next, 0, LEN_TOTAL * sizeof(int));
    // Bug fix: the global NUM_BLOCKS was computed with truncating division,
    // so the last LEN_TOTAL % NUM_THREADS cells were never simulated.
    // Compute a covering block count here with ceiling division; the
    // kernel's idx < LEN_TOTAL guard handles the overshoot.
    const int blocks = (LEN_TOTAL + NUM_THREADS - 1) / NUM_THREADS;
    for (int i = 0; i < STEPS; ++i) {
        //std::cout << "Step " << i << "\n";
        step<<<blocks, NUM_THREADS>>>(grid, grid_next);
        cudaDeviceSynchronize();
        std::swap(grid, grid_next);
        //print_slice(grid, STEPS, STEPS);
    }
    // Count actives.
    int count = 0;
    for (int i = 0; i < LEN_TOTAL; ++i) {
        count += grid[i];
    }
    std::cout << "Active: " << count << "\n";
    cudaFree(grid);
    cudaFree(grid_next);
    return 0;
}
|
9,253 | #include "includes.h"
// Block-level sum reduction with 8-way unrolling. Each block owns eight
// consecutive blockDim.x-sized segments of 'input': it first folds all
// eight into the first segment, then tree-reduces that segment in place
// and writes the block total to temp[blockIdx.x]. The reduction mutates
// 'input', matching the original.
__global__ void reduction_interleaved_unrolling_blocks8_1(int * input, int * temp, int size)
{
    const int tid = threadIdx.x;
    // Start of this block's 8-segment window, and this thread's slot in it.
    const int base = blockDim.x * blockIdx.x * 8;
    const int gid = base + tid;
    int * block_data = input + base;

    // Fold the eight strided segments into the first one — only when the
    // whole window is in range (same guard as the original).
    if (gid + 7 * blockDim.x < size)
    {
        int sum = 0;
        for (int seg = 0; seg < 8; seg++)
        {
            sum += input[gid + seg * blockDim.x];
        }
        input[gid] = sum;
    }
    __syncthreads();

    // In-place tree reduction over the first segment.
    for (int active = blockDim.x / 2; active > 0; active /= 2)
    {
        if (tid < active)
        {
            block_data[tid] += block_data[tid + active];
        }
        __syncthreads();
    }

    if (tid == 0)
    {
        temp[blockIdx.x] = block_data[0];
    }
}
9,254 | /* ==================================================================
Programmers: Conner Wulf (connerwulf@mail.usf.edu),
Derek Rodriguez (derek23@mail.usf.edu)
David Hoambrecker (david106@mail.usf.edu)
To Compile use: nvcc -o queens proj3-Nqueens.cu
you can specify the board size by compiling with: nvcc -o queens proj3-Nqueens.cu -DNUM=a
* where a must be >= 4 and <= 22
The program reads in 2 arguments, the first is the number of tuples generated by blockIdx.x
the second is the number of groups of columns, size multiple of board size.
ex. ./queens 4 1000 (based on board size is 12)
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <iostream>
#include <cuda_runtime.h>
#include <cuda.h>
#include <vector>
using namespace std;
// Shared state for the CPU solver and host driver.
static int total = 0;                   // number of complete solutions found by the CPU Solver
unsigned long count = 0;
long *answer;                           // host-side buffer for GPU results
struct timezone Idunno;
struct timeval startTime, endTime;      // wall-clock timing of the runs
// Board size; override at compile time with -DNUM=a (the file header says
// 4 <= a <= 22).
#ifndef NUM
#define NUM 12
#endif
//CPU helper function to test is a queen can be placed
// Returns 1 when a queen may be placed at board[row][col] on an n x n board,
// 0 otherwise. Because queens are placed column by column (left to right),
// only the row to the left and the two left-pointing diagonals need checking.
int isAllowed(int **board, int row, int col, int n)
{
    // Same row, columns to the left of col.
    for (int c = 0; c < col; c++)
        if (board[row][c] == 1)
            return 0;
    // Upper-left diagonal (includes the target square itself, which is empty
    // by construction).
    for (int r = row, c = col; r >= 0 && c >= 0; r--, c--)
        if (board[r][c] == 1)
            return 0;
    // Lower-left diagonal.
    for (int r = row, c = col; r < n && c >= 0; r++, c--)
        if (board[r][c] == 1)
            return 0;
    return 1;
}
// CPU Solver for N-queens problem
// CPU reference solver: recursive column-by-column backtracking.
// Increments the file-level counter `total` once per complete placement.
// Return value mirrors the original: the result of the last recursive call
// attempted at this column (1 if a full board was reached on that branch).
int Solver(int **board, int col, int n)
{
    if (col >= n)
    {
        total++;   // one complete, valid placement found
        return 1;
    }
    int result = 0;
    for (int row = 0; row < n; row++)
    {
        if (!isAllowed(board, row, col, n))
            continue;
        board[row][col] = 1;               // place, recurse, un-place
        result = Solver(board, col + 1, n);
        board[row][col] = 0;
    }
    return result;
}
// GPU parallel kernel for N-Queens
// GPU N-Queens kernel. A candidate solution is an N-tuple of row indices,
// one per column. The tuple is assembled from: blockIdx.y (entry 0),
// nBX digits of blockIdx.x, threadIdx.x, threadIdx.y, and genNum digits
// iterated on-device. Each (tx,ty) thread counts the valid completions of
// its prefix in sol[tx][ty]; thread (0,0) sums the block and writes the
// per-block total into answer[].
__global__ void kernel(long *answer, int SegSize, int nBX, int nBY, int genNum)
{
// Per-thread solution counts and per-thread working tuples, kept in
// shared memory (each thread only touches its own [tx][ty] slice).
__shared__ long sol[NUM][NUM];
__shared__ char tup[NUM][NUM][NUM];
int wrongCount = 0;
sol[threadIdx.x][threadIdx.y] = 0;
// Tuple entry 0 comes from this block's position inside its Y segment.
tup[threadIdx.x][threadIdx.y][0] = blockIdx.y % SegSize;
// Total number of on-device-generated suffixes, split across nBY segments.
int totalGenerated = powf(NUM, genNum);
int blockYSeg = blockIdx.y / SegSize;
int workLoad = totalGenerated / nBY;
int runOff = totalGenerated - workLoad *nBY;
// Decompose blockIdx.x into nBX base-NUM digits -> tuple entries 1..nBX.
int temp = blockIdx.x;
for(int x = 1; x <=nBX; x++)
{
tup[threadIdx.x][threadIdx.y][x] = temp % NUM;
temp = temp / NUM;
}
int tupCount = nBX;
tup[threadIdx.x][threadIdx.y][++tupCount] = threadIdx.x;
tup[threadIdx.x][threadIdx.y][++tupCount] = threadIdx.y;
// Validate the prefix: every pair of placed queens must differ in row and
// both diagonals. wrongCount stays 0 only for conflict-free prefixes.
for(int k = tupCount; k > 0; k--)
{
for(int m = k - 1, counter = 1; m >= 0; counter++, m--)
{
//Checks diagonal left, down
wrongCount += (tup[threadIdx.x][threadIdx.y][k] + counter) == tup[threadIdx.x][threadIdx.y][m];
//Checks row its in
wrongCount += tup[threadIdx.x][threadIdx.y][k] == tup[threadIdx.x][threadIdx.y][m];
// Checks diagonal left, up
wrongCount += (tup[threadIdx.x][threadIdx.y][k] - counter) == tup[threadIdx.x][threadIdx.y][m];
}
}
if (wrongCount == 0)
{
// Iterate this block-segment's share of the generated suffixes; the last
// segment also absorbs the division remainder (runOff).
int begin = blockYSeg * workLoad;
for(int c = begin; c < begin + workLoad + (blockYSeg == nBY - 1) * runOff; c++)
{
//last values is made in tuple, convert and store to tup array
int temp = c;
// NOTE(review): the generated digits are stored at index q (0..genNum-1),
// overwriting the prefix entries, while z walks tupCount+1.. unused — the
// uniqueness check below reads tupCount+1+a. Looks like q/z were swapped;
// confirm against the original project before relying on results.
for(int q = 0, z =tupCount + 1; q < genNum; z++, q++)
{
tup[threadIdx.x][threadIdx.y][q] = temp % NUM;
temp = temp / NUM;
}
//checks that the genNum tuple values are indeed unique (saves work overall)
for(int a = 0; a < genNum && wrongCount == 0; a++){
for(int b = 0; b < genNum && wrongCount == 0; b++){
wrongCount += tup[threadIdx.x][threadIdx.y][tupCount + 1 + a] == tup[threadIdx.x][threadIdx.y][tupCount + 1 + b] && a != b;
}
}
// Full-board conflict check over the completed tuple.
for(int k = NUM -1; k > wrongCount; k--)
{
for(int m = k - 1, counter = 1; m >= 0; counter++, m--)
{
//Checks diagonal left, down
wrongCount += (tup[threadIdx.x][threadIdx.y][k] + counter) == tup[threadIdx.x][threadIdx.y][m];
//Checks row its in
wrongCount += tup[threadIdx.x][threadIdx.y][k] == tup[threadIdx.x][threadIdx.y][m];
// Checks diagonal left, up
wrongCount += (tup[threadIdx.x][threadIdx.y][k] - counter) == tup[threadIdx.x][threadIdx.y][m];
}
}
// Conflict-free completion -> count it, then reset for the next suffix.
sol[threadIdx.x][threadIdx.y] += !(wrongCount);
wrongCount = 0;
}
}
__syncthreads();
// sum all threads in block to get total
if(threadIdx.x == 0 && threadIdx.y == 0)
{
long total = 0;
for(int i =0; i < NUM; i++){
for(int j = 0; j < NUM; j++){
total += sol[i][j];
}
}
answer[gridDim.x * blockIdx.y + blockIdx.x] = total;
}
__syncthreads();
}
// Prints and returns the wall-clock seconds elapsed since the global
// `startTime` was recorded (uses the global `endTime`/`Idunno` as scratch).
double report_running_time() {
    gettimeofday(&endTime, &Idunno);
    long sec = endTime.tv_sec - startTime.tv_sec;
    long usec = endTime.tv_usec - startTime.tv_usec;
    // Borrow one second when the microsecond difference underflows.
    if (usec < 0) {
        sec -= 1;
        usec += 1000000;
    }
    printf("CPU Time: %ld.%06ld\n", sec, usec);
    return (double)(sec*1.0 + usec/1000000.0);
}
// Entry point. argv[1]: number of tuple values encoded in blockIdx.x,
// argv[2]: number of groups of columns the generated work is split into.
// Validates the configuration, runs the GPU kernel, then the CPU reference
// solver, and reports both timings.
// Fixes vs. original: cudaThreadSynchronize() (deprecated since CUDA 4.0)
// replaced by cudaDeviceSynchronize(); device buffer, host buffer, events,
// and the CPU board were all leaked — now released before exit.
int main(int argc, char **argv) {
    if(argc < 3) {
        printf("\nError, too few arguments. Usage: ./CHANGE THIS\n");
        return -1;
    }
    const int NUM_TUPLEX = atoi(argv[1]);
    const int NUM_TUPLEY = atoi(argv[2]);
    const int generatedNum = NUM - 3 - NUM_TUPLEX;
    cudaEvent_t start, stop;
    float elapsedTime;
    if(generatedNum < 0){
        printf("\nThe numbers generated iteratively cannot be less than 0.\n");
        exit(1);
    }
    //ensure N is in the correct range
    if(NUM < 4 || NUM > 22){
        printf("\nN(%d) must be between 4 and 22 inclusive\n", NUM);
        exit(1);
    }
    //ensure that at least one of the tuple values is generated by the block's X coordinate value
    if(NUM_TUPLEX < 1){
        printf("\nThe number of tuples generated by each block's X coordinate value must be >= 1\n");
        exit(1);
    }
    //ensure that the number of Y segments that the numGen work is divided into
    //is at least one per work segment
    if(NUM_TUPLEY > pow(NUM, generatedNum)){
        printf("\n number of groups of columns must be less than or equal to N^(N - 3 - (1st ARG))\n");
        exit(1);
    }
    // CPU board: NUM x NUM, zero-initialized (calloc replaces the original
    // malloc + explicit zeroing loop).
    int **board = (int **) malloc(NUM * sizeof(int *));
    for (int i = 0; i < NUM; i++) {
        board[i] = (int *) calloc(NUM, sizeof(int));
    }
    int WIDTH, HEIGHT, NUM_BLOCKS, YSegmentSize;
    WIDTH = pow(NUM, NUM_TUPLEX);
    YSegmentSize = (NUM / 2) + (NUM % 2);
    HEIGHT = YSegmentSize + NUM_TUPLEY;
    NUM_BLOCKS = WIDTH * HEIGHT;
    long *d_answer;
    answer = new long[NUM_BLOCKS];
    cudaMalloc((void **) &d_answer, sizeof(long) * NUM_BLOCKS);
    dim3 block(NUM, NUM);     //threads w x h
    dim3 grid(WIDTH, HEIGHT); //blocks w x h
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    kernel<<<grid, block>>>(d_answer, YSegmentSize, NUM_TUPLEX, NUM_TUPLEY, generatedNum);
    cudaDeviceSynchronize();  // modern replacement for cudaThreadSynchronize()
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaMemcpy(answer,d_answer, sizeof(long) * NUM_BLOCKS, cudaMemcpyDeviceToHost);
    srand(1);
    gettimeofday(&startTime, &Idunno);
    Solver(board, 0, NUM);
    printf("\nTotal Solutions: %d boards\n\n",total);
    report_running_time();
    printf("GPU Time: %f secs\n", (elapsedTime / 1000.00));
    // Release everything the original leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_answer);
    delete[] answer;
    for (int i = 0; i < NUM; i++) {
        free(board[i]);
    }
    free(board);
    return 0;
}
|
9,255 | #include <stdio.h>
#include <iostream>
#include <chrono>
#include <iomanip>
using namespace std;
// Deinterleave `step` BGR pixel triplets per thread from `input` into three
// planar channels laid out back-to-back in `output` (R plane first, then G
// at +len, then B at +2*len).
__global__
void convert(char *input, char *output, int len, int step)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // This thread's planar destinations and interleaved source.
    char *dstR = output + index * step;
    char *dstG = dstR + len;
    char *dstB = dstG + len;
    const char *src = (const char *)input + index * step * 3;
    for (int px = 0; px < step; px++) {
        *(dstB++) = *(src++);
        *(dstG++) = *(src++);
        *(dstR++) = *(src++);
    }
}
// Copies an interleaved BGR image (`size` bytes, len = size/3 pixels) into
// the device buffer `pBuffer` and launches `convert` to produce planar
// channels at pBuffer+size. The main launch gives each of 128 threads in
// each of len/128/128 blocks a run of 128 pixels; a second launch of
// single-thread blocks handles leftover pixels.
// NOTE(review): the tail launch uses len % (block*thread) blocks, which
// only equals the number of uncovered pixels when len/block/thread rounds
// down by exactly that amount — confirm coverage for arbitrary len.
extern "C" void rgbConvert(char* input, int size, unsigned char * pBuffer){
int len = size / 3;
char * cinput = (char *) pBuffer;
char * coutput = (char *) pBuffer + size;
int block = 128;
int thread = 128;
cudaMemcpy(pBuffer, input, size, cudaMemcpyHostToDevice );
// Zero the region just past the planar output (presumably padding for a
// 4th channel — see rgbConvertBack's len*4 offset; verify against callers).
cudaMemset(pBuffer + len * 6, 0, len);
cudaDeviceSynchronize();
if(len/block/thread > 0){
// Bulk pass: each thread converts `block` (=128) consecutive pixels.
convert<<<len/block/thread, thread>>>(cinput, coutput, len, block);
// Tail pass: one pixel per single-thread block.
convert<<<len%(block*thread), 1>>>(cinput + len/block/thread*block*thread*3, coutput + len/block/thread*block*thread, len, 1);
}else{
convert<<<len%(block*thread), 1>>>(cinput , coutput , len, 1);
}
cudaDeviceSynchronize();
}
// Inverse of `convert`: re-interleave `step` pixels per thread from the
// planar layout (R plane, then G at +len, then B at +2*len) back into
// consecutive BGR triplets in `output`.
__global__
void convertBack(char *input, char *output, int len, int step)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // This thread's planar sources and interleaved destination.
    char *srcR = input + index * step;
    char *srcG = srcR + len;
    char *srcB = srcG + len;
    char *dst = output + index * step * 3;
    for (int px = 0; px < step; px++) {
        *(dst++) = *(srcB++);
        *(dst++) = *(srcG++);
        *(dst++) = *(srcR++);
    }
}
// Launches `convertBack` to re-interleave planar channels stored at the
// start of `pBuffer` into BGR triplets at pBuffer + len*4.
// NOTE(review): the bulk launch passes step=thread here, while rgbConvert
// passed step=block — the two happen to be equal (both 128), so behavior
// matches, but the asymmetry looks accidental.
// NOTE(review): same tail-coverage concern as rgbConvert — the tail launch
// size len % (block*thread) is not obviously the number of remaining
// pixels for arbitrary len.
extern "C" void rgbConvertBack(int len, unsigned char * pBuffer){
int block = 128;
int thread = 128;
char * cinput = (char *) pBuffer;
char * coutput = (char *) pBuffer + len * 4;
if(len/block/thread > 0){
convertBack<<<len/block/thread, thread>>>(cinput, coutput, len, thread);
convertBack<<<len%(block*thread), 1>>>(cinput + len/block/thread*block*thread, coutput + len/block/thread*block*thread*3, len, 1);
}else{
convertBack<<<len%(block*thread), 1>>>(cinput, coutput, len, 1);
}
cudaDeviceSynchronize();
}
// Variant of `convert` that processes exactly 128 BGR pixel triplets per
// thread, deinterleaving them into planar channels (R at output+index*step,
// G at +len, B at +2*len). `step` is used only for addressing; the pixel
// count is fixed at 128.
// The original spelled out all 128 iterations by hand (384 copy lines);
// a #pragma unroll loop asks the compiler for the same fully-unrolled code
// without the duplication.
__global__
void convert128(char *input, char *output, int len, int step)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    char *oR = output + index * step, *oG = oR + len, *oB = oG + len;
    const char *iPos = (const char *)input + index * step * 3;
    #pragma unroll
    for (int px = 0; px < 128; px++) {
        *(oB++) = *(iPos++);
        *(oG++) = *(iPos++);
        *(oR++) = *(iPos++);
    }
}
/*
int main(){
int size = 1811520*3;
char * input = (char*) malloc(size);
memset(input, 2, size);
char* output = rgbConvert(input, size);
cudaFree(output);
return EXIT_SUCCESS;
}
*/ |
9,256 | #include <stdio.h>
#include <cstring>   // strcmp — used by main's "-h" handling but was never included
#include <chrono>
// Constant values.
int NUM_PARTICLES = 10000;
int NUM_ITERATIONS = 1000;
int BLOCK_SIZE = 32;
// Data of a single particule.
struct Particle {
float3 position;
float3 velocity;
};
// Generate a random number between in range [a, b].
#define RAND_FLOAT(a,b) (a + (float)rand() / RAND_MAX * (b-a))
// Check if the given command has returned an error.
#define CUDA_CHECK(cmd) if ((cmd) != cudaSuccess) { \
printf("ERROR: cuda error at line %d\n", __LINE__); abort(); }
// Initialize the array of particules.
// Allocates the particle array in CUDA managed memory and fills it with
// deterministic pseudo-random velocities and positions (fixed seed 42;
// rand() calls happen in the same order as before, so values are identical).
Particle *CreateParticuleArray()
{
    srand(42);
    Particle *particles;
    CUDA_CHECK(cudaMallocManaged(&particles, sizeof(Particle) * NUM_PARTICLES));
    for (int i = 0; i < NUM_PARTICLES; i++) {
        Particle &p = particles[i];
        p.velocity.x = RAND_FLOAT(1, 10);
        p.velocity.y = RAND_FLOAT(1, 20);
        p.velocity.z = RAND_FLOAT(5, 30);
        p.position.x = RAND_FLOAT(-100, 100);
        p.position.y = RAND_FLOAT(-100, 100);
        p.position.z = RAND_FLOAT(-100, 100);
    }
    return particles;
}
// Update a particule by one single step.
// Advances one particle by a single step: velocity += dvel, then
// position += the updated velocity (explicit Euler).
__device__ void UpdateParticule(Particle &particule, const float3 &dvel)
{
    float3 &vel = particule.velocity;
    float3 &pos = particule.position;
    vel.x += dvel.x;
    vel.y += dvel.y;
    vel.z += dvel.z;
    pos.x += vel.x;
    pos.y += vel.y;
    pos.z += vel.z;
}
// GPU kernel for updating a particule by one single step.
// One-step update kernel: one thread per particle, with a guard for the
// partial last block.
__global__ void GpuUpdate(Particle *particules, int NUM_PARTICLES, float3 dvel)
{
    auto i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= NUM_PARTICLES)
        return;
    UpdateParticule(particules[i], dvel);
}
// Make all iterations on GPU.
void GpuInterations(Particle *array, float3 dvel)
{
int num_blocks = (NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE;
for (int i = 0; i < NUM_ITERATIONS; i++) {
GpuUpdate<<<num_blocks, BLOCK_SIZE>>>(array, NUM_PARTICLES, dvel);
cudaDeviceSynchronize(); // Make sure the particules were updated.
}
}
// Entry point of this program.
// Entry point of this program.
// Parses optional [num particules] [num iterations] [block size] arguments,
// runs the GPU iterations, and reports the elapsed time.
// Fixes vs. original: the managed particle array was never freed (now
// released via cudaFree); strcmp was used without including <cstring>.
int main(int argc, const char **argv)
{
    // When the program is ran with -h, show usage.
    if (argc == 2 && !strcmp(argv[1], "-h")) {
        printf("Usage: ./exercise_2a [num particules] [num iterations] [block size]\n");
        exit(0);
    }
    // Read number of particules, number of iterations and block size.
    if (argc >= 2) NUM_PARTICLES = atoi(argv[1]);
    if (argc >= 3) NUM_ITERATIONS = atoi(argv[2]);
    if (argc >= 4) BLOCK_SIZE = atoi(argv[3]);
    // Velocity increment on each step.
    float3 dvel = make_float3(-1.f, 3.45f, 7.3f);
    // Run iterations on GPU.
    Particle *array = CreateParticuleArray();
    printf("\nStarting GPU test ...\n");
    auto start = std::chrono::system_clock::now();
    GpuInterations(array, dvel);
    auto end = std::chrono::system_clock::now();
    int ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
    printf("GPU time: %d ms\n\n", ms);
    CUDA_CHECK(cudaFree(array)); // managed allocation was leaked originally
    return 0;
}
|
9,257 | #include <iostream>
// Minimal smoke test: emit a greeting on stdout and exit successfully
// (main's implicit return value is 0).
int main() {
    std::cout << "Hello Cuda World" << std::endl;
}
|
9,258 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Enumerates CUDA devices and prints the driver/runtime versions and each
// device's name.
// Fix vs. original: cudaGetDeviceCount can fail (e.g. no driver installed),
// leaving deviceCount unspecified; its result — and that of
// cudaGetDeviceProperties — is now checked instead of being ignored.
int main()
{
    int deviceCount = 0;
    cudaError_t err = cudaGetDeviceCount(&deviceCount);
    if (err != cudaSuccess)
    {
        printf("cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("\nNumber of GPU devices: %i\n", deviceCount);
    int driverVersion;
    int runtimeVersion;
    cudaDriverGetVersion(&driverVersion);
    cudaRuntimeGetVersion(&runtimeVersion);
    cudaDeviceProp deviceProperties;
    // Versions are encoded as 1000*major + 10*minor.
    printf("CUDA Driver Version / Runtime Version: %d.%d / %d.%d\n\n\n",
    driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000,
    (runtimeVersion%100)/10);
    for(int i=0; i<deviceCount; i++)
    {
        if (cudaGetDeviceProperties(&deviceProperties, i) == cudaSuccess)
        {
            printf("Name: %s\n", deviceProperties.name);
        }
    }
    return 0;
}
|
9,259 |
#include <iostream>
#ifdef _WIN32
# define IMPORT __declspec(dllimport)
#else
# define IMPORT
#endif
int static_cuda11_func(int);
IMPORT int shared_cuda11_func(int);
// Exercises both the statically-linked and the imported (shared) CUDA
// test functions with the same argument; return values are ignored —
// this only verifies that both symbols link and are callable.
void test_functions()
{
    const int arg = 42;
    static_cuda11_func(arg);
    shared_cuda11_func(arg);
}
// Entry point: run the link-test calls; reaching return means both
// functions resolved and executed.
int main(int argc, char** argv)
{
    test_functions();
    return 0;
}
|
9,260 | #include <stdio.h>
#include <iostream>
#include "cuda_runtime.h"
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <time.h>
#include <cuda_runtime_api.h>
/*Realization of the multiplication between a vector and a matrix, on the GPU.
It realizes multiplication between a vector of 4 element and a matrix (4x4),
so as result is expected a vector of 4 element.
We initialized the input array on the host (CPU) and trasferred to device (GPU).
Then, CPU launches the kernel which is elaborated on GPU.
Finally, the results is trasferred from GPU to CPU and printed out*/
// Kernel: one thread per output element. Computes d_out = d_vec (1xN) times
// d_mat (NxM, row-major), i.e. d_out[col] = sum_i d_vec[i] * d_mat[i][col].
__global__ void vector_matrix_mult(float *d_vec, float *d_mat, float *d_out, const int N, const int M)
{
    int col = threadIdx.x + blockIdx.x*blockDim.x;
    if (col >= M)
        return;
    float acc = 0;
    for (int row = 0; row < N; row++) {
        acc += d_vec[row] * d_mat[row * M + col];
    }
    d_out[col] = acc;
}
// Host driver: multiplies a 4-element vector by a 4x4 matrix on the GPU,
// times the kernel with CUDA events, and prints the result vector.
// Fix vs. original: the kernel launch was never checked — a bad launch
// configuration would have silently produced garbage. cudaGetLastError()
// is now consulted immediately after the launch.
int main(int argc, char ** argv) {
    float elapsed = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    const int N = 4;
    const int M = N;
    const int ARRAY_SIZE = N;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    const int MATRIX_SIZE = ARRAY_SIZE*ARRAY_SIZE;
    const int MATRIX_BYTES = MATRIX_SIZE * sizeof(float);
    // generate the input vector and input matrix on the host
    float h_vec[ARRAY_SIZE] = { 2, 1, 1, 1 };
    float h_mat[ARRAY_SIZE][ARRAY_SIZE] = { 2, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 };
    float h_out[ARRAY_SIZE];
    //declare GPU memory pointers
    float * d_vec;
    float * d_mat;
    float * d_out;
    //allocate GPU memory
    cudaMalloc((void**)&d_vec, ARRAY_BYTES);
    cudaMalloc((void**)&d_mat, MATRIX_BYTES);
    cudaMalloc((void**)&d_out, ARRAY_BYTES);
    //transfer the input from CPU to GPU
    cudaMemcpy(d_vec, h_vec, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_mat, h_mat, MATRIX_BYTES, cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    //launch the kernel and check the launch succeeded
    vector_matrix_mult <<<1, ARRAY_SIZE>>> (d_vec, d_mat, d_out, N, M);
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(launchErr));
        return 1;
    }
    cudaEventRecord(stop, 0);
    //trasfer the results from GPU to CPU (cudaMemcpy blocks until done)
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed, start, stop);
    //print out the resulting array
    for (int i = 0; i < ARRAY_SIZE; i++) {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }
    printf("The GPU time elapsed is %.6f ms \"", elapsed);
    //free GPU location memory
    cudaFree(d_vec);
    cudaFree(d_mat);
    cudaFree(d_out);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
/*Realization though CPU*/
/*
#include <time.h>
int main(int argc, char ** argv) {
clock_t cpu_startTime, cpu_stopTime;
double cpu_elapsedTime = 0;
cpu_startTime = clock();
const int ARRAY_SIZE = 4;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_vec[ARRAY_SIZE] = { 1, 1, 1, 1 };
float h_mat[ARRAY_SIZE][ARRAY_SIZE] = { 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 };
float risultato[ARRAY_SIZE];
float h_out[ARRAY_SIZE];
float tot = 0;
int i, j;
for (j = 0; j < ARRAY_SIZE; j++){
for (i = 0; i < ARRAY_SIZE; i++) {
risultato[i] = h_vec[i] * h_mat[i][j];
tot += risultato[i];
}
h_out[j] = tot;
tot = 0;
printf("%f", h_out[j]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cpu_stopTime = clock();
cpu_elapsedTime = ((cpu_startTime - cpu_stopTime) / CLOCKS_PER_SEC);
printf("The CPU elapsed time is %.6f ms \"", cpu_elapsedTime);
return 0;
}
*/ |
9,261 |
#include "planeslicer.cuh"
//#include "stdio.h"
#define flt_eps 1.192092896e-07f
// Three-component dot product (same left-to-right accumulation order as
// the original, so results are bit-identical).
__device__ float dot(float3 a, float3 b)
{
    float result = a.x * b.x;
    result = result + a.y * b.y;
    result = result + a.z * b.z;
    return result;
}
// Slices a triangle soup with an arbitrary plane (position + normal).
// One thread per triangle. Each intersected triangle contributes one edge
// (two float3 endpoints), staged in dynamic shared memory and then copied
// to the global `edges` array at an offset reserved via atomicAdd on
// `numEdges`. Shared layout: [int nIntersections][int tEid][2*blockDim.x float3].
__global__ void computeIntersectionWithPlane_Kernel( int numTriangles , float3 *triangles , float3 *edges , int *numEdges , float3 planePosition , float3 planeNormal )
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// NOTE(review): threads past numTriangles exit here and skip every
// __syncthreads() below — UB if the last block is partial; confirm launch
// sizes, or restructure so all threads reach the barriers.
if( tid > numTriangles - 1 )
return;
// This thread's triangle: three consecutive float3 vertices.
float3 *t3 = triangles + tid * 3;
extern __shared__ float sData[];
// Per-block counters: number of intersected triangles, and the block's
// base offset into the global edge array.
int *nIntersections = (int*)sData;
int *tEid = nIntersections + 1;
if (threadIdx.x == 0)
{
*nIntersections = 0;
*tEid = 0;
}
__syncthreads();
// Two staged intersection points per slot.
float3 *sIntersections = (float3*)( sData + 2 );
int p = 0, n = 0;
float3 d1, d2, d3, l1, l2, l3;
// d_i: vertex -> plane-position vectors; l_i: triangle edge vectors.
d1.x = planePosition.x - t3[0].x;
d1.y = planePosition.y - t3[0].y;
d1.z = planePosition.z - t3[0].z;
d2.x = planePosition.x - t3[1].x;
d2.y = planePosition.y - t3[1].y;
d2.z = planePosition.z - t3[1].z;
d3.x = planePosition.x - t3[2].x;
d3.y = planePosition.y - t3[2].y;
d3.z = planePosition.z - t3[2].z;
l1.x = t3[0].x - t3[1].x;
l1.y = t3[0].y - t3[1].y;
l1.z = t3[0].z - t3[1].z;
l2.x = t3[1].x - t3[2].x;
l2.y = t3[1].y - t3[2].y;
l2.z = t3[1].z - t3[2].z;
l3.x = t3[2].x - t3[0].x;
l3.y = t3[2].y - t3[0].y;
l3.z = t3[2].z - t3[0].z;
// Signed distances of the vertices from the plane (scaled by |normal|).
float f1 = dot( d1 , planeNormal );
float f2 = dot( d2 , planeNormal );
float f3 = dot( d3 , planeNormal );
// p/n count vertices strictly on the positive/negative side; the plane
// cuts the triangle only when the vertices straddle it (0 < p < 3).
p += f1 > 0;
p += f2 > 0;
p += f3 > 0;
n += f1 < 0;
n += f2 < 0;
n += f3 < 0;
if (p < 3 && p > 0)
{
// Reserve one staging slot for this triangle's edge.
int id = atomicAdd(nIntersections, 1);
// Interpolation parameters along each triangle edge.
float lambda1 = f2 / dot(l1, planeNormal);
float lambda2 = f3 / dot(l2, planeNormal);
float lambda3 = f1 / dot(l3, planeNormal);
bool p1Found = false, p2Found = false;
// Edge v0-v1.
if (lambda1 >= 0 && lambda1 <= 1)
{
sIntersections[2 * id].x = lambda1 * t3[0].x + (1 - lambda1) * t3[1].x;
sIntersections[2 * id].y = lambda1 * t3[0].y + (1 - lambda1) * t3[1].y;
sIntersections[2 * id].z = lambda1 * t3[0].z + (1 - lambda1) * t3[1].z;
p1Found = true;
}
// Edge v1-v2: fills whichever endpoint slot is still free.
if (lambda2 >= 0 && lambda2 <= 1)
{
if (!p1Found)
{
sIntersections[2 * id].x = lambda2 * t3[1].x + (1 - lambda2) * t3[2].x;
sIntersections[2 * id].y = lambda2 * t3[1].y + (1 - lambda2) * t3[2].y;
sIntersections[2 * id].z = lambda2 * t3[1].z + (1 - lambda2) * t3[2].z;
p1Found = true;
}
else
{
sIntersections[2 * id + 1].x = lambda2 * t3[1].x + (1 - lambda2) * t3[2].x;
sIntersections[2 * id + 1].y = lambda2 * t3[1].y + (1 - lambda2) * t3[2].y;
sIntersections[2 * id + 1].z = lambda2 * t3[1].z + (1 - lambda2) * t3[2].z;
p2Found = true;
}
}
// Edge v2-v0.
if (lambda3 >= 0 && lambda3 <= 1)
{
if (!p2Found)
{
sIntersections[2 * id + 1].x = lambda3 * t3[2].x + (1 - lambda3) * t3[0].x;
sIntersections[2 * id + 1].y = lambda3 * t3[2].y + (1 - lambda3) * t3[0].y;
sIntersections[2 * id + 1].z = lambda3 * t3[2].z + (1 - lambda3) * t3[0].z;
p2Found = true;
}
}
// Degenerate cases (grazing contact): duplicate the found endpoint so the
// staged edge is always fully populated.
if (!p1Found )
{
sIntersections[2 * id].x = sIntersections[2 * id + 1].x;
sIntersections[2 * id].y = sIntersections[2 * id + 1].y;
sIntersections[2 * id].z = sIntersections[2 * id + 1].z;
}
if (!p2Found)
{
sIntersections[2 * id + 1].x = sIntersections[2 * id].x;
sIntersections[2 * id + 1].y = sIntersections[2 * id].y;
sIntersections[2 * id + 1].z = sIntersections[2 * id].z;
}
}
__syncthreads();
// Thread 0 reserves a contiguous range in the global edge array for the
// whole block's staged edges.
if (threadIdx.x == 0)
{
*tEid = atomicAdd( numEdges , *nIntersections ) ;
}
__syncthreads();
// First nIntersections threads copy the compacted staged edges out.
if ( *nIntersections > 0 && threadIdx.x < *nIntersections )
{
edges[2 * (*tEid + threadIdx.x)] = sIntersections[2 * threadIdx.x];
edges[2 * (*tEid + threadIdx.x) + 1] = sIntersections[2 * threadIdx.x + 1];
}
}
// Slices triangles with an axis-aligned plane (planeType selects X=0, Y=1,
// Z=2; the corresponding component of sliceValue is the plane coordinate).
// One thread per triangle; intersected triangles emit one edge plus
// interpolated per-endpoint normals.
// NOTE(review): unlike computeIntersectionWithPlane_Kernel, `edges` is
// indexed directly by the block-local shared counter (id from atomicAdd on
// nIntersections) with no per-block global base offset — different blocks
// would overwrite each other's output. Confirm intended use before relying
// on multi-block launches.
__global__ void computeIntersectionWithStandardPlane_Kernel( int numTriangles, float3 *triangles , float3* direction , float3 *edges , float3 *edgePointNormals , float3* collectedTris ,
int *numEdges, float3 sliceValue , int planeType )
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// NOTE(review): early returns here and at `notIntersected` below mean not
// all threads reach the trailing __syncthreads().
if ( tid > numTriangles - 1 )
return;
extern __shared__ float sData[];
int *nIntersections = (int*)sData;
int *tEid = nIntersections + 1;
if ( threadIdx.x == 0 )
{
*nIntersections = 0;
*tEid = 0;
}
__syncthreads();
// Pick the plane coordinate for the chosen axis.
float sv = 0;
if ( planeType == 0 )
{
sv = sliceValue.x;
}
else if ( planeType == 1 )
{
sv = sliceValue.y;
}
else
{
sv = sliceValue.z;
}
// Raw float view of the triangle (t[0..2], t[3..5], t[6..8] = 3 vertices);
// n3 points at this triangle's three vertex normals.
float *t = (float*)( triangles + 3 * tid );
float3 *n3 = direction + 3 * tid;
// Signed axis distances of each vertex from the plane.
float v1 = t[ planeType ] - sv;
float v2 = t[3 + planeType] - sv;
float v3 = t[6 + planeType] - sv;
// All three vertices strictly on one side -> no intersection.
bool notIntersected = (v1 > 0 && v2 > 0 && v3 > 0) || (v1 < 0 && v2 < 0 && v3 < 0);
if ( notIntersected )
{
return;
}
// Interpolation parameters along edges v1-v2, v2-v3, v3-v1.
float t1 = v1 / ( t[planeType] - t[3 + planeType] );
float t2 = v2 / ( t[3 + planeType] - t[6 + planeType]);
float t3 = v3 / ( t[6 + planeType] - t[planeType] );
float3 end1, end2;
float interpN1[3], interpN2[3];
if ( t1 > 0 && t1 < 1 )
{
// First endpoint on edge v1-v2, with normal interpolated n1/n2.
end1.x = t[ 0 ] * t1 + t[ 3 ] * (1 - t1);
end1.y = t[ 1 ] * t1 + t[ 4 ] * (1 - t1);
end1.z = t[ 2 ] * t1 + t[ 5 ] * (1 - t1);
interpN1[0] = n3[0].x * t1 + n3[1].x * (1 - t1);
interpN1[1] = n3[0].y * t1 + n3[1].y * (1 - t1);
interpN1[2] = n3[0].z * t1 + n3[1].z * (1 - t1);
//interpWt1 = wt[0] * t1 + wt[1] * (1 - t1);
if (t2 > 0 && t2 < 1)
{
// Second endpoint on edge v2-v3.
//end2 = pos2 * t2 + pos3 * (1 - t2);
end2.x = t[3] * t2 + t[6] * (1 - t2);
end2.y = t[4] * t2 + t[7] * (1 - t2);
end2.z = t[5] * t2 + t[8] * (1 - t2);
interpN2[0] = n3[1].x * t2 + n3[2].x * (1 - t2);
interpN2[1] = n3[1].y * t2 + n3[2].y * (1 - t2);
interpN2[2] = n3[1].z * t2 + n3[2].z * (1 - t2);
}
else
{
// Second endpoint on edge v3-v1.
//end2 = pos3 * t3 + pos1 * (1 - t3);
end2.x = t[6] * t3 + t[0] * (1 - t3);
end2.y = t[7] * t3 + t[1] * (1 - t3);
end2.z = t[8] * t3 + t[2] * (1 - t3);
//interpN2 = n3 * t3 + n1 * (1 - t3);
// NOTE(review): this position lies on edge v3-v1 but the normal blends
// n2/n3 (indices 1 and 2) instead of n3/n1 — looks inconsistent with the
// commented intent above; verify against the original project.
interpN2[0] = n3[1].x * t3 + n3[2].x * (1 - t3);
interpN2[1] = n3[1].y * t3 + n3[2].y * (1 - t3);
interpN2[2] = n3[1].z * t3 + n3[2].z * (1 - t3);
}
}
else if (t2 > 0 && t2 < 1)
{
// NOTE(review): both endpoints here are the same point on edge v2-v3
// (the comments suggest end2 was meant to use t3 on edge v3-v1) — the
// emitted edge degenerates to a point; confirm intent.
end1.x = t[3] * t2 + t[6] * (1 - t2);
end1.y = t[4] * t2 + t[7] * (1 - t2);
end1.z = t[5] * t2 + t[8] * (1 - t2);
end2.x = t[3] * t2 + t[6] * (1 - t2);
end2.y = t[4] * t2 + t[7] * (1 - t2);
end2.z = t[5] * t2 + t[8] * (1 - t2);
//end1 = pos2 * t2 + pos3 * (1 - t2);
//end2 = pos3 * t3 + pos1 * (1 - t3);
//interpN1 = n2 * t2 + n3 * (1 - t2);
//interpN2 = n3 * t3 + n1 * (1 - t3);
interpN1[0] = n3[1].x * t2 + n3[2].x * (1 - t2);
interpN1[1] = n3[1].y * t2 + n3[2].y * (1 - t2);
interpN1[2] = n3[1].z * t2 + n3[2].z * (1 - t2);
interpN2[0] = n3[2].x * t3 + n3[0].x * (1 - t3);
interpN2[1] = n3[2].y * t3 + n3[0].y * (1 - t3);
interpN2[2] = n3[2].z * t3 + n3[0].z * (1 - t3);
}
// Reserve two slots and write the edge endpoints + interpolated normals.
int id = atomicAdd( nIntersections , 2 );
edges[2 * id] = end1;
edges[2 * id + 1] = end2;
edgePointNormals[2 * id].x = interpN1[0];
edgePointNormals[2 * id].y = interpN1[1];
edgePointNormals[2 * id].z = interpN1[2];
edgePointNormals[2 * id + 1].x = interpN2[0];
edgePointNormals[2 * id + 1].y = interpN2[1];
edgePointNormals[2 * id + 1].z = interpN2[2];
__syncthreads();
// NOTE(review): every surviving thread adds the full block count here
// (no threadIdx.x == 0 guard), so numEdges is over-counted; compare with
// the publish step in computeIntersectionWithPlane_Kernel.
atomicAdd( numEdges , *nIntersections );
}
// Host wrapper for the axis-aligned slicing kernel.
// NOTE(review): the kernel launch is commented out, so this function is
// currently a no-op apart from returning any previously recorded CUDA
// error; the launch configuration below is computed but unused. The
// commented line also passes parameters (planeNormal) that do not match
// computeIntersectionWithStandardPlane_Kernel's signature — this looks
// like work-in-progress left disabled on purpose.
cudaError_t computeIntersectionWithStandardPlane(int numTriangles, float3 *triangles, float3 *edges, float3 *oppositeEnds , float3* collectedTris, int *numEdges, float3 sliceValue, int planeType)
{
dim3 threads(128, 1);
int wB = (numTriangles + threads.x - 1) / threads.x;
int hB = 1;
dim3 blocks(wB, hB);
// Shared memory: 2 staged float3 endpoints per thread + 2 int counters.
int sharedMemorySize = threads.x * 2 * sizeof(float3) + 2 * sizeof(int);
//computeIntersectionWithPlane_Kernel << < blocks, threads, sharedMemorySize >> >(numTriangles, triangles, edges, numEdges, sliceValue, planeNormal);
return cudaGetLastError();
}
// Host wrapper: slices `numTriangles` triangles with an arbitrary plane by
// launching computeIntersectionWithPlane_Kernel, 128 threads per block.
// Returns the launch status.
cudaError_t computeIntersectionWithPlane(int numTriangles, float3 *triangles, float3 planePosition, float3 planeNormal, float3 *edges, int *numEdges)
{
    dim3 threads(128, 1);
    // One thread per triangle, rounded up to whole blocks.
    dim3 blocks((numTriangles + threads.x - 1) / threads.x, 1);
    // Dynamic shared memory: 2 staged float3 endpoints per thread plus the
    // two int counters the kernel keeps at the front.
    int sharedMemorySize = threads.x * 2 * sizeof(float3) + 2 * sizeof(int);
    computeIntersectionWithPlane_Kernel << < blocks, threads , sharedMemorySize >> >(numTriangles, triangles, edges, numEdges, planePosition, planeNormal);
    return cudaGetLastError();
} |
9,262 | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <cuda_runtime.h>
#include <sys/time.h>
// Current wall-clock time in seconds with microsecond resolution,
// via gettimeofday.
double get_time() {
    struct timeval now;
    gettimeofday(&now, nullptr);
    return now.tv_sec + 1e-6 * now.tv_usec;
}
constexpr int m = 256;
// Experimental index "indirection" used by the benchmark kernel. The table
// lookup variants are disabled; the surviving expression is
// int(exp(i * 1e-18)) + i, which for the magnitudes used evaluates to i + 1
// (exp of a near-zero value truncates to 1). `c` is kept for signature
// compatibility with the disabled variants.
inline __device__ int indirect(int *c, int i) {
  double nearZero = float(i) * 1e-18;
  return int(exp(nearZero)) + i;
}
__constant__ int const_c[m];
// Memory-bandwidth / shared-atomics microbenchmark kernel.
// Each thread perturbs its global index through a chain of sin() calls
// (sin of a ~1e-13 argument is ~0, so the value is effectively unchanged but
// cannot be constant-folded), then hammers a shared-memory histogram with
// atomicAdd and flushes it to global memory. The commented-out code banks are
// alternative access patterns retained for experimentation.
__global__ void fd(float *a, float *b, int *c, int n) {
  __shared__ float b_s[m];
  unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
  // NOTE(review): this clear is guarded by the *global* index, so only
  // block 0 zeroes its shared histogram; other blocks accumulate into
  // uninitialised shared memory and never flush it below. Presumably
  // acceptable for a pure timing benchmark — confirm intent.
  if (i < m) {
    b_s[i] = 0;
  }
  __syncthreads();
  /*
  if (threadIdx.x < m) {
  b_s[threadIdx.x] = c[threadIdx.x];
  }
  __syncthreads();
  */
  /*
  float sum = 0;
  if (i > 0)
  sum += a[indirect(c, i) - 1];
  */
  // sum += a[indirect(c, i)];
  // sum += a[i + b_s[i & 127]];
  /*
  if (i < n - 1)
  sum += a[indirect(c, i) + 1];
  */
  // b[i] = (i * 1e-18);
  // b[i] = i;
  // b[i] = c[c[c[i & 64]]];
  // atomicAdd(b_s + ((unsigned)i * 34252345627) % m, 1.0f);
  // i = int(((((i * 1e-20f)))));
  // i = (i * 1e-10f);
  // i = i * i * i * i * i % m;
  // b_s[i % m] = 1;
  // #define C(x) i += (i >> x);
  // #define C(x) i += (i >> x);
  // for (int t = 0; t < 240; t++)
  // C(30);
  // Ten dependent "no-op" perturbations to defeat compiler strength reduction.
  i += int(sin(i * 1e-20f));
  i += int(sin(i * 1e-20f));
  i += int(sin(i * 1e-20f));
  i += int(sin(i * 1e-20f));
  i += int(sin(i * 1e-20f));
  i += int(sin(i * 1e-20f));
  i += int(sin(i * 1e-20f));
  i += int(sin(i * 1e-20f));
  i += int(sin(i * 1e-20f));
  i += int(sin(i * 1e-20f));
  // 27 scattered shared-memory atomics per thread.
  for (int j = 0; j < 27; j++) {
    atomicAdd(b_s + (unsigned int)(i / 4 + j * 431) % (m / 1), 1.0f);
  }
  __syncthreads();
  // Flush the (block-0) histogram to global memory.
  if (i < m) {
    atomicAdd(&b[i], b_s[i]);
  }
  // atomicAdd(b + i % (m * m), 1);
  /*
  atomicAdd(&b_s[0], sqrt(sum));
  if (threadIdx.x < m) {
  atomicAdd(b + threadIdx.x, b_s[threadIdx.x]);
  // b[threadIdx.x] += b_s[threadIdx.x];
  }
  */
}
// Benchmark driver: runs the fd kernel over 128M floats for three block sizes
// and reports time and effective bandwidth per run.
int main() {
  int n = 128 * 1024 * 1024;   // elements in a and b
  float *a, *b;
  int *c;
  cudaMallocManaged(&a, n * sizeof(float));
  cudaMallocManaged(&b, n * sizeof(float));
  // BUGFIX: c is an int array — it was allocated and later copied with
  // sizeof(float). The two sizes coincide on common platforms, but the
  // element type and the size expression must agree.
  cudaMallocManaged(&c, m * sizeof(int));
  for (int i = 0; i < n; i++) {
    a[i] = i * 1e-5f;
  }
  for (int i = 0; i < n; i++) {
    b[i] = i * 1e-5f;
  }
  for (int i = 0; i < m; i++) {
    c[i] = 0;
  }
  cudaMemcpyToSymbol(const_c, c, m * sizeof(int), 0, cudaMemcpyHostToDevice);
  for (auto bs : {256, 512, 1024}) {
    std::cout << "bs = " << bs << std::endl;
    for (int i = 0; i < 4; i++) {
      auto t = get_time();
      fd<<<n / bs, bs>>>(a, b, c, n);
      cudaDeviceSynchronize();
      t = get_time() - t;
      printf("%.2f ms bw %.3f GB/s\n", t * 1000,
             n * 2.0f * 4 / t / (1024 * 1024 * 1024.0f));
    }
    std::cout << std::endl;
  }
  // BUGFIX: release the managed allocations (previously leaked).
  cudaFree(a);
  cudaFree(b);
  cudaFree(c);
}
|
9,263 | #include <stdio.h>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <string.h>
// ROT13 one character per thread, in place. Non-ASCII-letter bytes are left
// untouched; upper and lower case rotate within their own 26-letter alphabet.
__global__ void
cuda_rot13(char * str, int numElements){
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i < numElements){
		char ch = str[i];
		bool lower = (ch >= 'a' && ch <= 'z');
		bool upper = (ch >= 'A' && ch <= 'Z');
		// Anything that is not a letter passes through unchanged.
		if (!lower && !upper)
			return;
		char base = lower ? 'a' : 'A';
		// Shift by 13 modulo the alphabet size, re-anchored at the case base.
		str[i] = base + (char)((ch - base + 13) % 26);
	}
}
// Encrypt/decrypt h_t in place with ROT13 on the GPU (ROT13 is its own
// inverse, so applying this twice restores the input).
// Exits the process on any CUDA error.
void rot13_encrypt(char h_t[]){
	cudaError_t err = cudaSuccess;
	int numElements = strlen(h_t);
	size_t size = numElements * sizeof(char);
	// Allocate the device buffer for the text.
	char * d_t = NULL;
	err = cudaMalloc((void **)&d_t, size);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Falha ao alocar vetor do device (texto original) (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	// Copy the original text from host to device.
	printf("Copiando texto do host pro device\n");
	err = cudaMemcpy(d_t, h_t, size, cudaMemcpyHostToDevice);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Falha ao copiar vetor c texto original do host para o device (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	// Run the encryption kernel. The block size is chosen by
	// cudaOccupancyMaxPotentialBlockSize; the grid is capped at 65535 blocks.
	// NOTE(review): cuda_rot13 has no grid-stride loop, so when the capped
	// grid cannot cover numElements threads the tail of the text stays
	// unencrypted — confirm inputs stay below 65535 * blockSize characters.
	int minGridSize, blockSize, gridSize;
	cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cuda_rot13, 0, numElements);
	gridSize = (numElements + blockSize - 1) / blockSize;
	if (gridSize > 65535) gridSize = 65535;
	printf("CUDA kernel launch with %d blocks of %d threads\n", gridSize, blockSize);
	cuda_rot13<<<gridSize, blockSize>>>(d_t, numElements);
	err = cudaGetLastError();
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Erro ao rodar kernel (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	// Copy the result from device back to host.
	printf("Copiando o texto do device para o host\n");
	err = cudaMemcpy(h_t, d_t, size, cudaMemcpyDeviceToHost);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Erro ao copiar do device pro host (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	// Free the device buffer.
	err = cudaFree(d_t);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Erro ao liberar o texto do device (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	// NOTE(review): cudaDeviceReset() tears down the whole CUDA context on
	// every call; rot13_test() invokes this function twice, paying a full
	// re-initialisation each time — consider hoisting to program exit.
	err = cudaDeviceReset();
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Erro ao resetar o device! (error=%s\n)!", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	printf("\nDone\n");
}
// Round-trip test: ROT13 applied twice must reproduce the original file
// contents. Returns 1 on success, 0 on failure; exits if the file can't
// be opened.
int rot13_test(char * name)
{
	char * text, *o_text;
	int pass = 1;
	long fsize;
	FILE *f = fopen(name, "r");
	if (!f){
		fprintf(stderr, "Erro ao abrir arquivo!\n");
		exit(EXIT_FAILURE);
	}
	fseek(f, 0, SEEK_END);
	fsize = ftell(f);
	rewind(f);
	// BUGFIX: allocate one extra byte and NUL-terminate after fread. The
	// buffers are used with strcpy/strcmp/strlen, which require terminated
	// strings; the old code read exactly fsize bytes into an fsize buffer
	// and then ran string functions over it (undefined behaviour).
	text = (char *) malloc ((fsize + 1) * sizeof (char));
	o_text = (char *) malloc ((fsize + 1) * sizeof (char));
	size_t nread = fread(o_text, 1, fsize, f);
	o_text[nread] = '\0';
	fclose(f);
	strcpy(text, o_text);
	// Encrypt twice: ROT13 is an involution, so the text must come back.
	rot13_encrypt(text);
	rot13_encrypt(text);
	pass = pass && !strcmp(text, o_text);
	// BUGFIX: release the host buffers (previously leaked, along with an
	// unnecessary 100-byte filename copy that has been removed).
	free(text);
	free(o_text);
	return(pass);
}
// Entry point: runs the ROT13 round-trip test on the file named by argv[1].
int main(int argc, char ** argv)
{
	// BUGFIX: guard against a missing filename before dereferencing argv[1].
	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return(EXIT_FAILURE);
	}
	printf("CUDA ROT-13 tests: %s\n", rot13_test(argv[1]) ? "SUCCEEDED" : "FAILED");
	return(0);
}
|
9,264 | /////////////////////////
// streamedMult.cu //
// Andrew Krepps //
// Module 7 Assignment //
// 3/26/2018 //
/////////////////////////
#include <stdio.h>
#include <stdlib.h>
///////////////////////////////////////////////////////////////////////////////
/// \brief perform element-wise array multiplication
///
/// \param [in] in1 the first input array
/// \param [in] in2 the second input array
/// \param [out] out the output array
/// \param [in] n the number of array elements
///////////////////////////////////////////////////////////////////////////////
__global__
void arrayMult(const float* in1, const float* in2, float* out, const unsigned int n)
{
	// Flat global index; guard the grid tail since the last block may
	// extend past the end of the arrays.
	const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
	if (gid >= n)
		return;
	out[gid] = in1[gid]*in2[gid];
}
///////////////////////////////////////////////////////////////////////////////
/// \brief initialize host array data
///
/// \param [out] a the data array
/// \param [in] n the number of array elements
///////////////////////////////////////////////////////////////////////////////
void initHostArray(float* a, const unsigned int n)
{
	// Fill with the ramp 0, 1, 2, ... so results are easy to verify.
	unsigned int i = 0;
	while (i < n) {
		a[i] = static_cast<float>(i);
		++i;
	}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief verify output data
///
/// This assumes both input arrays were initialized using initHostArray.
///
/// \param [in] a the output array
/// \param [in] n the number of array elements
///////////////////////////////////////////////////////////////////////////////
// Check that a[i] == i*i for every element (both inputs were initHostArray
// ramps), printing a diagnostic for each mismatch.
void verifyResult(float* a, const unsigned int n)
{
	for (unsigned int i = 0; i < n; ++i) {
		float expected = (float)i * (float)i;
		if (a[i] != expected) {
			// BUGFIX: the format string had two conversions but was
			// passed a single argument, and the index was missing.
			printf("Error! a[%u] != %f\n", i, expected);
		}
	}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief perform element-wise array multiplication while evenly splitting
/// work among concurrent streams
///
/// \param [in] numBlocks the number of thread blocks (total)
/// \param [in] blockSize the number of threads per block
/// \param [in] numStreams the number of streams
///
/// \returns the total GPU memory copy and execution time (in ms)
///////////////////////////////////////////////////////////////////////////////
float runStreamedArrayMult(const unsigned int numBlocks, const unsigned int blockSize, const unsigned int numStreams)
{
	// calculate data size
	const unsigned int n = numBlocks*blockSize;
	const unsigned int numBytes = n*sizeof(float);
	// evenly split blocks among streams
	// NOTE(review): integer division — if numBlocks is not a multiple of
	// numStreams the remainder elements are never copied or computed and
	// verifyResult will report errors. Confirm callers keep them divisible.
	const unsigned int blocksPerStream = numBlocks/numStreams;
	const unsigned int elementsPerStream = n/numStreams;
	const unsigned int bytesPerStream = elementsPerStream*sizeof(float);
	// allocate and initialize pinned host memory (required for真 async copies)
	float* in1;
	float* in2;
	float* out;
	cudaMallocHost((void**)&in1, numBytes);
	cudaMallocHost((void**)&in2, numBytes);
	cudaMallocHost((void**)&out, numBytes);
	initHostArray(in1, n);
	initHostArray(in2, n);
	// allocate device memory
	float* d_in1;
	float* d_in2;
	float* d_out;
	cudaMalloc((void**)&d_in1, numBytes);
	cudaMalloc((void**)&d_in2, numBytes);
	cudaMalloc((void**)&d_out, numBytes);
	// create streams
	cudaStream_t* streams = (cudaStream_t*) malloc(numStreams*sizeof(cudaStream_t));
	for (unsigned int i = 0; i < numStreams; ++i) {
		cudaStreamCreate(&streams[i]);
	}
	// create timing events
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	// start timer (event on the default stream)
	cudaEventRecord(start, 0);
	// execute kernels (data is split evenly among streams): each stream owns
	// a contiguous slice and performs H2D copy -> kernel -> D2H copy, so
	// copies and compute of different streams can overlap.
	for (unsigned int i = 0; i < numStreams; ++i) {
		const unsigned int startIdx = i*elementsPerStream;
		cudaMemcpyAsync(d_in1 + startIdx, in1 + startIdx, bytesPerStream, cudaMemcpyHostToDevice, streams[i]);
		cudaMemcpyAsync(d_in2 + startIdx, in2 + startIdx, bytesPerStream, cudaMemcpyHostToDevice, streams[i]);
		arrayMult<<<blocksPerStream, blockSize, 0, streams[i]>>>(d_in1 + startIdx, d_in2 + startIdx, d_out + startIdx, elementsPerStream);
		cudaMemcpyAsync(out + startIdx, d_out + startIdx, bytesPerStream, cudaMemcpyDeviceToHost, streams[i]);
	}
	// stop timer and wait for GPU to finish
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	// calculate execution time (in ms)
	float ms;
	cudaEventElapsedTime(&ms, start, stop);
	// verify output
	verifyResult(out, n);
	// free events
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	// free streams
	for (unsigned int i = 0; i < numStreams; ++i) {
		cudaStreamDestroy(streams[i]);
	}
	free(streams);
	// free device memory
	cudaFree(d_in1);
	cudaFree(d_in2);
	cudaFree(d_out);
	// free pinned host memory
	cudaFreeHost(in1);
	cudaFreeHost(in2);
	cudaFreeHost(out);
	// return execution time (in ms)
	return ms;
}
// Entry point. Optional CLI overrides:
//   argv[1] = total thread blocks, argv[2] = threads per block,
//   argv[3] = number of concurrent streams.
int main(int argc, char** argv)
{
	unsigned int numBlocks  = 512;
	unsigned int blockSize  = 256;
	unsigned int numStreams = 1;
	if (argc > 1) numBlocks  = atoi(argv[1]);
	if (argc > 2) blockSize  = atoi(argv[2]);
	if (argc > 3) numStreams = atoi(argv[3]);
	// Warm-up launch so one-time CUDA startup costs don't skew the timing.
	runStreamedArrayMult(numBlocks, blockSize, numStreams);
	// Timed run.
	const float ms = runStreamedArrayMult(numBlocks, blockSize, numStreams);
	printf("GPU execution time (%d streams): %.3f ms\n", numStreams, ms);
	return EXIT_SUCCESS;
}
|
9,265 | #include <stdio.h>
#include <stdlib.h>
#include "cuda.h"
void StartKernelTiming(cudaEvent_t& tic, cudaEvent_t& toc, cudaStream_t iStream);
void StopKernelTiming(cudaEvent_t& tic, cudaEvent_t& toc, cudaStream_t iStream, float* ptimer);
__global__ void vecMat1(double *_dst, double* _mat, double* _v, int _w, int _h );
// Matrix-vector product benchmark: y = M x with M of size h x w.
// Usage: prog <HEIGHT> <WIDTH>
int main(int argc , char *argv[])
{
	if(argc != 3)
	{
		printf("\n Usage: %s <HEIGHT> <WIDTH> \n",argv[0]);
		return 1;
	}
	int h=atoi(argv[1]);
	int w=atoi(argv[2]);
	int n;
	const unsigned int THREADS_PER_BLOCK = 512;
	// Host-side matrix (h x w, row major), input vector (w), result vector (h).
	double *hostMat = (double*) calloc(h*w, sizeof(double));
	double *hostVec = (double*) calloc(w, sizeof(double));
	// BUGFIX: the result has h elements; it was allocated with w, which
	// overflows the heap buffer on the copy-back whenever h > w.
	double *hostResVec = (double*) calloc(h, sizeof(double));
	for(n=0;n<h*w;++n){
		hostMat[n]=rand() % 100;
	}
	for(n=0;n<w;++n)
	{
		hostVec[n] = rand() % 100;
	}
	// allocate device memory
	double *gpuMat, *gpuVec, *gpuResVec;
	cudaMalloc( (void**)&gpuMat, w*h* sizeof(double) );
	cudaMalloc( (void**)&gpuVec, w * sizeof(double) );
	cudaMalloc( (void**)&gpuResVec, h * sizeof(double) );
	// upload M and x
	cudaMemcpy( gpuMat, (void*) hostMat, w*h * sizeof(double),cudaMemcpyHostToDevice);
	cudaMemcpy( gpuVec, (void*) hostVec, w * sizeof(double),cudaMemcpyHostToDevice );
	// BUGFIX: the grid must cover all h output rows (one thread per row).
	// The old code sized it from the stale loop counter n (== w after the
	// init loops), silently leaving rows uncomputed whenever w < h.
	dim3 threadBlock( THREADS_PER_BLOCK, 1 );
	const unsigned int numBlocks = (h - 1)/THREADS_PER_BLOCK + 1;
	dim3 blockGrid( numBlocks, 1, 1);
	// timing
	cudaEvent_t tic, toc;
	float elapsed_time = 0.f;
	StartKernelTiming(tic, toc, 0);
	vecMat1<<< blockGrid, threadBlock >>>( gpuResVec, gpuMat, gpuVec,w,h);
	cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated
	// download result y
	cudaMemcpy( hostResVec, gpuResVec, h * sizeof(double), cudaMemcpyDeviceToHost) ;
	StopKernelTiming(tic,toc, 0, &elapsed_time); /* end of timing */
	/* convert from milliseconds to seconds */
	elapsed_time /= 1000.0;
	/* output elapsed time */
	printf("elapsed time:%g sec \n", elapsed_time);
	cudaFree( gpuMat );
	cudaFree( gpuVec );
	cudaFree( gpuResVec );
	free(hostMat);
	free(hostVec);
	free(hostResVec);
	return 0;
}
// One thread per output row: _dst[i] = dot(row i of _mat, _v).
// _mat is row-major h x w; guard handles the grid tail.
__global__ void vecMat1(double *_dst, double* _mat, double* _v, int _w, int _h )
{
	// row index the thread is operating on
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < _h) {
		// BUGFIX: accumulate in double — the data is double, and the old
		// float accumulator silently discarded precision on every add.
		double res = 0.;
		// dot product of one row with the input vector
		for (int j = 0; j < _w; ++j) {
			res += _mat[i*_w + j] * _v[j];
		}
		// write result to global memory
		_dst[i] = res;
	}
}
// Create the (tic, toc) event pair and record the start marker on iStream.
// StopKernelTiming() records toc and destroys both events.
void StartKernelTiming(cudaEvent_t& tic, cudaEvent_t& toc, cudaStream_t iStream)
{
	cudaEventCreate(&tic);
	cudaEventCreate(&toc);
	cudaEventRecord(tic, iStream);
}
// Record the stop event on iStream, wait for it, and accumulate the elapsed
// time in milliseconds into *ptimer. Destroys both events created by
// StartKernelTiming().
void StopKernelTiming(cudaEvent_t& tic, cudaEvent_t& toc, cudaStream_t iStream, float* ptimer)
{
	float kt;
	cudaEventRecord(toc, iStream);
	cudaEventSynchronize(toc);
	cudaEventElapsedTime(&kt, tic, toc);
	cudaEventDestroy(tic); cudaEventDestroy(toc);
	(*ptimer) += kt;
}
|
9,266 |
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <unistd.h>
#include <stdio.h>
__global__ //declare global so the computer knows to run this on gpu
// C += A * B. Each thread handles a strided subset of the output columns;
// all blocks together must partition the columns without overlap.
// Assumes c[] is zero-initialised by the caller for a plain product.
void multiply(float *a, float *b, float *c, int aRow, int aCol,
	int bCol) {
	int index = threadIdx.x;
	int stride = blockDim.x;
	int block = blockIdx.x;
	// BUGFIX: the column step must advance by the TOTAL thread count
	// (blockDim.x * gridDim.x). Stepping by blockDim.x alone made every
	// block revisit columns owned by later blocks, so c[] accumulated
	// duplicate products (and raced on the += updates).
	int step = stride * gridDim.x;
	for(int i = 0; i < aRow; i += 1)
		for(int j = index + (block * stride); j < bCol; j += step)
			for(int k = 0; k < aCol; k += 1)
				c[i*bCol + j] += a[i*aCol + k] * b[j + k*bCol];
	// (the old trailing __syncthreads() before return was a no-op and
	// has been removed)
	return;
}
// CPU reference implementation of C += A * B (row-major), used to validate
// the GPU result. The caller must zero-initialise c for a plain product.
void scalar_multiply(float *a, float *b, float *c, int aRow, int aCol,
	int bCol) {
	for (int row = 0; row < aRow; ++row) {
		for (int col = 0; col < bCol; ++col) {
			// Accumulate the dot product of row `row` of A with
			// column `col` of B on top of the existing c value.
			float acc = c[row * bCol + col];
			for (int inner = 0; inner < aCol; ++inner) {
				acc += a[row * aCol + inner] * b[inner * bCol + col];
			}
			c[row * bCol + col] = acc;
		}
	}
	return;
}
// Matrix-multiply benchmark driver. Usage: prog aRows aCols bRows bCols
int main(int argc, char *argv[])
{
	if(argc < 5) {
		std::cerr << "usage: " << argv[0] << " aRows aCols bRows bCols\n";
		return(-1);
	}
	if(atoi(argv[2]) != atoi(argv[3])) {
		std::cerr << "error! aCols must match bRows. " <<
			argv[2] << ", " << argv[3] << std::endl;
		return(-1);
	}
	srand(4); //so creative
	int errorcheck = 0; //set to 1 to perform error checking
	int threads = 512;
	int blocks = 512;
	int DEBUG = 0; //set to 1 to print debug messages
	//accept 4 args: row, col, for a and b
	// BUGFIX: dimensions and byte counts are integral quantities; they were
	// previously held in floats, losing precision for large matrices.
	int aRow = atoi(argv[1]);
	int aCol = atoi(argv[2]);
	int bRow = atoi(argv[3]);
	int bCol = atoi(argv[4]);
	int cRow = aRow;
	int cCol = bCol;
	size_t aSize = (size_t)aRow * aCol * sizeof(float);
	size_t bSize = (size_t)bRow * bCol * sizeof(float);
	size_t cSize = (size_t)cRow * cCol * sizeof(float);
	float *a = (float *)malloc(aSize);
	float *b = (float *)malloc(bSize);
	float *c = (float *)malloc(cSize);
	float *cu_a;
	float *cu_b;
	float *cu_c;
	//allocate memory on the GPU
	cudaMalloc(&cu_a, aSize);
	cudaMalloc(&cu_b, bSize);
	cudaMalloc(&cu_c, cSize);
	//initialize them to randoms
	for(int i = 0; i < aRow*aCol; i++) {
		a[i] = rand() % 1000;
	}
	for(int i = 0; i < bRow*bCol; i++) {
		b[i] = rand() % 1000;
	}
	for(int i = 0; i < aRow*bCol; i++) {
		c[i] = 0;
	}
	//put the data into the GPU
	cudaMemcpy(cu_a, a, aSize, cudaMemcpyHostToDevice);
	cudaMemcpy(cu_b, b, bSize, cudaMemcpyHostToDevice);
	// BUGFIX: cu_c was never initialised — the kernel accumulates with +=,
	// so it summed into garbage. Zero it before every launch.
	cudaMemset(cu_c, 0, cSize);
	//warmup
	std::cout << "warming up...\n";
	multiply<<<blocks, threads>>>(cu_a, cu_b, cu_c, aRow, aCol, bCol);
	std::cout << "done.\nrunning tests...\n";
	if(errorcheck){
		//fill c with an array computed on cpu
		scalar_multiply(a, b, c, aRow, aCol, bCol);
	}
	double fulltime = 0;
	int repeats = 4;
	for(int i=0; i<repeats; i++) {
		//repeat in case the matrix size is too small to time properly
		// BUGFIX: reset the accumulator each repeat so c holds a single
		// product instead of the running sum of all previous launches.
		cudaMemset(cu_c, 0, cSize);
		std::clock_t start = std::clock();
		multiply<<<blocks, threads>>>(cu_a, cu_b, cu_c, aRow, aCol, bCol);
		cudaDeviceSynchronize();
		std::clock_t end = std::clock();
		cudaMemcpy(c, cu_c, cSize, cudaMemcpyDeviceToHost);
		fulltime += (end - start);
	}
	double flops = (double)aRow*aCol*bCol*2;   // 1 mul + 1 add per inner step
	double s_time = ((fulltime) / (double)(CLOCKS_PER_SEC));
	std::cout << "a[" << aRow << "," << aCol << "], b[" << bRow << "," << bCol << "], c[" << cRow << "," << cCol << "]\n";
	std::cout << "time: " << s_time*1000 << "ms\n";
	std::cout << "performance: " << flops << " flops at " << (((float)flops / 1000000000) / ((s_time) / repeats)) << "GFlop/s\n";
	if(DEBUG) {
		//printout
		for(int i=0; i<aRow * bCol; i++)
			std::cerr << c[i] << " ";
		std::cerr << std::endl;
	}
	//free GPU memory
	cudaFree(cu_a);
	cudaFree(cu_b);
	cudaFree(cu_c);
	//free CPU memory
	free(a);
	free(b);
	free(c);
	return 0;
}
|
9,267 | #include "includes.h"
// Per-thread transform: thread my_x produces `height` output coefficients
// from `width` input samples, accumulating cosine-weighted sums (real part
// only — the imaginary part is never computed).
// NOTE(review): the angle term multiplies my_x into BOTH factors
// ((my_x*height + t) * (my_x*width + k) / height), which does not match a
// textbook DFT/IDFT kernel — confirm against the intended math before reuse.
// Indexing also strides rows by `height` while iterating t over `width`;
// the width/height roles look swapped relative to the loop bounds — verify.
// `blockConfig` is accepted but unused.
__global__ void fourier_transform(float *in, float *out, int height, int width, int blockConfig) {
	// block elements and function variables
	int my_x, k, t;
	my_x = blockIdx.x * blockDim.x + threadIdx.x;
	// iterate through each element, going from frequency to time domain
	for (k = 0; k < height; k++) {
		// accumulator for the real component of coefficient k
		float realSum = 0.0;
		// iterate through the input element
		for (t = 0; t < width; t++) {
			// calculate the angle and update the sum
			float angle = 2 * M_PI * (my_x * height + t) * (my_x * width + k) / height;
			realSum += in[my_x * height + t] * cos(angle);
		}
		// each output element will be the current sum for that index
		out[my_x * height + k] = realSum;
	}
}
9,268 | #include <stdio.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <iostream>
#include <time.h>
#include <thread>
#include <vector>
using namespace std;
#define DSIZE 20000000
// Print the CUDA device count and a detailed property report for device 0.
// Purely informational; always returns true.
bool cuda_init(){
	int count;
	cudaGetDeviceCount(&count);
	cout<<"cuda device count:"<<count<<endl;
	cudaDeviceProp prop;
	// Only report if the property query for device 0 succeeds.
	if(cudaGetDeviceProperties(&prop,0)==cudaSuccess){
		printf( "   --- General Information for device %d ---\n", 0 );
		printf( "Name:  %s\n", prop.name );
		printf( "Compute capability:  %d.%d\n", prop.major, prop.minor );
		printf( "Clock rate:  %d\n", prop.clockRate );
		printf( "Device copy overlap:  " );
		if (prop.deviceOverlap)
			printf( "Enabled\n" );
		else
			printf( "Disabled\n");
		printf( "Kernel execution timeout :  " );
		if (prop.kernelExecTimeoutEnabled)
			printf( "Enabled\n" );
		else
			printf( "Disabled\n" );
		printf( "   --- Memory Information for device %d ---\n", 0 );
		printf( "Total global mem:  %ld\n", prop.totalGlobalMem );
		printf( "Total constant Mem:  %ld\n", prop.totalConstMem );
		printf( "Max mem pitch:  %ld\n", prop.memPitch );
		printf( "Texture Alignment:  %ld\n", prop.textureAlignment );
		printf( "   --- MP Information for device %d ---\n", 0 );
		printf( "Multiprocessor count:  %d\n",
				prop.multiProcessorCount );
		printf( "Shared mem per mp:  %ld\n", prop.sharedMemPerBlock );
		printf( "Registers per mp:  %d\n", prop.regsPerBlock );
		printf( "Threads in warp:  %d\n", prop.warpSize );
		printf( "Max threads per block:  %d\n",
				prop.maxThreadsPerBlock );
		printf( "Max thread dimensions:  (%d, %d, %d)\n",
				prop.maxThreadsDim[0], prop.maxThreadsDim[1],
				prop.maxThreadsDim[2] );
		printf( "Max grid dimensions:  (%d, %d, %d)\n",
				prop.maxGridSize[0], prop.maxGridSize[1],
				prop.maxGridSize[2] );
		printf( "\n" );
	}
	return true;
}
// Fill the DSIZE-element array with pseudo-random values in [0, 99].
// rand() is unseeded, so the sequence is deterministic across runs.
void initData(int data[]){
	for(int i = 0; i < DSIZE; ++i)
		data[i] = rand() % 100;
}
// Each thread accumulates the squares of its grid-strided slice of the input
// and stores its private partial sum; the host adds the partials together.
__global__ void sumGPU(int dataGPU[], int* result /*return val*/){
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	const int step = gridDim.x * blockDim.x;   // total threads in the grid
	int partial = 0;
	for (int i = tid; i < DSIZE; i += step)
		partial += dataGPU[i] * dataGPU[i];
	result[tid] = partial;
}
//one thread
//one thread
// Reference single-threaded sum of squares over the DSIZE array, with
// wall-clock reporting; returns the sum.
int sumCPU(int data[]){
	clock_t start = clock();
	int sum = 0;
	for (int i = 0; i < DSIZE; ++i)
		sum += data[i] * data[i];
	clock_t end = clock();
	cout<<"CPU running time(ms):"<<(double)(end-start)*1000.0f/CLOCKS_PER_SEC<<endl;
	cout<<"CPU one-thread result:"<<sum<<endl;
	return sum;
}
/////////////////////// main program ////////////////
/////////////////////// main program ////////////////
// Compares a CPU and a GPU sum-of-squares over DSIZE random ints.
int main(){
	//cuda_init();
	int* data = new int[DSIZE];
	initData(data);
	cout<<"\n******** CPU ***********"<<endl;
	sumCPU(data);
	/////// GPU job //////////
	cout<<"\n******** GPU ***********"<<endl;
	dim3 blocksize(256);
	dim3 gridsize(16);
	cout<<"block size:"<<blocksize.x<<endl;
	cout<<"grid size:"<<gridsize.x<<endl;
	clock_t start, end; start= clock();
	//int dataGPU[DSIZE]; //this is wrong, cpu will allocate memory from stack
	int* dataGPU;
	const int result_size = blocksize.x*gridsize.x;   // one partial per thread
	int* result;
	cudaMalloc((void**)&dataGPU,sizeof(int)*DSIZE);
	cudaMalloc((void**)&result, sizeof(int)*result_size);
	cudaMemcpy(dataGPU,data,sizeof(int)*DSIZE,cudaMemcpyHostToDevice);
	sumGPU<<<gridsize, blocksize>>>(dataGPU,result);
	// BUGFIX: use result_size consistently instead of re-deriving it, and
	// heap-allocate rather than using a large runtime-sized stack array.
	int* psum = new int[result_size];
	cudaMemcpy(psum,result,sizeof(int)*result_size,cudaMemcpyDeviceToHost);
	int sum=0;
	for(int i=0; i<result_size; i++){
		sum += psum[i];
	}
	cout<<"GPU result:"<<sum<<endl;
	end=clock();
	cout<<"GPU running time(ms):"<<(double)(end-start)*1000.0f/CLOCKS_PER_SEC<<endl;
	// BUGFIX: release device and host allocations (previously leaked).
	cudaFree(dataGPU);
	cudaFree(result);
	delete[] psum;
	delete[] data;
	return 0;
}
|
9,269 | #include <stdio.h>
// Grab nearly all free GPU memory, leaving argv[1] GiB + argv[2] MiB of
// head-room, then report the remaining free memory and wait for Enter.
int main(int argc, char *argv[]){
	int *mem;
	size_t avail, total;
	// BUGFIX: both arguments are required; bail out instead of
	// dereferencing missing argv entries.
	if (argc < 3) {
		fprintf(stderr, "usage: %s <GiB to leave free> <MiB to leave free>\n", argv[0]);
		return 1;
	}
	cudaMemGetInfo(&avail, &total);
	printf( "total available memory: %ld\n" ,avail / 1024 / 1024);
	// BUGFIX: use long long for the byte counts — (long) overflows on
	// LLP64 platforms where long is 32 bits. The previously duplicated
	// size expressions and the unused mem2/i variables are gone.
	long long sizeGiB = (long long)1024*1024*1024*atoi(argv[1]);
	long long sizeMiB = (long long)1024*1024*atoi(argv[2]);
	cudaMalloc(&mem, avail - sizeGiB - sizeMiB);
	cudaMemGetInfo(&avail, &total);
	printf( "available memory: %ld\n" ,avail / 1024 / 1024);
	printf("Press Enter key to continue...");
	fgetc(stdin);
	return 0;
}
|
9,270 | #include <iostream>
#define N 2048*2048
#include <chrono>
// Inner product of the two N-element vectors, written into *c.
void dot(int *a, int *b, int *c){
	int acc = 0;
	for (int idx = 0; idx < N; ++idx)
		acc += a[idx] * b[idx];
	*c = acc;
}
// CPU dot-product timing demo over two all-ones vectors of N elements.
int main(){
	int *a = new int[N];
	int *b = new int[N];
	int *c = new int;
	*c = 0;
	for(int i = 0; i < N; i++){
		a[i] = 1;
		b[i] = 1;
	}
	auto start = std::chrono::high_resolution_clock::now();
	dot(a, b, c);
	auto end = std::chrono::high_resolution_clock::now();
	// nanoseconds -> seconds
	double time_taken = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
	time_taken *= 1e-9;
	std::cout << "dot product: " << *c << std::endl;
	std::cout << "Time taken by program is : " << time_taken;
	std::cout << " sec" << std::endl;
	// BUGFIX: arrays allocated with new[] must be released with delete[];
	// the old function-call-style delete(a) on a new[] pointer is
	// undefined behaviour.
	delete[] a;
	delete[] b;
	delete c;
	return 0;
}
9,271 | #include "includes.h"
// One thread per pixel: zero every channel of the interleaved image except
// the last one. Expects a 2D launch covering width x height.
__global__ void toOneChannel(unsigned char *data, int width, int height, int components)
{
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	// Guard the grid tail in both dimensions.
	if (col >= width || row >= height)
		return;
	unsigned char *pixel = data + components * (col + row * width);
	for (int ch = 0; ch + 1 < components; ++ch)
		pixel[ch] = 0;
}
9,272 | /*
Copied From Thrust Library v-1.3.0
and refactored a little bit.
Akira Hayakawa, 2010
*/
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime_api.h>
// Print the command-line usage string for this tool to stdout.
void usage(const char *name){
	printf("usage: %s [device_id]\n", name);
}
// Print the SM architecture tag (e.g. "sm_75") of the device named by
// argv[1]; intended for consumption by build scripts.
int main(int argc, char **argv){
	int num_devices = 0;
	int device_id = 0;
	if(argc != 2){
		usage(argv[0]);
		exit(-1);
	}
	device_id = atoi(argv[1]);
	cudaGetDeviceCount(&num_devices);
	// BUGFIX: also reject negative ids — an explicit "-1" previously
	// slipped past the >= check straight into cudaGetDeviceProperties.
	if(device_id < 0 || device_id >= num_devices){
		printf("No available device with id %d\n", device_id);
		return -1;
	}
	cudaDeviceProp properties;
	cudaGetDeviceProperties(&properties, device_id);
	printf("sm_%d%d", properties.major, properties.minor);
	return 0;
}
|
9,273 | //
// Created by lidan on 22/10/2020.
//
#include <cuda_runtime.h>
#include <string>
#include <iostream>
#include <stdlib.h>
cudaError_t cuda();
// No-op placeholder kernel (never launched in this file; the 4-argument
// kernel() overload below does the actual rendering).
__global__ void kernel(){
}
// Clamp x into [a, b] via max(a, min(x, b)). Left exactly as written so the
// device max/min semantics (including their NaN handling) are preserved.
template<typename op>
__device__ op _clamp(op x, op a, op b)
{
	return max(a,min(x,b)) ;
}
// Pack three float channels (clamped to [0, 255]) into one integer laid out
// as (b << 16) | (g << 8) | r.
__device__ int rgbToInt(float r, float g, float b)
{
	const float lo = 0.0f, hi = 255.0f;
	const int ri = int(_clamp<float>(r, lo, hi));
	const int gi = int(_clamp<float>(g, lo, hi));
	const int bi = int(_clamp<float>(b, lo, hi));
	return (bi << 16) | (gi << 8) | ri;
}
// make uchar4 for write char4
// make uchar4 for write char4
// Debug-pattern kernel: paints a 32-pixel checkerboard into g_odata.
// NOTE(review): the channels are passed to rgbToInt as (z, y, x) — i.e.
// red/blue swapped relative to how c4 is constructed; confirm this BGR
// ordering is intended. The dynamic shared-memory array sdata is declared
// but never used, so any smem bytes passed at launch are wasted.
__global__ void
cudaRender(unsigned int *g_odata,int imgw)
{
	extern __shared__ uchar4 sdata[] ;
	int tx = threadIdx.x ;
	int ty = threadIdx.y ;
	int bx = blockDim.x ;
	int by = blockDim.y ;
	// global pixel coordinates
	int x = tx + blockIdx.x*bx ;
	int y = ty + blockIdx.y*by ;
	// 32-pixel checkerboard: value 100 where bit 5 of the coordinate is set
	uchar4 c4 = make_uchar4((x & 0x20) ? 100 : 0, 0, (y & 0x20) ? 100 : 0, 0);
	g_odata[y*imgw+x] = rgbToInt(c4.z, c4.y, c4.x);
}
// Minimal device-side complex number (float real/imaginary parts) for the
// Julia-set iteration below.
struct cuComplex{
	float r;   // real part
	float i;   // imaginary part
	__device__ cuComplex(float x,float y) : r(x),i(y){}
	// Squared magnitude |z|^2. The original "manitude2" spelling is kept:
	// renaming it would break julia()'s (commented-out) escape test.
	__device__ float manitude2(void)
	{
		return r*r + i* i ;
	}
	// Complex multiplication: (r + i·j)(a.r + a.i·j).
	__device__ cuComplex operator*(const cuComplex& a)
	{
		return cuComplex(r*a.r-i*a.i ,i*a.r + r* a.i ) ;
	}
	// Complex addition.
	__device__ cuComplex operator+(const cuComplex& a)
	{
		return cuComplex(r+a.r,i+a.i ) ;
	}
};
// Julia-set iteration count for pixel (x, y) of an src_width x src_height
// image; `scale` zooms the mapping of pixels onto the complex plane.
// Returns 0 if the point has not escaped after 1000 iterations, otherwise
// the iteration at which it escaped.
// NOTE(review): escape is tested on the raw real/imaginary components
// exceeding 150 rather than on the magnitude (the classic |z|^2 test is the
// commented-out block below) — presumably an artistic choice; confirm.
__device__ int julia(int src_width , int src_height, int x,int y,float scale)
{
	// Map pixel coordinates to the complex plane, centred on the image.
	float jx = scale * (float) (src_width/2- x)/(src_width/2) ;
	float jy = scale * (float) (src_height/2 - y)/(src_height/2) ;
	cuComplex c(-0.8, 0.156) ;   // Julia-set parameter
	cuComplex d(jx,jy) ;         // iterated value z
	// for(int i = 0 ;i <200 ;i++)
	// {
	// d = d*d + c;
	// if(d.manitude2() > 1000)
	// {
	// return 0 ;
	// }
	// }
	//
	// return 1 ;
	int iterations = 0;
	while (true) {
		iterations++;
		if (iterations >1000) return 0;
		d = d*d + c;   // z <- z^2 + c
		if (d.i > 150) return iterations;
		if (d.r > 150) return iterations;
	}
	return iterations ;   // unreachable; kept for the compiler
}
// Render the Julia set into an RGBA8 buffer (4 bytes per pixel).
// NOTE(review): the flattened thread id is decomposed as x = tid / width
// (row) and y = tid % width (column), which is only a consistent mapping
// when the grid's total x-extent (gridDim.x * blockDim.x) equals `width` —
// confirm at the launch site (launch_cudaRender passes imgw for both dims).
__global__ void kernel(const unsigned int width,const unsigned int height ,unsigned char* ptr,float scale )
{
	const unsigned int idx = (blockIdx.x* blockDim.x) + threadIdx.x ;
	const unsigned int idy = (blockIdx.y*blockDim.y) + threadIdx.y ;
	// flatten the 2D thread coordinates into a single pixel index
	const unsigned int tid = idy*gridDim.x*blockDim.x + idx ;
	int x = tid / width ;   // row
	int y = tid % width ;   // column
	if(x < height)
	{
		int juliaValue = julia(width,height,x,y,scale) ;
		// Map the iteration count to an RGBA colour ramp; alpha is opaque.
		ptr[(x*width+y)*4 + 0] = 190+120*juliaValue;
		ptr[(x*width+y)*4 + 1] = 40+35*round(cos(juliaValue/5.0));
		ptr[(x*width+y)*4 + 2] = 18+6*(juliaValue%10);
		ptr[(x*width+y)*4 + 3] = 255;
	}
}
// Host-side entry point (C linkage for the caller). Launches the Julia-set
// kernel over an imgw x imgw image with a fixed zoom of 1.2; the original
// checkerboard launch is kept commented for reference.
// NOTE(review): grid/block must jointly cover imgw*imgw threads for the
// kernel's tid-based indexing to paint the whole image — confirm call site.
extern "C" void
launch_cudaRender(dim3 grid, dim3 block, int sbytes, unsigned char *g_odata, int imgw)
{
	// cudaRender <<< grid, block, sbytes >>>(g_odata, imgw);
	kernel<<< grid, block, sbytes >>>(imgw,imgw,g_odata, 1.2);
}
|
9,274 | #include "parameters.cuh"
// Root-level constructor: depth 0, a single node, point buffer 0 selected.
__host__ __device__ Parameters::Parameters(int max_depth, int min_points_per_node):point_selector(0), num_nodes_at_this_level(1), depth(0), max_depth(max_depth), min_points_per_node(min_points_per_node){}
// Next-level constructor (the unnamed bool only disambiguates the overload):
// flips the ping-pong point buffer, multiplies the node count by 4
// (presumably a quadtree subdivision — confirm against the tree builder),
// and advances one depth level; the limits are inherited unchanged.
__host__ __device__ Parameters::Parameters(const Parameters& params, bool):
	point_selector((params.point_selector+1)%2),
	num_nodes_at_this_level(4*params.num_nodes_at_this_level),
	depth(params.depth+1),
	max_depth(params.max_depth),
	min_points_per_node(params.min_points_per_node) {}
|
9,275 | #include <stdio.h>
#include <math.h>
#define BLOCK_SIZE 1024
#define N 2048
// Sum-reduce the N-element array d_in into d_out[0] using interleaved
// addressing. Must be launched as <<<1, BLOCK_SIZE>>> with N == 2*BLOCK_SIZE
// (each thread stages two elements). N is a power of two.
__global__ void interleaved_reduce(int *d_in, int *d_out){
	int i = (blockIdx.x*blockDim.x)+threadIdx.x ;
	__shared__ int shareMem[N] ;
	// BUGFIX: the block has only N/2 threads, so each one must stage TWO
	// input elements; the upper half of shared memory was previously
	// never written before being read.
	shareMem[i] = d_in[i] ;
	shareMem[i + blockDim.x] = d_in[i + blockDim.x] ;
	__syncthreads() ;
	// BUGFIX: reduce within shared memory (the old code kept re-reading
	// d_in, so only the first round was correct), include thread 0 (the
	// old `i<M && i` guard excluded it, losing element 0's partial), and
	// barrier every round before the next stride consumes the results.
	for(int s=1; s<N; s=s*2){
		int idx = 2 * s * i ;
		if(idx + s < N){
			shareMem[idx] += shareMem[idx + s] ;
		}
		__syncthreads() ;   // loop bounds are uniform, so this is safe
	}
	if(i == 0){
		d_out[0] = shareMem[0] ;
	}
}
// Sum-reduce the N-element array d_in into d_out[0] using sequential
// (contiguous) addressing. Must be launched as <<<1, BLOCK_SIZE>>> with
// N == 2*BLOCK_SIZE; N is a power of two.
__global__ void contiguous_reduce(int *d_in, int *d_out){
	int i = (blockIdx.x*blockDim.x)+threadIdx.x ;
	__shared__ int shareMem[N] ;
	// BUGFIX: stage both halves of the input — with N == 2*blockDim.x the
	// first reduction round previously read shareMem[i + N/2] before it
	// had ever been written.
	shareMem[i] = d_in[i] ;
	shareMem[i + blockDim.x] = d_in[i + blockDim.x] ;
	__syncthreads() ;
	// Halve the active range each round; every active thread folds in the
	// mirrored element from the upper half.
	for(int s=N/2; s>0; s=s/2){
		if(i<s){
			shareMem[i] = shareMem[i] + shareMem[i+s] ;
		}
		__syncthreads() ;   // uniform loop bounds make this barrier safe
	}
	if(i == 0){
		d_out[0] = shareMem[0] ;
	}
}
// Dead experiment: was meant to pre-load shared memory from d_in, but shared
// memory does not persist across kernel launches, so the body stays
// commented out. Its call site in main() is commented out as well.
__global__ void setShareMem(int *d_in){
	int i = (blockIdx.x*blockDim.x)+threadIdx.x ;
	//shareMem[i] = d_in[i] ;
}
// Driver: reduce an array of N ones on the GPU and report the sum (expected
// to be N) plus the kernel time.
int main(){
	int h_in[N] ;
	int h_out ;
	for(int i=0; i<N; i++){
		h_in[i] = 1 ;
	}
	int *d_in, *d_out ;
	cudaMalloc((void**) &d_in, N*sizeof(int)) ;
	cudaMalloc((void**) &d_out, N*sizeof(int)) ;
	cudaMemcpy(d_in, h_in, N*sizeof(int), cudaMemcpyHostToDevice) ;
	cudaEvent_t start, stop ;
	cudaEventCreate(&start) ;
	cudaEventCreate(&stop) ;
	// Block count shown for reference; the single-block kernels below
	// require numBlock == 1 (N == 2 * BLOCK_SIZE).
	int numBlock = N/(BLOCK_SIZE*2) ;
	if(N%(BLOCK_SIZE*2)){
		numBlock++ ;
	}
	printf("numBlock=%d\n", numBlock) ;
	cudaEventRecord(start) ;
	interleaved_reduce<<<1, BLOCK_SIZE>>>(d_in, d_out) ;
	//contiguous_reduce<<<numBlock, BLOCK_SIZE>>>(d_in, d_out) ;
	cudaEventRecord(stop) ;
	cudaEventSynchronize(stop) ;
	float millisec = 0 ;
	cudaEventElapsedTime(&millisec, start, stop) ;
	cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost) ;
	// BUGFIX: release the timing events along with the device buffers
	// (the events were previously leaked).
	cudaEventDestroy(start) ;
	cudaEventDestroy(stop) ;
	cudaFree(d_in) ;
	cudaFree(d_out) ;
	printf("Output: %d\n", h_out) ;
	printf("Time used: %f\n", millisec) ;
	return 0 ;
}
|
9,276 | /* Andrew Miller <amiller@dappervision.com>
*
* Cuda 512*512*512*4bytes test
*
* According to the KinectFusion UIST 2011 paper, it's possible
* to do a sweep of 512^3 voxels, 32-bits each, in ~2ms on a GTX470.
*
* This code is a simple benchmark accessing 512^3 voxels. Each
* voxel has two 16-bit components. In this benchmark kernel, we
* simply increment these values by a constant K. More than anything
* it's a test of the memory bandwidth.
*
* On my GTX470 card, this kernel takes 10.7ms instead of ~2ms. Is there
* a faster way to do this?
*
* Citation: http://dl.acm.org/citation.cfm?id=2047270
* Public gdocs pdf link: http://tinyurl.com/6xlznbx
*/
#include <stdio.h>
#include <cuda.h>
#include <sys/time.h>
#include <assert.h>
// One 32-bit voxel with two 16-bit components, as described in the file
// header (KinectFusion-style TSDF volume).
struct Voxel {
	short int sd;   // signed-distance sample
	short int w;    // accumulation weight
};
const int N_BYTES = (512*512*512*4);
const int N_LOOPS = 10;
const int K = 7;
// Sweep the 512^3 volume one z-slice at a time: add K to both components,
// accumulating in `vox` in place and mirroring the result into `out`.
// Launch layout: <<<512, 512>>> — one thread per (x, y) column.
__global__ void incr_tsdf(Voxel *vox, Voxel *out)
{
	int idx = blockIdx.x*512 + threadIdx.x;
	for (int z = 0; z < 512; z++) {
		// BUGFIX: .sd was read-modify-written only into `out`, so repeated
		// sweeps never accumulated in `vox` and main()'s assertion that
		// vox.sd == N_LOOPS*K failed (it stayed 0). Accumulate in place,
		// exactly as .w already did.
		out[idx].sd = vox[idx].sd += K;
		out[idx].w = vox[idx].w += K;
		idx += 512*512;   // advance to the same column in the next z-slice
	}
}
// Benchmark driver: run N_LOOPS sweeps of incr_tsdf over the 512^3 volume,
// time them with CUDA events, then verify both voxel components accumulated
// N_LOOPS*K in the input volume.
int main(void) {
	Voxel *vox_gpu;
	Voxel *vox_gpuA;
	Voxel *vox_cpu;
	cudaMalloc((void **) &vox_gpu, N_BYTES);
	cudaMalloc((void **) &vox_gpuA, N_BYTES);
	vox_cpu = (Voxel *) calloc(N_BYTES, 1);
	cudaMemcpy(vox_gpu, vox_cpu, N_BYTES, cudaMemcpyHostToDevice);
	dim3 dimBlock(512,1,1);
	dim3 dimGrid(512,1,1);
	cudaEvent_t e_start, e_stop;
	cudaEventCreate(&e_start);
	cudaEventCreate(&e_stop);
	cudaEventRecord(e_start);
	for (int i = 0; i < N_LOOPS; i++) {
		incr_tsdf<<<dimGrid, dimBlock>>>(vox_gpu, vox_gpuA);
	}
	cudaEventRecord(e_stop);
	cudaEventSynchronize(e_stop);
	float ms;
	cudaEventElapsedTime(&ms, e_start, e_stop);
	// Copy back to the host and check we have what we expect
	cudaMemcpy(vox_cpu, vox_gpu, N_BYTES, cudaMemcpyDeviceToHost);
	for (int i = 0; i < 512; i++) {
		for (int j = 0; j < 512; j++) {
			for (int k = 0; k < 512; k++) {
				int idx = i*512*512 + j*512 + k;
				assert(vox_cpu[idx].sd == (short)N_LOOPS*K);
				assert(vox_cpu[idx].w == (short)N_LOOPS*K);
			}
		}
	}
	printf("%d sweeps of %.1f megavoxels in %.1fms (avg %.1fms)\n",
			N_LOOPS, N_BYTES/4.0/1000.0/1000.0, ms, ms/N_LOOPS);
	// BUGFIX: destroy the timing events and free BOTH device buffers
	// (vox_gpuA and the events were previously leaked).
	cudaEventDestroy(e_start);
	cudaEventDestroy(e_stop);
	cudaFree(vox_gpu);
	cudaFree(vox_gpuA);
	free(vox_cpu);
	return 0;
}
|
9,277 | #include<cuda.h>
#include <stdio.h>
#include<numeric>
#define SIZE 16
// In-place parallel max reduction; the result lands in input[0].
// Must be launched as a single block of SIZE/2 threads (SIZE a power of two).
__global__ void para_max(int *input)
{
	int tid=threadIdx.x;
	int step_size=1;
	int no_of_thread=blockDim.x;
	while(no_of_thread>0)
	{
		if(tid<no_of_thread)
		{
			int fst=tid*step_size*2;
			int snd=fst+step_size;
			if(input[fst]<input[snd])
				input[fst]=input[snd];
		}
		// BUGFIX: each round reads values written by other threads in the
		// previous round, so a block-wide barrier is required. The while
		// condition is uniform across the block, making the barrier safe.
		__syncthreads();
		step_size <<=1;
		no_of_thread >>=1;
	}
}
// In-place parallel min reduction; the result lands in input[0].
// Must be launched as a single block of SIZE/2 threads (SIZE a power of two).
__global__ void para_min(int *input)
{
	int tid=threadIdx.x;
	int step_size=1;
	int no_of_thread=blockDim.x;
	while(no_of_thread>0)
	{
		if(tid<no_of_thread)
		{
			int fst=tid*step_size*2;
			int snd=fst+step_size;
			if(input[fst]>input[snd])
				input[fst]=input[snd];
		}
		// BUGFIX: barrier between rounds — later rounds read partial
		// results produced by other threads (uniform loop, so safe).
		__syncthreads();
		step_size <<=1;
		no_of_thread >>=1;
	}
}
// In-place parallel sum reduction; the result lands in input[0].
// Must be launched as a single block of SIZE/2 threads (SIZE a power of two).
__global__ void para_add(int *input)
{
	int tid=threadIdx.x;
	int step_size=1;
	int no_of_thread=blockDim.x;
	while(no_of_thread>0)
	{
		if(tid<no_of_thread)
		{
			int fst=tid*step_size*2;
			int snd=fst+step_size;
			input[fst]+=input[snd];
		}
		// BUGFIX: barrier between rounds — later rounds read partial
		// sums produced by other threads (uniform loop, so safe).
		__syncthreads();
		step_size <<=1;
		no_of_thread >>=1;
	}
}
// Parallel sum reduction followed by division by SIZE, leaving the average
// in input[0]. Same launch contract as para_add.
// FIXES: (1) added the missing __syncthreads() between reduction steps;
// (2) the final division was executed by EVERY thread as an unsynchronized
// read-modify-write of input[0] -- depending on interleaving the value could
// be divided more than once. Only thread 0 performs it now.
__global__ void para_avg(int *input)
{
    int tid = threadIdx.x;
    int step_size = 1;
    int no_of_thread = blockDim.x;
    while (no_of_thread > 0)
    {
        if (tid < no_of_thread)
        {
            int fst = tid * step_size * 2;
            int snd = fst + step_size;
            input[fst] += input[snd];
        }
        __syncthreads();   // barrier reached by all threads every pass
        step_size <<= 1;
        no_of_thread >>= 1;
    }
    if (tid == 0)
        input[0] = input[0] / SIZE;
}
// Driver: uploads a 16-element vector and runs the max/min/sum/avg
// reductions, re-uploading before each kernel because every reduction
// destroys the device array in place.
int main(void)
{
    // FIX: removed the unused outer `int i;` that was shadowed by the
    // loop-local `int i` below.
    int result;   // reduced value copied back from input[0] on the device
    int *dev_a;
    cudaMalloc(&dev_a, SIZE*sizeof(int));
    int a[]={1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};
    printf("Vector is: ");
    for(int i=0;i<SIZE;i++)
        printf("%d ",a[i]);
    cudaMemcpy(dev_a,a,sizeof(a),cudaMemcpyHostToDevice);
    para_max<<<1, SIZE/2>>>(dev_a);
    cudaMemcpy(&result,dev_a,sizeof(result),cudaMemcpyDeviceToHost);
    printf("\n Max is: ");
    printf("%d\n",result);
    cudaMemcpy(dev_a,a,sizeof(a),cudaMemcpyHostToDevice);
    para_min<<<1, SIZE/2>>>(dev_a);
    cudaMemcpy(&result,dev_a,sizeof(result),cudaMemcpyDeviceToHost);
    printf(" Min is: ");
    printf("%d\n",result);
    cudaMemcpy(dev_a,a,sizeof(a),cudaMemcpyHostToDevice);
    para_add<<<1, SIZE/2>>>(dev_a);
    cudaMemcpy(&result,dev_a,sizeof(result),cudaMemcpyDeviceToHost);
    printf(" Sum is: ");
    printf("%d\n",result);
    cudaMemcpy(dev_a,a,sizeof(a),cudaMemcpyHostToDevice);
    para_avg<<<1, SIZE/2>>>(dev_a);
    cudaMemcpy(&result,dev_a,sizeof(result),cudaMemcpyDeviceToHost);
    printf(" Avg is: ");
    printf("%d\n",result);
    cudaFree(dev_a);
    return 0;
}
|
9,278 | #include <iostream>
#include <cstdlib>
#include <sys/time.h>
using namespace std;
const int BLOCKSIZE = 32;
// Global variables to calculate sizes
int *sizes_gpu, *subtotals_gpu, *total_gpu; // For finding sums
// Stopwatch class
// Simple wall-clock stopwatch built on gettimeofday (sub-second resolution).
class Stopwatch {
private:
    timeval initialTime;   // time of construction or of the last reset()
public:
    Stopwatch() {
        reset();
    }
    // Restart timing from now.
    void reset() {
        gettimeofday(&initialTime, 0);
    }
    // Seconds elapsed since the last reset(), as a float.
    float elapsed() const {
        timeval currentTime;
        // FIX: "&currentTime" had been corrupted into the HTML
        // currency-sign entity ("¤tTime"), which does not compile.
        gettimeofday(&currentTime, 0);
        return (float) (currentTime.tv_sec - initialTime.tv_sec) + ((float) (currentTime.tv_usec - initialTime.tv_usec))/1000000;
    }
};
// Create an TownSet of size n with all false entries
void createTownSet(char *current, int n) {
char temp[n];
for (int i=0; i<n; i++)
temp[i] = 0;
cudaMemcpy(¤t, &temp, n, cudaMemcpyHostToDevice);
}
// Take a union of current_gpu and with specified townset and store it in current_gpu
__global__ void unionTownSet_gpu(char *current, char *townSets, int i, int n) {
int threadId = blockIdx.x*blockDim.x + threadIdx.x;
current[threadId] += townSets[i*n+threadId];
}
// Host wrapper: launch one thread per town to union row i of townSets into
// `current`. (As elsewhere in this file, n is assumed to be a multiple of
// BLOCKSIZE.)
void unionTownSet(char *current, char *townSets, int i, int n) {
    int numBlocks = n / BLOCKSIZE;
    unionTownSet_gpu<<<numBlocks, BLOCKSIZE>>>(current, townSets, i, n);
}
// Find the total population in all of the town sets
__global__ void sizeTownSet_gpu(char *current, int *pops, int n, int *sizes) {
int threadId = blockIdx.x*blockDim.x + threadIdx.x;
if (current[threadId] == 1)
sizes[threadId] = pops[threadId];
else
sizes[threadId] = 0;
}
// Block-level sum reduction: each block stages BLOCKSIZE consecutive ints
// from `input` into shared memory, tree-reduces them, and writes the block
// total to output[blockIdx.x].
// NOTE(review): assumes blockDim.x is a power of two (launched with
// BLOCKSIZE = 32) and that `input` has at least gridDim.x * blockDim.x
// readable elements -- there is no bounds check on threadId. Verify the
// buffers passed in are sized accordingly.
__global__ void totals_gpu(int *output, int *input) {
__shared__ int temp[BLOCKSIZE];
int threadId = blockIdx.x*blockDim.x + threadIdx.x;
// Stage one element per thread into shared memory.
temp[threadIdx.x] = input[threadId];
__syncthreads();
// Tree reduction: halve the active stride each pass.
for (int s=blockDim.x/2; s>0; s>>=1) {
if (threadIdx.x < s)
temp[threadIdx.x] += temp[threadIdx.x+s];
__syncthreads();
}
// Thread 0 publishes the block's total.
if (threadIdx.x == 0)
output[blockIdx.x] = temp[0];
}
// Total population of the towns in `current`: expand to per-town populations,
// then reduce in two passes of totals_gpu. Returns the grand total read from
// total_gpu[0]. (Valid only while n/BLOCKSIZE <= BLOCKSIZE, i.e. the second
// pass's first block covers every subtotal.)
int sizeTownSet(char *current, int *pops, int n) {
    sizeTownSet_gpu<<<n/BLOCKSIZE, BLOCKSIZE>>>(current, pops, n, sizes_gpu);
    totals_gpu<<<n/BLOCKSIZE, BLOCKSIZE>>>(subtotals_gpu, sizes_gpu); // Calculate subtotals
    totals_gpu<<<n/BLOCKSIZE, BLOCKSIZE>>>(total_gpu, subtotals_gpu); // Reduce subtotals
    int result;
    // FIX: source and destination were swapped -- a device-to-host copy must
    // read FROM total_gpu INTO &result; the original returned uninitialized
    // host memory.
    cudaMemcpy(&result, total_gpu, sizeof(int), cudaMemcpyDeviceToHost);
    return result;
}
// Count number of towns in a set
__global__ void countTownSet_gpu(char *current, int n, int *sizes) {
int threadId = blockIdx.x*blockDim.x + threadIdx.x;
if (current[threadId] == 1)
sizes[threadId] = 1;
else
sizes[threadId] = 0;
}
// Number of towns in `current`, via the same two-pass reduction as
// sizeTownSet.
int countTownSet(char *current, int n) {
    countTownSet_gpu<<<n/BLOCKSIZE, BLOCKSIZE>>>(current, n, sizes_gpu);
    totals_gpu<<<n/BLOCKSIZE, BLOCKSIZE>>>(subtotals_gpu, sizes_gpu); // Calculate subtotals
    totals_gpu<<<n/BLOCKSIZE, BLOCKSIZE>>>(total_gpu, subtotals_gpu); // Reduce subtotals
    int result;
    // FIX: source and destination were swapped (see sizeTownSet) -- copy
    // FROM the device total INTO the host result.
    cudaMemcpy(&result, total_gpu, sizeof(int), cudaMemcpyDeviceToHost);
    return result;
}
// Driver: builds a random town-membership matrix and populations, uploads
// them, and benchmarks the requested operation (a/u/s) `reps` times.
int main(int argc, char **argv) {
    if (argc != 4) {
        cout << "Usage: " << argv[0] << " mode size reps" << endl;
        cout << " Mode is one of the following:" << endl;
        cout << " a test full algorithm" << endl;
        cout << " u test taking unions" << endl;
        cout << " s test calculating sizes" << endl;
        exit(0);
    }
    int size = atoi(argv[2]);
    int reps = atoi(argv[3]);
    char mode = argv[1][0];
    // Initialize data: size x size 0/1 membership matrix (~10% density,
    // diagonal forced on) and a random population per town.
    char townSets[size*size];
    for (int i=0; i<size; i++) {
        for (int j=0; j<size; j++) {
            if (rand() < .1*RAND_MAX)
                townSets[i*size+j] = 1;
            else
                townSets[i*size+j] = 0;
        }
        townSets[i*size+i] = 1;
    }
    int pops[size];
    int totalPop = 0;
    for (int i=0; i<size; i++) {
        pops[i] = rand() % 256;
        totalPop += pops[i];
    }
    // Perform the tests
    int rounds, totalRounds;
    Stopwatch timer;
    // Copy the data to the GPU
    char *townSets_gpu;
    int *pops_gpu;
    char *current_gpu; // Stores working townset
    cudaMalloc((void**) &townSets_gpu, size*size);
    cudaMemcpy(townSets_gpu, townSets, size*size, cudaMemcpyHostToDevice);
    cudaMalloc((void**) &pops_gpu, size*sizeof(int));
    cudaMemcpy(pops_gpu, pops, size*sizeof(int), cudaMemcpyHostToDevice);
    // FIX: "&current_gpu" had been corrupted into the HTML currency entity
    // ("¤t_gpu"), which does not compile.
    cudaMalloc((void**) &current_gpu, size);
    // FIX: these buffers hold ints, so their byte counts must be scaled by
    // sizeof(int). The original allocated `size` raw bytes for `size` ints
    // (and a single byte for total_gpu), so every reduction kernel wrote
    // out of bounds. subtotals_gpu is sized and zeroed generously because
    // the second totals_gpu pass reads gridDim*blockDim = size elements.
    cudaMalloc((void**) &sizes_gpu, size*sizeof(int));
    cudaMalloc((void**) &subtotals_gpu, size*sizeof(int));
    cudaMalloc((void**) &total_gpu, size*sizeof(int));
    cudaMemset(subtotals_gpu, 0, size*sizeof(int));
    // Do the simulation
    switch (mode) {
    case 'a':
        totalRounds=0;
        for (int r=0; r<reps; r++) {
            createTownSet(current_gpu, size);
            rounds = 0;
            while (sizeTownSet(current_gpu, pops_gpu, size) <= totalPop/2) {
                unionTownSet(current_gpu, townSets_gpu, rand()%size, size);
                rounds++;
            }
            totalRounds += rounds;
        }
        cout << "Average number of rounds = " << ((float) totalRounds/reps) << endl;
        break;
    case 'u':
        createTownSet(current_gpu, size);
        for (int r=0; r<reps; r++)
            unionTownSet(current_gpu, townSets_gpu, rand()%size, size);
        break;
    case 's':
        int s;
        for (int r=0; r<reps; r++)
            s = sizeTownSet(current_gpu, pops_gpu, size);
        break;
    }
    cout << "Elapsed time " << timer.elapsed() << endl;
    // FIX: release device allocations (none were freed).
    cudaFree(townSets_gpu);
    cudaFree(pops_gpu);
    cudaFree(current_gpu);
    cudaFree(sizes_gpu);
    cudaFree(subtotals_gpu);
    cudaFree(total_gpu);
}
|
9,279 | // ME 759 Spring 2021 Final Project
// driver.cu
// Author: Jason Zhou
#include <cstdio>
#include <random>
#include <iostream>
#include <string>
#include "BWTerrain.cuh"
// Entry point: runs a 5 s single-wheel-on-deformable-terrain simulation at a
// fixed 0.01 s step and writes zero-padded, numbered output files for the
// terrain and the wheel every step.
int main(int argc, char* argv[]) {
// Note: Currently the minimum resolution tested to be working 0.05f
// Experiments have shown that when goes below 0.05f, Bulldozing algorithm has flaws
// declare a terrain with size 5 m x 5 m with resolution 0.05 m
BWTerrain terrain = BWTerrain(5.f, 5.f, 0.05f);
// declare a wheel with radius of 0.5 m, a width of 1 m, and a mass of 10 kg
BWWheel wheel = BWWheel(0.5f, 1.f, 10.f);
// initialize the wheel object
// NOTE(review): the three floats are presumably the initial x/y/z position
// -- confirm against BWWheel::Initialize in BWTerrain.cuh
wheel.Initialize(1.0f, 2.5f, 0.5f);
// set z direction acceleration to -9.8, simulate gravity effect
wheel.acc_z = -9.8;
// single - wheel constant x direction velocity set to 0.5
wheel.vel_x = 0.5;
std::cout << "Terrain: " << terrain.Get_X_Size() << "," << terrain.Get_Y_Size() << "," << terrain.Get_Resolution()
<< std::endl;
std::cout << "Wheel: " << wheel.Get_R() << "," << wheel.Get_W() << std::endl;
// bulldozing effect is left DISABLED here (pass true to enable it);
// the default is off
terrain.Set_Bulldozing(false);
// initialize the terrain
terrain.Initialize();
// perform simulation
// step size set to 0.01 s
// total simulation time is 5 s
for (int i = 0; i < 500; i++) {
wheel.Advance(0.01);
terrain.Advance(0.01, &wheel);
// Zero-pad the step number so file names sort lexicographically.
if (i < 10) {
terrain.WriteOutput("ter-000" + std::to_string(i));
wheel.WriteOutput("whe-000" + std::to_string(i));
} else if (i < 100) {
terrain.WriteOutput("ter-00" + std::to_string(i));
wheel.WriteOutput("whe-00" + std::to_string(i));
} else {
terrain.WriteOutput("ter-0" + std::to_string(i));
wheel.WriteOutput("whe-0" + std::to_string(i));
}
std::cout << "time: " << i * 0.01 << " s" << std::endl;
std::cout << "wheel pos:" << wheel.pos_x << "," << wheel.pos_y << "," << wheel.pos_z << std::endl;
std::cout << "wheel acc_z:" << wheel.acc_z << std::endl;
std::cout << "==============================================" << std::endl;
}
// safely free up all GPU memory
terrain.Destroy();
} |
9,280 | #include<cstdio>
#include<vector>
#include<string>
#include<cuda_runtime.h>
#include<thrust/fill.h>
#define BLOCK_SIZE 256
using namespace std;
// Arithmetic mean of the recorded timings; accumulates in double for
// precision, returns as float. (Yields NaN for an empty vector, as before.)
float average(const vector<float> &timing) {
    double total = 0.0;
    for (size_t k = 0; k < timing.size(); ++k) {
        total += timing[k];
    }
    return total / timing.size();
}
// Debug dump: print the first ten and last ten entries of `data` (length
// `len`), labelled with `flag`. Assumes len >= 10.
void print_info(int *data, int len, string flag) {
    const char *label = flag.c_str();
    printf("%s frist ten:\n", label);
    for (int k = 0; k < 10; k++)
        printf("%d ", data[k]);
    printf("\n");
    printf("%s last ten:\n", label);
    for (int k = len - 10; k < len; k++)
        printf("%d ", data[k]);
    printf("\n");
}
// Fill data[0..len) with the repeating pattern i % 256 (histogram test data).
void assign_value(int *data, int len) {
    int pos = 0;
    while (pos < len) {
        data[pos] = pos % 256;
        ++pos;
    }
}
// Naive 256-bin histogram: one global atomicAdd per input element.
// FIX: added a bounds check -- cal_histogram rounds the grid up, so trailing
// threads used to read past the end of `data` and corrupt the counts.
// Requires every data value to lie in [0, 256).
__global__ void naive_kernel(int *data, int *out, int len) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len)
        atomicAdd(&(out[data[tid]]), 1);
}
// 256-bin histogram with a per-block shared-memory partial histogram to cut
// global atomic traffic: each thread zeroes one bin, accumulates its element,
// then flushes its bin to the global result with one atomic.
// NOTE(review): thread t both zeroes and flushes bin sdata[t], and the launch
// passes 256*sizeof(int) of dynamic shared memory -- so correctness requires
// blockDim.x == 256 and every data value in [0, blockDim.x). Confirm callers
// honor this (cal_histogram launches with BLOCK_SIZE = 256).
__global__ void histogram_kernel(int *data, int *out, int len) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int thx = threadIdx.x;
extern __shared__ int sdata[];
// Zero the block-local histogram before any thread accumulates into it.
sdata[thx] = 0;
__syncthreads();
// Each in-range thread counts its one element into the shared histogram.
if (tid<len) atomicAdd(&(sdata[data[tid]]), 1);
__syncthreads();
// Flush: one global atomic per bin per block.
atomicAdd(&(out[thx]), sdata[thx]);
}
// Like histogram_kernel, but each block processes blockDim.x * stride
// elements: each thread walks its slice with step blockDim.x (coalesced
// reads), amortizing the shared-histogram zero/flush over `stride` inputs.
// NOTE(review): same contract as histogram_kernel -- blockDim.x must be 256
// (matching the 256-int dynamic shared allocation and the one-bin-per-thread
// zero/flush) and data values must lie in [0, 256).
__global__ void histogram_kernel_multiple_read(int *data, int *out, int len, int stride) {
int tid = blockIdx.x * (blockDim.x * stride) + threadIdx.x;
int thx = threadIdx.x;
extern __shared__ int sdata[];
sdata[thx] = 0;
__syncthreads();
// Clamp this block's range to the array length.
int end = min(tid + blockDim.x * stride, len);
for (int i=tid; i< end; i+=blockDim.x) {
atomicAdd(&(sdata[data[i]]), 1);
}
__syncthreads();
// One global atomic per bin per block.
atomicAdd(&(out[thx]), sdata[thx]);
}
// Host wrapper: launch the multi-read histogram kernel so that each block
// covers numThreads * 32 input elements, with a 256-bin shared histogram.
void cal_histogram(int *data, int *out, int len, int numThreads) {
    const int stride = 32;                       // elements handled per thread
    int elemsPerBlock = numThreads * stride;
    int numblocks = (len + elemsPerBlock - 1) / elemsPerBlock;   // ceil-div
    histogram_kernel_multiple_read<<<numblocks, numThreads, 256 * sizeof(int)>>>(data, out, len, stride);
}
// Sequential reference histogram: increments out[v] for every value v in
// data[0..len). Caller must pre-zero `out` and guarantee values index it.
void cal_histogram_cpu(int *data, int *out, int len) {
    int k = len;
    while (k-- > 0)
        out[data[k]]++;
}
// Benchmark driver: histograms 512M ints (2 GiB on host AND device -- needs
// a large-memory GPU), checks against the CPU reference by eye via
// print_info, and reports average kernel time over `loops` runs.
int main() {
int numThreads = BLOCK_SIZE;
int len = 512 * 1024 * 1024;
// NOTE(review): malloc/cudaMalloc results are not checked; a 2 GiB
// allocation can fail and would crash below.
int *h_data = (int*)malloc(len * sizeof(int));
assign_value(h_data, len);
int *d_data;
cudaMalloc((void**)&d_data, len * sizeof(int));
cudaMemcpy(d_data, h_data, len * sizeof(int), cudaMemcpyHostToDevice);
// CPU reference histogram.
int *result = (int*) malloc(256 * sizeof(int));
fill_n(result, 256, 0);
cal_histogram_cpu(h_data, result, len);
print_info(result, 256, "cpu result");
int *d_result;
cudaMalloc((void**)&d_result, 256 * sizeof(int));
// thrust::fill(d_result, d_result + 256, 0);
// Zero the device histogram by staging zeros through the host buffer.
fill_n(result, 256, 0);
cudaMemcpy(d_result, result, 256 * sizeof(int), cudaMemcpyHostToDevice);
vector<float> times;
int loops = 1;
for (int i=0; i<loops; i++) {
// NOTE(review): events are created each iteration and never destroyed;
// also, running loops > 1 accumulates into d_result without re-zeroing.
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, NULL);
cal_histogram(d_data, d_result, len, numThreads);
cudaEventRecord(end, NULL);
cudaEventSynchronize(end);
float time = 0;
cudaEventElapsedTime(&time, start, end);
times.push_back(time);
}
printf("cal histogram avg time:%lf\n", average(times));
cudaMemcpy(result, d_result, 256 * sizeof(int), cudaMemcpyDeviceToHost);
print_info(result, 256, "gpu result");
cudaFree(d_data);
cudaFree(d_result);
free(h_data);
free(result);
} |
9,281 | #include "includes.h"
// curand
#define N 100
#define T 4
void llenarMatriz(int*);
// N x N integer matrix product: c = a * b', one thread per output element,
// addressed by a 2D grid. Note the second operand is read as n[j + k*N],
// i.e. column j of n laid out with stride N.
// NOTE(review): no bounds check against N -- the launch geometry must cover
// exactly N x N threads or the kernel writes out of bounds (the "0 - 2047"
// comments suggest a 2048-wide launch while N is #defined as 100; confirm).
__global__ void multiplicacion( int *a, int *b, int *c ) {
int i = threadIdx.x + blockIdx.x*blockDim.x; // 0 - 2047
int j = threadIdx.y + blockIdx.y*blockDim.y; // 0 - 2047
// Clear the output cell, then accumulate the dot product.
c[j+i*N] = 0; // 4,194,303
for(int k=0 ; k < N ; k++ ){
c[j+i*N] += a[k+i*N] * b[j+k*N];
}
} |
9,282 | #include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstring>
#include <time.h>
// Element-wise vector add: c[gid] = a[gid] + b[gid].
// NOTE(review): no bounds check -- the launch must cover exactly the array
// length (main enforces grid = shape / blockDim).
__global__ void adderboi(int * a, int * b, int * c)
{
    int gid = blockDim.x * blockIdx.x + threadIdx.x;   // flat global index
    c[gid] = b[gid] + a[gid];
}
// Verify c[i] == a[i] + b[i] for every i in [0, shape).
// Returns 1 on success, 0 at the first mismatch.
int cpu_adder(int * a, int * b, int * c, int shape)
{
    int idx = 0;
    while (idx < shape)
    {
        if (c[idx] != a[idx] + b[idx])
        {
            return 0;
        }
        ++idx;
    }
    return 1;
}
// Driver: read a block size, add two random 4M-element vectors on the GPU,
// and verify against the CPU.
int main()
{
    int shape = 1<<22;                  // element count (4,194,304)
    int size = shape*sizeof(int);       // bytes per buffer
    int b;
    printf("Enter block size : ");
    // FIX: validate the input -- scanf's result was ignored, and a block
    // size that does not divide `shape` would truncate the grid and leave a
    // tail of elements unprocessed (the kernel has no bounds check).
    if (scanf("%d",&b) != 1 || b <= 0 || shape % b != 0)
    {
        printf("Block size must be a positive divisor of %d\n", shape);
        return 1;
    }
    dim3 grid(shape/b);
    int * h_arr1;
    int * h_arr2;
    int * h_arr3;
    h_arr1=(int *)malloc(size);
    h_arr2=(int *)malloc(size);
    h_arr3=(int *)malloc(size);
    // Random operands in [0, 15]; result buffer zeroed.
    for(int i=0; i< shape; i++)
    {
        h_arr1[i]=(int)(rand() & 0x0f);
        h_arr2[i]=(int)(rand() & 0x0f);
        h_arr3[i]=0;
    }
    int * d_arr1;
    int * d_arr2;
    int * d_arr3;
    cudaMalloc((int**)&d_arr1, size);
    cudaMalloc((int**)&d_arr2, size);
    cudaMalloc((int**)&d_arr3, size);
    cudaMemcpy(d_arr1, h_arr1, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_arr2, h_arr2, size, cudaMemcpyHostToDevice);
    adderboi <<<grid, b>>> (d_arr1, d_arr2, d_arr3);
    cudaDeviceSynchronize();
    cudaMemcpy(h_arr3, d_arr3, size, cudaMemcpyDeviceToHost);
    printf(cpu_adder(h_arr1, h_arr2, h_arr3, shape)?"CPU and GPU values match\n":"CPU and GPU values donot match\n");
    cudaFree(d_arr1);
    cudaFree(d_arr2);
    cudaFree(d_arr3);
    free(h_arr1);
    free(h_arr2);
    free(h_arr3);
    cudaDeviceReset();
} |
9,283 | #include "includes.h"
// size x size integer matrix product, one thread per output element.
// Reads n as n[col*size + i], i.e. the second operand is indexed transposed
// relative to m (kept as the original did).
// FIXES: (1) p_sum was READ BEFORE INITIALIZATION -- the accumulator now
// starts at 0; (2) added a bounds guard so launches rounded up past `size`
// cannot write out of bounds.
__global__ void matrixMult(int* m,int* n,int* p, int size)
{
    int row=blockIdx.y*blockDim.y+threadIdx.y;
    int col=blockIdx.x*blockDim.x+threadIdx.x;
    if (row >= size || col >= size)
        return;
    int p_sum = 0;
    for(int i=0;i<size;i++){
        p_sum += m[row*size+i] * n[col*size+i];
    }
    p[row*size+col]=p_sum;
} |
9,284 | #include <stdio.h>
#include <iostream>
#include <stdlib.h>
/**
* Computes the log of reaction rate.
* @param a: Pointer to coefficient matrix.
* @param temp: Pointer to temperature array.
* @param lam: Matrix to write the results to.
* @param nsets: Number of sets / number of rows in coefficient matrix.
* @param ncells: Number of cells / length of temperature array.
* @param ncoeff: Number of coefficients / number of columns in coefficient matrix.
*/
template <class dtype>
__device__ void rates(dtype *a, dtype *temp, dtype *lam, int nsets, int ncells, int ncoeff)
{
    // Grid-stride setup per axis: each thread starts at its global
    // coordinate and advances by the total thread count along that axis, so
    // any launch geometry covers the full index space.
    const int i0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int di = blockDim.x * gridDim.x;
    const int j0 = blockIdx.y * blockDim.y + threadIdx.y;
    const int dj = blockDim.y * gridDim.y;
    const int k0 = blockIdx.z * blockDim.z + threadIdx.z;
    const int dk = blockDim.z * gridDim.z;
    // lam[i][j] += sum over k of a[i][k] * temp[k][j]; atomicAdd because
    // threads with different k accumulate into the same output cell.
    for (int i = i0; i < nsets; i += di)
        for (int j = j0; j < ncells; j += dj)
            for (int k = k0; k < ncoeff; k += dk)
                atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k] * temp[k * ncells + j]);
}
// Builds the reaclib coefficient matrix `a` and the temperature basis
// `temp` in shared memory, then accumulates ln(lambda) into `lam`.
// Preconditions: single-block launch (only global thread 0 fills `a`, and
// each block has its own shared copy) and ncoeff == 7 basis rows.
template <class dtype, int nsets, int ncells, int ncoeff>
__global__ void exec(dtype *lam)
{
    int xInd = blockIdx.x * blockDim.x + threadIdx.x;
    int xSize = blockDim.x * gridDim.x;
    int yInd = blockIdx.y * blockDim.y + threadIdx.y;
    int ySize = blockDim.y * gridDim.y;
    int zInd = blockIdx.z * blockDim.z + threadIdx.z;
    int zSize = blockDim.z * gridDim.z;
    int ind = xInd * ySize * zSize + yInd * zSize + zInd;   // flat global id
    int numThreads = xSize * ySize * zSize;
    // Tensors
    __shared__ dtype a[nsets * ncoeff];
    // These are all of the sets in reaclib with two nuclei as reactants
    // where one of them is carbon-12.
    if(ind == 0)
    {
        // he4 + c12 -> o16 (1)
        a[0] = 69.6526;
        a[1] = -1.39254;
        a[2] = 58.9128;
        a[3] = -148.273;
        a[4] = 9.08324;
        a[5] = -0.541041;
        a[6] = 70.3554;
        // he4 + c12 -> o16 (2)
        a[7] = 254.634;
        a[8] = -1.84097;
        a[9] = 103.411;
        a[10] = -420.567;
        a[11] = 64.0874;
        a[12] = -12.4624;
        a[13] = 137.303;
        // he4 + c12 -> n + o15
        a[14] = 17.0115;
        a[15] = -98.6615;
        a[16] = 0.0;
        a[17] = 0.124787;
        a[18] = 0.0588937;
        a[19] = -0.00679206;
        a[20] = 0.0;
        // he4 + c12 -> p + n15 (1)
        a[21] = 27.118;
        a[22] = -57.6279;
        a[23] = -15.253;
        a[24] = 1.59318;
        a[25] = 2.4479;
        a[26] = -2.19708;
        a[27] = -0.666667;
        // he4 + c12 -> p + n15 (2)
        a[28] = -5.2319;
        a[29] = -59.6491;
        a[30] = 0.0;
        a[31] = 30.8497;
        a[32] = -8.50433;
        a[33] = -1.54426;
        a[34] = -1.5;
        // he4 + c12 -> p + n15 (3)
        a[35] = 20.5388;
        a[36] = -65.034;
        a[37] = 0.0;
        a[38] = 0.0;
        a[39] = 0.0;
        a[40] = 0.0;
        a[41] = -1.5;
        // he4 + c12 -> p + n15 (4)
        a[42] = -6.93365;
        a[43] = -58.7917;
        a[44] = 0.0;
        a[45] = 22.7105;
        a[46] = -2.90707;
        a[47] = 0.205754;
        a[48] = -1.5;
        // c12 + c12 -> n + mg23
        a[49] = -12.8056;
        a[50] = -30.1498;
        a[51] = 0.0;
        a[52] = 11.4826;
        a[53] = 1.82849;
        a[54] = -0.34844;
        a[55] = 0.0;
        // c12 + c12 -> p + na23
        a[56] = 60.9649;
        a[57] = 0.0;
        a[58] = -84.165;
        a[59] = -1.4191;
        a[60] = -0.114619;
        a[61] = -0.070307;
        a[62] = -0.666667;
        // c12 + c12 -> he4 + ne20
        a[63] = 61.2863;
        a[64] = 0.0;
        a[65] = -84.165;
        a[66] = -1.56627;
        a[67] = -0.0736084;
        a[68] = -0.072797;
        a[69] = -0.666667;
        // he4 + he4 + he4 -> c12 (1)
        a[70] = -0.971052;
        a[71] = 0.0;
        a[72] = -37.06;
        a[73] = 29.3493;
        a[74] = -115.507;
        a[75] = -10.0;
        a[76] = -1.33333;
        // he4 + he4 + he4 -> c12 (2)
        a[77] = -11.7884;
        a[78] = -1.02446;
        a[79] = -23.57;
        a[80] = 20.4886;
        a[81] = -12.9882;
        a[82] = -20.0;
        a[83] = -2.16667;
        // he4 + he4 + he4 -> c12 (3)
        a[84] = -24.3505;
        a[85] = -4.12656;
        a[86] = -13.49;
        a[87] = 21.4259;
        a[88] = -1.34769;
        a[89] = 0.0879816;
        a[90] = -13.1653;
    }
    // Temperature basis: ncoeff (= 7) rows of ncells samples, spanning
    // T9 values for T = 10^7 .. 10^10 K.
    __shared__ dtype temp[ncoeff * ncells];
    for(int i = ind; i < ncells; i += numThreads)
    {
        dtype val = pow(10.0, 7 + 3.0 / ncells * i) * 1.0e-9;
        temp[i] = 1.0;
        temp[ncells + i] = 1 / val;
        temp[2 * ncells + i] = pow(val, (dtype)(-1.0 / 3.0));
        temp[3 * ncells + i] = pow(val, (dtype)(1.0 / 3.0));
        temp[4 * ncells + i] = val;
        temp[5 * ncells + i] = pow(val, (dtype)(5.0 / 3.0));
        temp[6 * ncells + i] = log(val);
        // FIX: removed "temp[7 * ncells + i] = 0.0;" -- with ncoeff == 7 the
        // array has exactly 7 * ncells elements, so that write was out of
        // bounds of the shared allocation (and row 7 is never read, since
        // rates() loops k < ncoeff).
    }
    __syncthreads();
    /*******************************************
     * Compute ln(lambda) for each set and cell *
     *******************************************/
    rates<dtype>(a, temp, lam, nsets, ncells, ncoeff);
}
// Driver: zero the managed result matrix, compute ln(lambda) for every
// (set, cell) pair with one thread per (set, cell, coeff) triple in a single
// block, then print the result.
int main()
{
    // Tensor dimensions
    const int nsets = 13, ncells = 10, ncoeff = 7;
    // Results matrix (managed so both host and device touch it directly);
    // rates() accumulates, so it must start zeroed.
    float *lam;
    cudaMallocManaged(&lam, nsets * ncells * sizeof(float));
    for(int i = 0; i < nsets; i++)
    {
        for(int j = 0; j < ncells; j++)
        {
            lam[i * ncells + j] = 0.0f;
        }
        printf("\n");
    }
    // Compute the rates (exec requires a single block; see its preconditions)
    dim3 threadsPerBlock(nsets, ncells, ncoeff);
    dim3 numBlocks(1, 1, 1);
    exec<float, nsets, ncells, ncoeff><<<numBlocks, threadsPerBlock>>>(lam);
    // Print ln(lambda)
    cudaDeviceSynchronize();
    printf("lambda:\n");
    for(int i = 0; i < nsets; i++)
    {
        for(int j = 0; j < ncells; j++)
        {
            printf("%8.3f ", lam[i * ncells + j]);
        }
        printf("\n");
    }
    // FIX: the managed allocation was leaked.
    cudaFree(lam);
    return 0;
}
|
9,285 | /* Matrix normalization.
* Compile with "gcc matrixNorm.c"
*/
/* ****** ADD YOUR CODE AT THE END OF THIS FILE. ******
* You need not submit the provided code.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
/* Program Parameters */
#define MAXN 8000 /* Max value of N */
int N; /* Matrix size */
/* Matrices */
volatile float A[MAXN][MAXN], B[MAXN][MAXN];
/* junk */
#define randm() 4|2[uid]&3
/* Prototype */
void matrixNorm();
void cudaErrorCheck(cudaError_t err, const char *s);
__global__ void matrixCuda(float *d_A,int N);
__global__ void BlockMean(float *d_A,float *d_Sum, int N);
__global__ void BlockDev(float *d_A, float *d_Dev, float *d_mu,int N);
__global__ void Normalize(float *d_A, float *d_mu, float *d_sigma, int N);
/* returns a seed for srand based on the time */
/* Derive an srand() seed from the sub-second part of the current time. */
unsigned int time_seed() {
    struct timeval now;
    struct timezone tzIgnored;
    gettimeofday(&now, &tzIgnored);
    return (unsigned int) now.tv_usec;
}
/* Set the program parameters from the command-line arguments */
void parameters(int argc, char **argv) {
int seed = 0; /* Random seed */
char uid[32]; /*User name */
/* Read command-line arguments */
srand(time_seed()); /* Randomize */
if (argc == 3) {
seed = atoi(argv[2]);
srand(seed);
printf("Random seed = %i\n", seed);
}
if (argc >= 2) {
N = atoi(argv[1]);
if (N < 1 || N > MAXN) {
printf("N = %i is out of range.\n", N);
exit(0);
}
}
else {
printf("Usage: %s <matrix_dimension> [random seed]\n",
argv[0]);
exit(0);
}
/* Print parameters */
printf("\nMatrix dimension N = %i.\n", N);
}
/* Initialize A and B*/
void initialize_inputs() {
int row, col;
printf("\nInitializing...\n");
for (col = 0; col < N; col++) {
for (row = 0; row < N; row++) {
A[row][col] = (float)rand() / 32768.0;
B[row][col] = 0.0;
}
}
/*
for (col = 0; col < N; col++) {
for (row = 0; row < N; row++) {
A[row][col] = col + row;
B[row][col] = 0.0;
}
}
*/
}
/* Print input matrices */
void print_inputs() {
int row, col;
if (N < 10) {
printf("\nA =\n\t");
for (row = 0; row < N; row++) {
for (col = 0; col < N; col++) {
printf("%5.2f%s", A[row][col], (col < N-1) ? ", " : ";\n\t");
}
}
}
}
/* Print the normalized result matrix B (only when N < 10). */
void print_B() {
    if (N >= 10)
        return;
    printf("\nB =\n\t");
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            printf("%1.10f%s", B[row][col], (col < N-1) ? ", " : ";\n\t");
        }
    }
}
/* Entry point: parse parameters, fill A, time matrixNorm() with both
 * gettimeofday (wall clock) and times() (CPU time), then print B and the
 * timing breakdown. */
int main(int argc, char **argv) {
/* Timing variables */
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
clock_t etstart2, etstop2; /* Elapsed times using times() */
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop; /* CPU times for my processes */
/* Process program parameters */
parameters(argc, argv);
/* Initialize A and B */
initialize_inputs();
/* Print input matrices */
print_inputs();
/* Start Clock */
printf("\nStarting clock.\n");
gettimeofday(&etstart, &tzdummy);
etstart2 = times(&cputstart);
/* Matrix normalization (the comment said "Gaussian Elimination", but
 * matrixNorm() is what runs here) */
matrixNorm();
/* Stop Clock */
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
printf("Stopped clock.\n");
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
/* Display output */
print_B();
/* Display timing results */
printf("\nElapsed time = %g ms.\n",
(float)(usecstop - usecstart)/(float)1000);
printf("(CPU times are accurate to the nearest %g ms)\n",
1.0/(float)CLOCKS_PER_SEC * 1000.0);
printf("My total CPU time for parent = %g ms.\n",
(float)( (cputstop.tms_utime + cputstop.tms_stime) -
(cputstart.tms_utime + cputstart.tms_stime) ) /
(float)CLOCKS_PER_SEC * 1000);
printf("My system CPU time for parent = %g ms.\n",
(float)(cputstop.tms_stime - cputstart.tms_stime) /
(float)CLOCKS_PER_SEC * 1000);
printf("My total CPU time for child processes = %g ms.\n",
(float)( (cputstop.tms_cutime + cputstop.tms_cstime) -
(cputstart.tms_cutime + cputstart.tms_cstime) ) /
(float)CLOCKS_PER_SEC * 1000);
/* Contrary to the man pages, this appears not to include the parent */
printf("--------------------------------------------\n");
exit(0);
}
/* ------------------ Above Was Provided --------------------- */
/****** You will replace this routine with your own parallel version *******/
/* Provided global variables are MAXN, N, A[][] and B[][],
* defined in the beginning of this code. B[][] is initialized to zeros.
*/
/* Matrix Normalization for the Cuda platform
Overview
1.Copy matrix A to Device
2.CUDA: Split matrix into 2D grid of blocks, each block calculates a partial sum
for each column in their section via BlockMean.
3.Sequentially add all of these partial sums to form the total mean for
each column.
4.CUDA: Each block calculate a partial sum of the mean difference for each
column in their section via BlockDev.
5.Sequentially add all of these partial mean differences to form the total
standard deviation for each column.
6.CUDA: Each block normalize their portion of the matrix using the means
and standard deviations calculated.
7.Copy matrix A to Host's B
*/
//Number of threads per block
#define BlockSize 32
/* GPU matrix normalization: per-block partial column sums (BlockMean),
 * sequential combine into per-column means, per-block squared deviations
 * (BlockDev), sequential combine into per-column std devs, then a
 * Normalize pass over the whole matrix. Result lands in global B. */
void matrixNorm()
{
    printf("Executing on GPU\n");
    //Set grid size to divide among number of threads
    int GridSize = ceil((float)N/BlockSize);
    //Create CUDA grid and block size for matrix
    dim3 grid(GridSize,GridSize);
    dim3 block(BlockSize,BlockSize);
    //Device matrices and vectors for calculation
    float *d_A;     //the matrix
    float *d_Sum;   //a partial sum holder
    float *d_Dev;   //a partial deviation holder
    float *d_mu;    //a vector of means for each column
    float *d_sigma; //a vector of standard deviations for each column
    //Host staging buffers, zero-initialized.
    //FIX: these were malloc'd and then only partially zeroed -- the init
    //loop ran row < GridSize rather than N for h_mu/h_sigma, while the
    //"+=" accumulation loops below cover all N columns, so whenever
    //GridSize < N they summed into uninitialized memory. calloc zeroes
    //every element and makes the manual init loop unnecessary.
    float *h_Sum = (float *)calloc(GridSize*N, sizeof(float));
    float *h_Dev = (float *)calloc(GridSize*N, sizeof(float));
    float *h_mu = (float *)calloc(N, sizeof(float));
    float *h_sigma = (float *)calloc(N, sizeof(float));
    size_t size = N*N*sizeof(float);
    int row, col;
    // Allocate Matrices on Devices
    cudaErrorCheck(cudaMalloc((void **)&d_A,size), "cudaMalloc A");
    cudaErrorCheck(cudaMalloc(&d_Sum,GridSize*N*sizeof(float)), "cudaMalloc d_Sum" );
    cudaErrorCheck(cudaMalloc(&d_Dev,GridSize*N*sizeof(float)), "cudaMalloc d_Dev ");
    cudaErrorCheck(cudaMalloc((void **)&d_mu,N*sizeof(float)), "cudaMalloc d_mu");
    cudaErrorCheck(cudaMalloc((void **)&d_sigma,N*sizeof(float)), "cudaMalloc d_sigma");
    // Copy over matrix to device
    cudaErrorCheck(cudaMemcpy(d_A,(const void *)A[0],size,cudaMemcpyHostToDevice), "cudaMemcpy A");
    //Initialize the device sum and std dev arrays to 0s.
    cudaErrorCheck(cudaMemcpy((void *)d_Sum,(const void *)h_Sum,GridSize*N*sizeof(float),cudaMemcpyHostToDevice),
                   "cudaMemcpy to d_Sum");
    cudaErrorCheck(cudaMemcpy((void *)d_Dev,(const void *)h_Dev,GridSize*N*sizeof(float),cudaMemcpyHostToDevice),
                   "cudaMemcpy to d_Dev" );
    cudaErrorCheck(cudaMemcpy((void *)d_mu,(const void *)h_mu,N*sizeof(float),cudaMemcpyHostToDevice),
                   "cudaMemcpy to d_mu" );
    cudaErrorCheck(cudaMemcpy((void *)d_sigma,(const void *)h_sigma,N*sizeof(float),cudaMemcpyHostToDevice),
                   "cudaMemcpy to d_sigma");
    //Calculate a sub mean for each block
    BlockMean<<<grid,block>>>(d_A,d_Sum,N);
    cudaDeviceSynchronize();
    //Calculate total mean for each column sequentially
    cudaErrorCheck(cudaMemcpy((void *)h_Sum,(const void *)d_Sum,GridSize*N*sizeof(float),cudaMemcpyDeviceToHost),
                   "cudaMemcpy to h_Sum");
    for(row = 0; row < GridSize; row++)
    {
        for(col = 0; col < N; col++)
        {
            h_mu[col] += h_Sum[row*N + col];
        }
    }
    for(col = 0; col < N; col++)
        h_mu[col] /= N;
    //Copy the host-calculated mu vector to the device
    cudaErrorCheck(cudaMemcpy((void *)d_mu,(const void *)h_mu,N*sizeof(float),cudaMemcpyHostToDevice),
                   "cudaMemcpy to d_mu");
    //Calculate a sub standard deviation for each block
    BlockDev<<<grid,block>>>(d_A,d_Dev,d_mu,N);
    cudaDeviceSynchronize();
    //Calculate total standard deviation from each block sequentially
    cudaErrorCheck(cudaMemcpy((void *)h_Dev,(const void *)d_Dev,GridSize*N*sizeof(float),cudaMemcpyDeviceToHost),
                   "cudaMemcpy to h_dev");
    for(row = 0; row < GridSize; row++)
    {
        for(col = 0; col < N; col++)
        {
            h_sigma[col] += h_Dev[row*N + col];
        }
    }
    for(col = 0; col < N; col++)
    {
        h_sigma[col] /= N;
        h_sigma[col] = sqrt(h_sigma[col]);
    }
    //Copy sigma vector to device
    cudaErrorCheck(cudaMemcpy((void *)d_sigma,(const void *)h_sigma,N*sizeof(float),cudaMemcpyHostToDevice),
                   "cudaMemcpy to d_sigma" );
    //Normalize with means and standard deviations by splitting into blocks
    Normalize<<<grid,block>>>(d_A,d_mu,d_sigma,N);
    cudaDeviceSynchronize();
    //Copy Normalized array back to B
    cudaErrorCheck(cudaMemcpy((void *)B[0],(const void *)d_A,size,cudaMemcpyDeviceToHost),
                   "cudaMemcpy to B");
    //Free all host and device pointers
    cudaFree(d_A);
    cudaFree(d_Sum);
    cudaFree(d_Dev);
    cudaFree(d_mu);
    cudaFree(d_sigma);
    free(h_Sum);
    free(h_Dev);
    free(h_mu);
    free(h_sigma);
}
/* Calculates a partial sum for a section of the matrix for each block.
This is done by allocating a shared sub matrix, calculated a
sum for each column using the algorithm from class, and returning
the corresponding sub sum from each block.
d_Sum holds all of these partial sums for every block.
*/
/* Per-block partial column sums: each block loads a BlockSize x BlockSize
   tile of d_A into shared memory and tree-reduces each column along tx.
   NOTE(review) -- three suspect spots to confirm:
   1. `size` is a BYTE count (N*N*sizeof(float)) but is compared below
      against element indices (i + j); it looks like it should be N*N.
   2. The early `return` for out-of-range threads happens BEFORE the
      __syncthreads() inside the reduction loop, so edge blocks (when N is
      not a multiple of BlockSize) execute a divergent barrier -- undefined
      behavior -- and leave parts of `sum` uninitialized.
   3. The final store indexes d_Sum[blockIdx.x*N + ty], with no
      blockIdx.y*BlockSize column offset -- blocks in the same grid row
      appear to overwrite each other's columns (compare with j above). */
__global__ void BlockMean(float *d_A,float *d_Sum, int N)
{
//Shared sub matrix
__shared__ float sum[BlockSize*BlockSize];
//Block and thread indices
unsigned int x = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int y = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
//Size of entire matrix
int size = N*N*sizeof(float);
//Indices for flattened input matrices
// i + j == [i][j]
unsigned int i = blockIdx.x*BlockSize*N + tx*N;
unsigned int j = blockIdx.y*BlockSize + ty;
//Row index into sum
unsigned int sx = tx*BlockSize;
//Ensure block and thread within bounds of matrix
if(x >= N || y >= N)
return;
//Transfer section of d_A into sum
if(i + j < size)
sum[sx + ty] = d_A[i + j];
else
sum[sx + ty] = 0.0;
//Apply partial sum algorithm from class (tree reduction along tx)
for(unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if(tx < stride)
sum[sx + ty] += sum[sx + ty + stride];
}
//Transfer shared sub sum matrix to global memory (row 0 holds the totals)
if(tx == 0)
{
d_Sum[blockIdx.x*N + ty] = sum[ty];
}
}
/* Calculate a partial sum of the square of the difference between the mean
for each block, in the same way as BlockMean, except by squaring a
difference of a calculated mean for the column in d_mu.
*/
/* Per-block partial sums of squared deviations from the column means,
   mirroring BlockMean but accumulating (d_A - mu)^2.
   NOTE(review): shares BlockMean's suspect spots -- `size` is a byte count
   used as an element-index bound, the early `return` precedes the
   __syncthreads() calls (divergent barrier for edge blocks, and such
   threads also skip the mu[] staging below), and the final store omits the
   blockIdx.y*BlockSize column offset. Confirm against BlockMean. */
__global__ void BlockDev(float *d_A, float *d_Dev, float *d_mu, int N)
{
//Shared mu vector
__shared__ float mu[BlockSize];
//shared partial sum sub matrix
__shared__ float sum[BlockSize*BlockSize];
//Size of entire matrix
int size = N*N*sizeof(float);
//Block and thread indices
unsigned int x = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int y = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
//Indices for flattened input matrices
// i + j == [i][j]
unsigned int i = blockIdx.x*BlockSize*N + tx*N;
unsigned int j = blockIdx.y*BlockSize + ty;
//Row index into sum
unsigned int sx = tx*BlockSize;
//Ensure block and thread within bounds of matrix
if(x >= N || y >= N)
return;
//Transfer sub mu vector into shared memory (one row of threads stages it)
if(tx == 0)
{
mu[ty] = d_mu[j];
}
//Ensure mu vector populated
__syncthreads();
//Transfer section of d_A into sum and square the mean difference
if((i + j < size) && (j < N))
sum[sx + ty] = powf(d_A[i + j] - mu[ty],2.0);
else
sum[sx + ty] = 0.0;
//Apply partial sum algorithm shown in class (tree reduction along tx)
for(unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if(tx < stride)
sum[sx + ty] += sum[sx + ty + stride];
}
//Tranfer shared sub matrix to global memory (row 0 holds the totals)
if(tx == 0)
{
d_Dev[blockIdx.x*N + ty] = sum[ty];
}
}
/* Normalizing function, which each thread among all blocks corresponds
to a single element in the matrix. Each applies the normalizing function
to its element*/
__global__ void Normalize(float *d_A, float *d_mu, float *d_sigma, int N)
{
// Thread position variables
unsigned int tx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int ty = blockDim.y * blockIdx.y + threadIdx.y;
//Ensure within bounds of matrix
if(tx >= N || ty >= N)
return;
//If sigma 0, set 0, else calculate normalized value
if(d_sigma[ty] == 0)
d_A[tx + ty*N] = 0;
else
d_A[tx + ty*N] = (d_A[tx + ty*N] - d_mu[ty])/(d_sigma[ty]);
}
/*Simple printing error function */
/* Print the CUDA error string for `err`, tagged with the context string `s`,
   and terminate the program; does nothing on cudaSuccess. */
void cudaErrorCheck(cudaError_t err, const char *s)
{
    if (err == cudaSuccess)
        return;
    printf("%s error: %s\n", s, cudaGetErrorString(err));
    exit(0);
}
|
9,286 | #include <stdio.h>
// Kernel
// Kernel: print (1 << i) - 1 and UINT64_MAX >> i for i in [0, 64].
// FIXES: (1) shifting a 64-bit value by 64 is UNDEFINED BEHAVIOR in C/C++
// (the shift count must be less than the bit width), so i == 64 is now
// special-cased to the mathematically expected value (0 for both, since
// 2^64 mod 2^64 == 0 and max/2^64 == 0); (2) the literal
// 18446744073709551615 needs the ULL suffix to be a valid integer constant.
__global__ void bit_shift_test()
{
    unsigned long long one = 1;
    for (int i = 0; i <= 64; i++) {
        unsigned long long shifted = (i < 64) ? (one << i) : 0ULL;
        printf("(%llu << %d) - 1 = %llu\n", one, i, shifted - 1);
    }
    unsigned long long max = 18446744073709551615ULL;
    for (int i = 0; i <= 64; i++) {
        unsigned long long result = (i < 64) ? (max >> i) : 0ULL;
        printf("%llu >> %d = %llu\n", max, i, result);
    }
}
// Main program
// Launch the demo kernel on a single thread and surface any CUDA errors.
int main()
{
    bit_shift_test<<<1, 1>>>();
    // Launch-configuration errors first, then asynchronous execution errors.
    cudaError_t launchErr = cudaGetLastError();
    cudaError_t execErr = cudaDeviceSynchronize();
    if (launchErr != cudaSuccess) { printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(launchErr)); exit(0); }
    if (execErr != cudaSuccess) { printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(execErr)); exit(0); }
    return 0;
}
|
9,287 |
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <algorithm>
#include <vector>
using namespace std;
#define CUDA_CHECK(cmd) {\
cudaError_t err = cmd; \
if(err != cudaSuccess) fprintf(stderr, "Error in %s (%d) - Name: %s - String: %s\n", __FILE__, __LINE__, cudaGetErrorName(err), cudaGetErrorString(err)); \
}
#define GET_CLOCK(t) if(clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &t) != 0) {fprintf(stderr, "%s (%d): Error in timer.\n", __FILE__, __LINE__); exit(1);}
#define COMPUTE_DELTA_T(diff, t2, t1) diff = (t2.tv_sec - t1.tv_sec) * 1E9 + (t2.tv_nsec - t1.tv_nsec);
#ifdef PINNED
#define CUDA_ALLOC cudaMallocHost
#define CUDA_FREE cudaFreeHost
#else
#define CUDA_ALLOC cudaMalloc
#define CUDA_FREE cudaFree
#endif
bool cmpfunc (double a, double b) { return a < b; }
/* Measure H2D and D2H copy bandwidth for vectors of
 * NumberOfDoubleValuesAsVector doubles. For each of OuterIterationCount
 * rounds, InnerIterationCount synchronous copies are timed as a batch and
 * the per-copy average (in ns) is appended to timings_h2d / timings_d2h.
 * Copies cycle round-robin through NumberOfVectors buffer pairs so no
 * single buffer stays cache-hot.
 * NOTE(review): with -DPINNED the "device" buffers are allocated with
 * cudaMallocHost, so the copies then measure host -> pinned-host traffic
 * rather than true H2D bandwidth — confirm that this is the intent. */
void measure_data_transfer(int NumberOfDoubleValuesAsVector, int NumberOfVectors, int InnerIterationCount, int OuterIterationCount,
    vector<double> *timings_h2d, vector<double> *timings_d2h) {
    int sz = sizeof(double) * NumberOfDoubleValuesAsVector;
    struct timespec t1, t2;
    double diff;
    // Host-side source/destination buffers (contents uninitialized — only
    // bandwidth is measured, not data correctness).
    double **data_host;
    data_host = (double **) malloc(sizeof(double*) * NumberOfVectors);
    double **data_device;
    data_device = (double **) malloc(sizeof(double*) * NumberOfVectors);
    for(int i=0;i<NumberOfVectors;i++) {
        data_host[i] = (double *) malloc(sz);
    }
    // Allocation on GPU
    for(int i=0;i<NumberOfVectors;i++) {
        double *d;
        CUDA_CHECK(CUDA_ALLOC((void**)&d, sz));
        data_device[i] = d;
    }
    for(int q=0;q < OuterIterationCount; q++) {
        GET_CLOCK(t1);
        // Host to device
        for(int i=0;i<InnerIterationCount;i++) {
            CUDA_CHECK(cudaMemcpy(data_device[i % NumberOfVectors], data_host[i % NumberOfVectors], sz, cudaMemcpyHostToDevice));
            CUDA_CHECK(cudaDeviceSynchronize());
        }
        GET_CLOCK(t2);
        COMPUTE_DELTA_T(diff, t2, t1);
        // Record the average nanoseconds per copy for this round.
        timings_h2d->push_back(diff / InnerIterationCount);
        GET_CLOCK(t1);
        // Device to host
        for(int i=0;i<InnerIterationCount;i++) {
            CUDA_CHECK(cudaMemcpy(data_host[i % NumberOfVectors], data_device[i % NumberOfVectors], sz, cudaMemcpyDeviceToHost));
            CUDA_CHECK(cudaDeviceSynchronize());
        }
        GET_CLOCK(t2);
        COMPUTE_DELTA_T(diff, t2, t1);
        timings_d2h->push_back(diff / InnerIterationCount);
    }
    for(int i=0;i<NumberOfVectors;i++) {
        free(data_host[i]);
        CUDA_FREE(data_device[i]);
    }
    free(data_host);
    free(data_device);
}
/* Entry point for the transfer benchmark: parses the four size/iteration
 * arguments, runs a warmup pass, then the measured pass, and reports the
 * median H2D/D2H per-copy time.
 * BUGFIX: the median was indexed by NumberOfVectors, but the timing vectors
 * contain one sample per OUTER iteration — index by the actual sample count.
 * Also rejects non-positive iteration counts, which would leave the timing
 * vectors empty and underflow the median index. */
int main(int argc, char **argv) {
    if(argc < 5) {
        fprintf(stderr, "Usage: %s <NumberOfDoubleValuesAsVector> <NumberOfVectors> <InnerIterationCount> <OuterIterationCount>\n", argv[0]);
        return 1;
    }
    int NumberOfDoubleValuesAsVector = atoi(argv[1]);
    int NumberOfVectors = atoi(argv[2]);
    int InnerIterationCount = atoi(argv[3]);
    int OuterIterationCount = atoi(argv[4]);
    if(NumberOfVectors < 2) {
        fprintf(stderr, "Unacceptable value for <NumberOfVectors> parameter.\n");
        return 1;
    }
    if(InnerIterationCount < 1 || OuterIterationCount < 1) {
        fprintf(stderr, "Iteration counts must be >= 1.\n");
        return 1;
    }
    printf("\n\n");
    printf("------------------------\n");
    printf("NumberOfDoubleValuesAsVector: %d\n", NumberOfDoubleValuesAsVector);
    printf("NumberOfVectors: %d\n", NumberOfVectors);
    printf("InnerIterationCount: %d\n", InnerIterationCount);
    printf("OuterIterationCount: %d\n", OuterIterationCount);
    printf("------------------------\n");
    printf("\n\n");
    vector<double> timings_d2h, timings_h2d;
    // Warmup pass primes driver state and caches; its timings are discarded.
    fprintf(stderr, "Warmup...\n");
    measure_data_transfer(NumberOfDoubleValuesAsVector, NumberOfVectors, 5, 5, &timings_h2d, &timings_d2h) ;
    fprintf(stderr, "Warmup...Done\n");
    timings_h2d.clear();
    timings_d2h.clear();
    fprintf(stderr, "Main operations...\n");
    measure_data_transfer(NumberOfDoubleValuesAsVector, NumberOfVectors, InnerIterationCount, OuterIterationCount, &timings_h2d, &timings_d2h) ;
    fprintf(stderr, "Main operations...Done\n");
    sort(timings_h2d.begin(), timings_h2d.end(), cmpfunc);
    sort(timings_d2h.begin(), timings_d2h.end(), cmpfunc);
    double t_h2d, t_d2h;
    // Median over the actual number of samples (== OuterIterationCount).
    size_t cnt = timings_h2d.size();
    if(cnt % 2 == 1) {
        t_h2d = timings_h2d[cnt/2];
        t_d2h = timings_d2h[cnt/2];
    } else {
        t_h2d = (timings_h2d[cnt/2] + timings_h2d[cnt/2 - 1]) / 2;
        t_d2h = (timings_d2h[cnt/2] + timings_d2h[cnt/2 - 1]) / 2;
    }
    int sz = NumberOfDoubleValuesAsVector * sizeof(double);
    printf("\n\nResults:\n");
    printf("========================\n");
    printf("Vector size: %d Bytes\n", sz);
    printf("========================\n");
    printf("H2D median: %.2fus\n", t_h2d / 1E3);
    printf("D2H median: %.2fus\n", t_d2h / 1E3);
    printf("------------------------\n");
    printf("H2D median: %.2fms\n", t_h2d / 1E6);
    printf("D2H median: %.2fms\n", t_d2h / 1E6);
    printf("------------------------\n");
    printf("csv_output,%dB,%.2fKB,%.2fMB,%.2fGB,%.2fus,%.2fus\n", sz, 1.0*sz/1024, 1.0*sz/(1024*1024), 1.0*sz/(1024*1024*1024), t_h2d / 1E3, t_d2h / 1E3);
    printf("------------------------\n");
    return 0;
}
|
9,288 | #include <stdio.h>
#include <cuda_runtime.h>
//#include <cutil.h>
#define TILE_WIDTH 64
#define WIDTH_PER_THREAD 4
#define SW TILE_WIDTH/WIDTH_PER_THREAD
#define N 2048
/* Abort the program with message str when *err records a CUDA failure. */
void err_handling(cudaError_t *err, const char *str)
{
    if (*err == cudaSuccess)
        return;
    printf("%s\n", str);
    exit(EXIT_FAILURE);
}
/* Register-blocked, shared-memory tiled matrix multiply: C = A * B for
 * row-major A (m x k) and B (k x n). Each thread block computes a
 * TILE_WIDTH x TILE_WIDTH tile of C; each thread accumulates its own
 * WIDTH_PER_THREAD x WIDTH_PER_THREAD (4x4) sub-block in registers
 * (c00..c33). The A and B tiles are staged through sixteen SW x SW
 * shared-memory arrays each (one per 4x4 sub-position).
 * NOTE(review): B is indexed with row stride k (e.g. B[(t+ty)*k + col]),
 * but B is k x n, so the stride should be n; this only works because the
 * caller uses m == n == k. There are also no bounds guards, so all
 * dimensions must be exact multiples of TILE_WIDTH — confirm before reuse. */
__global__ void matMul(const float *A, const float *B, float *C, int m, int k, int n)
{
    __shared__ float sh_A00[SW][SW];
    __shared__ float sh_A01[SW][SW];
    __shared__ float sh_A02[SW][SW];
    __shared__ float sh_A03[SW][SW];
    __shared__ float sh_A10[SW][SW];
    __shared__ float sh_A11[SW][SW];
    __shared__ float sh_A12[SW][SW];
    __shared__ float sh_A13[SW][SW];
    __shared__ float sh_A20[SW][SW];
    __shared__ float sh_A21[SW][SW];
    __shared__ float sh_A22[SW][SW];
    __shared__ float sh_A23[SW][SW];
    __shared__ float sh_A30[SW][SW];
    __shared__ float sh_A31[SW][SW];
    __shared__ float sh_A32[SW][SW];
    __shared__ float sh_A33[SW][SW];
    __shared__ float sh_B00[SW][SW];
    __shared__ float sh_B01[SW][SW];
    __shared__ float sh_B02[SW][SW];
    __shared__ float sh_B03[SW][SW];
    __shared__ float sh_B10[SW][SW];
    __shared__ float sh_B11[SW][SW];
    __shared__ float sh_B12[SW][SW];
    __shared__ float sh_B13[SW][SW];
    __shared__ float sh_B20[SW][SW];
    __shared__ float sh_B21[SW][SW];
    __shared__ float sh_B22[SW][SW];
    __shared__ float sh_B23[SW][SW];
    __shared__ float sh_B30[SW][SW];
    __shared__ float sh_B31[SW][SW];
    __shared__ float sh_B32[SW][SW];
    __shared__ float sh_B33[SW][SW];
    // (x, y): thread coordinates within the block; (row, col): top-left
    // corner of this thread's 4x4 output sub-block in C.
    int x = threadIdx.x;
    int y = threadIdx.y;
    int tx = x*WIDTH_PER_THREAD;
    int ty = y*WIDTH_PER_THREAD;
    int row = blockIdx.y*TILE_WIDTH + ty;
    int col = blockIdx.x*TILE_WIDTH + tx;
    // 4x4 accumulator block held in registers.
    float c00 = 0.0;
    float c01 = 0.0;
    float c02 = 0.0;
    float c03 = 0.0;
    float c10 = 0.0;
    float c11 = 0.0;
    float c12 = 0.0;
    float c13 = 0.0;
    float c20 = 0.0;
    float c21 = 0.0;
    float c22 = 0.0;
    float c23 = 0.0;
    float c30 = 0.0;
    float c31 = 0.0;
    float c32 = 0.0;
    float c33 = 0.0;
    // Register caches for the current A and B 4x4 fragments.
    float a00 = 0.0;
    float a01 = 0.0;
    float a02 = 0.0;
    float a03 = 0.0;
    float a10 = 0.0;
    float a11 = 0.0;
    float a12 = 0.0;
    float a13 = 0.0;
    float a20 = 0.0;
    float a21 = 0.0;
    float a22 = 0.0;
    float a23 = 0.0;
    float a30 = 0.0;
    float a31 = 0.0;
    float a32 = 0.0;
    float a33 = 0.0;
    float b00 = 0.0;
    float b01 = 0.0;
    float b02 = 0.0;
    float b03 = 0.0;
    float b10 = 0.0;
    float b11 = 0.0;
    float b12 = 0.0;
    float b13 = 0.0;
    float b20 = 0.0;
    float b21 = 0.0;
    float b22 = 0.0;
    float b23 = 0.0;
    float b30 = 0.0;
    float b31 = 0.0;
    float b32 = 0.0;
    float b33 = 0.0;
    // March over the shared k dimension one TILE_WIDTH slab at a time;
    // each thread loads its 4x4 fragment of the A and B slabs into shared.
    for (int t = 0; t < k; t += TILE_WIDTH) {
        sh_A00[y][x] = A[row*k + t + tx];
        sh_A01[y][x] = A[row*k + t + tx+1];
        sh_A02[y][x] = A[row*k + t + tx+2];
        sh_A03[y][x] = A[row*k + t + tx+3];
        sh_A10[y][x] = A[(row+1)*k + t + tx];
        sh_A11[y][x] = A[(row+1)*k + t + tx+1];
        sh_A12[y][x] = A[(row+1)*k + t + tx+2];
        sh_A13[y][x] = A[(row+1)*k + t + tx+3];
        sh_A20[y][x] = A[(row+2)*k + t + tx];
        sh_A21[y][x] = A[(row+2)*k + t + tx+1];
        sh_A22[y][x] = A[(row+2)*k + t + tx+2];
        sh_A23[y][x] = A[(row+2)*k + t + tx+3];
        sh_A30[y][x] = A[(row+3)*k + t + tx];
        sh_A31[y][x] = A[(row+3)*k + t + tx+1];
        sh_A32[y][x] = A[(row+3)*k + t + tx+2];
        sh_A33[y][x] = A[(row+3)*k + t + tx+3];
        sh_B00[y][x] = B[(t+ty)*k + col];
        sh_B01[y][x] = B[(t+ty)*k + col+1];
        sh_B02[y][x] = B[(t+ty)*k + col+2];
        sh_B03[y][x] = B[(t+ty)*k + col+3];
        sh_B10[y][x] = B[(t+ty+1)*k + col];
        sh_B11[y][x] = B[(t+ty+1)*k + col+1];
        sh_B12[y][x] = B[(t+ty+1)*k + col+2];
        sh_B13[y][x] = B[(t+ty+1)*k + col+3];
        sh_B20[y][x] = B[(t+ty+2)*k + col];
        sh_B21[y][x] = B[(t+ty+2)*k + col+1];
        sh_B22[y][x] = B[(t+ty+2)*k + col+2];
        sh_B23[y][x] = B[(t+ty+2)*k + col+3];
        sh_B30[y][x] = B[(t+ty+3)*k + col];
        sh_B31[y][x] = B[(t+ty+3)*k + col+1];
        sh_B32[y][x] = B[(t+ty+3)*k + col+2];
        sh_B33[y][x] = B[(t+ty+3)*k + col+3];
        // Barrier: all fragments must be staged before anyone reads them.
        __syncthreads();
        // ii walks the tiles' inner dimension, starting at each thread's own
        // column and wrapping modulo SW (=16) so accesses are staggered.
        int ii = x;
        for (int i = 0; i < TILE_WIDTH; i += WIDTH_PER_THREAD) {
            ii %= 16;
            a00 = sh_A00[y][ii];
            a01 = sh_A01[y][ii];
            a10 = sh_A10[y][ii];
            a11 = sh_A11[y][ii];
            b00 = sh_B00[ii][x];
            b01 = sh_B01[ii][x];
            b10 = sh_B10[ii][x];
            b11 = sh_B11[ii][x];
            c00 += a00*b00 + a01*b10;
            c01 += a00*b01 + a01*b11;
            c10 += a10*b00 + a11*b10;
            c11 += a10*b01 + a11*b11;
            /*******************************************************************/
            a22 = sh_A22[y][ii];
            a23 = sh_A23[y][ii];
            a32 = sh_A32[y][ii];
            a33 = sh_A33[y][ii];
            b22 = sh_B22[ii][x];
            b23 = sh_B23[ii][x];
            b32 = sh_B32[ii][x];
            b33 = sh_B33[ii][x];
            c22 += a22*b22 + a23*b32;
            c23 += a22*b23 + a23*b33;
            c32 += a32*b22 + a33*b32;
            c33 += a32*b23 + a33*b33;
            /*******************************************************************/
            a02 = sh_A02[y][ii];
            a03 = sh_A03[y][ii];
            a12 = sh_A12[y][ii];
            a13 = sh_A13[y][ii];
            b20 = sh_B20[ii][x];
            b21 = sh_B21[ii][x];
            b30 = sh_B30[ii][x];
            b31 = sh_B31[ii][x];
            c00 += a02*b20 + a03*b30;
            c01 += a02*b21 + a03*b31;
            c10 += a12*b20 + a13*b30;
            c11 += a12*b21 + a13*b31;
            c02 += a02*b22 + a03*b32;
            c03 += a02*b23 + a03*b33;
            c12 += a12*b22 + a13*b32;
            c13 += a12*b23 + a13*b33;
            c20 += a22*b20 + a23*b30;
            c21 += a22*b21 + a23*b31;
            c30 += a32*b20 + a33*b30;
            c31 += a32*b21 + a33*b31;
            /*******************************************************************/
            a20 = sh_A20[y][ii];
            a21 = sh_A21[y][ii];
            a30 = sh_A30[y][ii];
            a31 = sh_A31[y][ii];
            b02 = sh_B02[ii][x];
            b03 = sh_B03[ii][x];
            b12 = sh_B12[ii][x];
            b13 = sh_B13[ii][x];
            c22 += a20*b02 + a21*b12;
            c23 += a20*b03 + a21*b13;
            c32 += a30*b02 + a31*b12;
            c33 += a30*b03 + a31*b13;
            c20 += a20*b00 + a21*b10;
            c21 += a20*b01 + a21*b11;
            c30 += a30*b00 + a31*b10;
            c31 += a30*b01 + a31*b11;
            c02 += a00*b02 + a01*b12;
            c03 += a00*b03 + a01*b13;
            c12 += a10*b02 + a11*b12;
            c13 += a10*b03 + a11*b13;
            /*******************************************************************/
            ++ii;
        }
        // Barrier before the next slab overwrites the shared tiles.
        __syncthreads();
    }
    // Write the finished 4x4 register block back to global memory.
    C[row*n + col] = c00;
    C[row*n + col+1] = c01;
    C[row*n + col+2] = c02;
    C[row*n + col+3] = c03;
    C[(row+1)*n + col] = c10;
    C[(row+1)*n + col+1] = c11;
    C[(row+1)*n + col+2] = c12;
    C[(row+1)*n + col+3] = c13;
    C[(row+2)*n + col] = c20;
    C[(row+2)*n + col+1] = c21;
    C[(row+2)*n + col+2] = c22;
    C[(row+2)*n + col+3] = c23;
    C[(row+3)*n + col] = c30;
    C[(row+3)*n + col+1] = c31;
    C[(row+3)*n + col+2] = c32;
    C[(row+3)*n + col+3] = c33;
}
/* Host driver: allocates N x N matrices, times the matMul kernel with CUDA
 * events, and prints two spot-check result cells.
 * BUGFIX: the original leaked the host buffers (A, B, C) and both CUDA
 * events; they are now released before exit. */
int main(void)
{
    cudaError_t err = cudaSuccess;
    int m = N;
    int n = N;
    int k = N;
    // Host matrices (row-major), filled with uniform [0,1) values.
    float *A = (float*)malloc(m*k*sizeof(float));
    float *B = (float*)malloc(k*n*sizeof(float));
    float *C = (float*)malloc(m*n*sizeof(float));
    if (A == NULL || B == NULL || C == NULL) {
        printf("allocate host error!\n");
        return 1;
    }
    for (int i = 0; i < m*k; ++i) {
        A[i] = rand()/(float)RAND_MAX;
    }
    for (int i = 0; i < k*n; ++i) {
        B[i] = rand()/(float)RAND_MAX;
    }
    for (int i = 0; i < m*n; ++i) {
        C[i] = rand()/(float)RAND_MAX;
    }
    float *dev_A = NULL;
    float *dev_B = NULL;
    float *dev_C = NULL;
    err = cudaMalloc((void**)&dev_A, m*k*sizeof(float));
    err_handling(&err, "allocate devecie error A!");
    err = cudaMalloc((void**)&dev_B, k*n*sizeof(float));
    err_handling(&err, "allocate devecie error B!");
    err = cudaMalloc((void**)&dev_C, m*n*sizeof(float));
    err_handling(&err, "allocate devecie error C!");
    err = cudaMemcpy(dev_A, A, m*k*sizeof(float), cudaMemcpyHostToDevice);
    err_handling(&err, "memcpy to A error!");
    err = cudaMemcpy(dev_B, B, k*n*sizeof(float), cudaMemcpyHostToDevice);
    err_handling(&err, "memcpy to B error!");
    // One block per TILE_WIDTH x TILE_WIDTH output tile; each thread owns a
    // WIDTH_PER_THREAD x WIDTH_PER_THREAD sub-block.
    dim3 dimGrid((m-1)/TILE_WIDTH+1, (n-1)/TILE_WIDTH+1, 1);
    dim3 dimBlock(TILE_WIDTH/WIDTH_PER_THREAD, TILE_WIDTH/WIDTH_PER_THREAD, 1);
    // Time the kernel with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    matMul<<<dimGrid, dimBlock>>>(dev_A, dev_B, dev_C, m, k, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(start);
    cudaEventSynchronize(stop);
    float time_elapsed = 0;
    cudaEventElapsedTime(&time_elapsed, start, stop);
    printf("%fms\n", time_elapsed);
    err = cudaMemcpy(C, dev_C, m*n*sizeof(float), cudaMemcpyDeviceToHost);
    err_handling(&err, "memcpy to host C error!");
    printf("%f %f\n", C[100*N+100], C[234*N+234]);
    // Release timing events and all device/host allocations.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    err = cudaFree(dev_A);
    err_handling(&err, "mem free A error!");
    err = cudaFree(dev_B);
    err_handling(&err, "mem free B error!");
    err = cudaFree(dev_C);
    err_handling(&err, "mem free C error!");
    free(A);
    free(B);
    free(C);
    err = cudaDeviceReset();
    err_handling(&err, "device reset error!");
    return 0;
}
|
9,289 | #include "includes.h"
/* Backprop of the convolution weight gradient: accumulates into d_weight
 * the product of the pre-activation gradient (d_preact, out_channel x
 * n_size x n_size) and the forward input (p_output, in_channel x size x
 * size). The N connections are flattened and striped evenly across all
 * threads; SAME selects zero-padded ("SAME") vs. valid input indexing.
 * BUGFIX: the p_output index was missing the "* size" factor on its
 * column term (compare the parallel d_preact index on the same statement),
 * which collapsed the 2D input coordinates onto row 0. */
__global__ void bp_weight_conv(float* d_weight, float* d_preact, float* p_output, const int kernel_size, const int size, const int n_size, const int in_channel, const int out_channel, bool SAME)
{
    const int pos = blockIdx.x * blockDim.x + threadIdx.x;
    const int totalPos = blockDim.x * gridDim.x;
    const int N = kernel_size * kernel_size * n_size * n_size * in_channel * out_channel; // total number of connections in this convolution
    const int weight_channel = in_channel * out_channel; // actual number of channels of weight matrix
    const int padding = (kernel_size - 1) / 2; // number of padding for both ends
    int input_row, input_col;
    // distribute certain number of connections to each thread regardless of detailed position and shape
    for(int n = N * pos / totalPos; n < N * (pos+1) / totalPos; n++){
        // Decode the flat index into (kernel row, kernel col, channel,
        // output row, output col); each "idx /=" peels one dimension.
        int idx = n;
        const int i_kernel_row = ((idx /= 1 ) % kernel_size);
        const int i_kernel_col = ((idx /= kernel_size ) % kernel_size);
        const int i_channel = ((idx /= kernel_size ) % weight_channel);
        const int i_row = ((idx /= weight_channel ) % n_size);
        const int i_col = ((idx /= n_size ) % n_size);
        // corresponding position of the input matrix
        if (SAME){ // SAME padding scheme implemented
            input_row = i_kernel_row + i_row - padding;
            input_col = i_kernel_col + i_col - padding;
        }
        else{
            input_row = i_kernel_row + i_row;
            input_col = i_kernel_col + i_col;
        }
        // Skip positions that fall into the zero padding.
        if(input_row >= 0 && input_row < size && input_col >=0 && input_col < size){
            atomicAdd(&d_weight[(i_channel * kernel_size + i_kernel_col) * kernel_size + i_kernel_row],
                d_preact[((i_channel % out_channel) * n_size + i_col) * n_size + i_row] * p_output[((i_channel % in_channel) * size + input_col) * size + input_row]);
        }
    }
}
9,290 | #include <stdio.h>
#include <cuda.h>
// Kernel that executes on the CUDA device
// Square each element of a in place, one thread per element. Threads past
// the end of the array (grid tail) do nothing.
__global__ void square_array(float *a, int N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) {
        a[idx] = a[idx] * a[idx];
    }
}
// Query device 0 and dump its capability/resource properties to stdout.
void print_device_properties(void)
{
    cudaDeviceProp devProp;
    cudaGetDeviceProperties(&devProp, 0);
    printf("Major revision number: %d\n", devProp.major);
    printf("Minor revision number: %d\n", devProp.minor);
    printf("Name: %s\n", devProp.name);
    printf("Total global memory: %zu\n", devProp.totalGlobalMem);
    printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock);
    printf("Total registers per block: %d\n", devProp.regsPerBlock);
    printf("Warp size: %d\n", devProp.warpSize);
    printf("Maximum memory pitch: %zu\n", devProp.memPitch);
    printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
    // Per-axis block and grid limits (x, y, z).
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
    printf("Clock rate: %d\n", devProp.clockRate);
    printf("Total constant memory: %zu\n", devProp.totalConstMem);
    printf("Texture alignment: %zu\n", devProp.textureAlignment);
    printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
    printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
    printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
}
// main routine that executes on the host
// Host routine: squares 0..9 on the GPU and verifies each result against
// i*i within a small tolerance.
// BUGFIX: the original compared with abs(), which (depending on which
// headers nvcc pulls in) may resolve to the integer overload and truncate
// the float difference to 0, turning the tolerance check into a no-op.
// The absolute difference is now computed explicitly in float.
int main(void)
{
    print_device_properties();
    const int N = 10;
    float a_h[N];
    float *a_d;
    cudaMalloc((void **) &a_d, sizeof(a_h));
    for (int i=0; i < N; i++)
        a_h[i] = i;
    cudaMemcpy(a_d, a_h, sizeof(a_h), cudaMemcpyHostToDevice);
    int block_size = 4;
    // Ceiling division so the last partial block is still launched.
    int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
    square_array <<< n_blocks, block_size >>> (a_d, N);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(a_h, a_d, sizeof(a_h), cudaMemcpyDeviceToHost);
    for (int i=0; i<N; i++) {
        printf("%d %f\n", i, a_h[i]);
        float diff = a_h[i] - (float)(i * i);
        if (diff < 0.0f)
            diff = -diff;
        if (diff > 0.001f)
            return -1;
    }
    cudaFree(a_d);
}
|
9,291 | #include <cuda_runtime_api.h>
#include <iostream>
// Managed variables may be defined like device variables
__managed__ unsigned int mFoo;
// Print a managed variable
// Print the managed variable mFoo from the device.
// BUGFIX: mFoo is an unsigned int, so the conversion specifier must be %u;
// %d with an unsigned argument is a printf type mismatch.
__global__ void PrintFoo()
{
    printf("mFoo GPU: %u\n", mFoo);
}
// Print a managed array of integers
// Print a managed array of numEntries integers as one comma-separated line.
__global__ void PrintBar(const int* mBarPtr, unsigned int numEntries)
{
    printf("mBar GPU: ");
    for (int i = 0; i < numEntries; i++) {
        // Last entry gets a newline instead of a comma separator.
        const char* sep = (i == numEntries - 1) ? "\n" : ", ";
        printf("%d%s", mBarPtr[i], sep);
    }
}
// Demonstrates CUDA managed (unified) memory: statically declared managed
// variables, on-demand cudaMallocManaged allocations, prefetching, and the
// concurrent-access caveat on devices without concurrentManagedAccess.
int main()
{
    std::cout << "==== Sample 13 - Managed Memory ====\n" << std::endl;
    /*
     Managed memory reduces code complexity by decoupling physical
     memory location from address range. The CUDA runtime will take
     care of moving the memory to the location where it is needed.
     No copies are required, but care must be taken for concurrent
     access. To avoid performance degradation, managed memory should
     be prefetched.
     Expected output:
     mFoo GPU: 14
     mBar GPU: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13
     mBar CPU: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13
     CUDA device does (NOT) support concurrent access
     mFoo GPU: 42
    */
    constexpr unsigned int VALUE = 14;
    // We may assign values to managed variables on the CPU
    mFoo = VALUE;
    // Managed variables can be used without explicit transfer
    PrintFoo<<<1,1>>>();
    // Wait for printf output
    cudaDeviceSynchronize();
    // We may also allocate managed memory on demand
    int* mBarPtr;
    cudaMallocManaged((void**)&mBarPtr, VALUE * sizeof(int));
    // Managed memory can be directly initialized on the CPU
    for (int i = 0; i < VALUE; i++)
        mBarPtr[i] = i;
    /*
     If we know ahead of time where managed memory will be used
     and performance is essential, we can prefetch it to the
     required location. This basically replaces memcpy. Note
     however, that this action requires support for the
     concurrentAccess property. Support for concurrent access
     is queried via device properties.
    */
    int device;
    cudaGetDevice(&device);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    // Report support
    std::cout << "\nCUDA device does " << (!prop.concurrentManagedAccess ? "NOT " : "") << "support concurrent access\n";
    // If we can, we prefetch ahead of time
    if(prop.concurrentManagedAccess)
        cudaMemPrefetchAsync(mBarPtr, VALUE * sizeof(int), device);
    // Launch kernel with managed memory pointer as parameter
    PrintBar<<<1,1>>>(mBarPtr, VALUE);
    // We may also prefetch it back to the CPU
    if (prop.concurrentManagedAccess)
        cudaMemPrefetchAsync(mBarPtr, VALUE * sizeof(int), cudaCpuDeviceId);
    // Wait for GPU printing and prefetching to finish
    cudaDeviceSynchronize();
    std::cout << "mBar CPU: ";
    for (int i = 0; i < VALUE; i++)
        std::cout << mBarPtr[i] << (i == VALUE - 1 ? "\n" : ", ");
    /*
     Devices may or may not support concurrent access to variables.
     If they don't, then the CPU must ensure that access to managed
     memory does not overlap with GPU kernel execution, even if the
     GPU does not use the managed memory in question. Modifying
     a variable on the CPU before a kernel is fine, because the kernel
     will only be launched if the CPU is done with prior instructions.
    */
    // Handling access to managed memory, depending on device properties
    mFoo = 42;
    PrintFoo<<<1, 1>>>();
    if (!prop.concurrentManagedAccess)
        // CPU access to managed memory and GPU execution may not overlap
        cudaDeviceSynchronize();
    // Modify on CPU after / during GPU execution
    mBarPtr[0] = 20;
    // Wait for results of printf
    cudaDeviceSynchronize();
    return 0;
}
9,292 | #include <stdio.h>
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#define N 10
#define M 5
// Element-wise vector addition: c = a + b, one thread per element.
// No bounds guard — the caller must launch exactly enough threads.
__global__ void add(int *a, int *b, int *c)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    c[tid] = a[tid] + b[tid];
}
/* Host driver: adds two N*M-element vectors on the GPU and prints each sum.
 * BUGFIX: the original launched (N+(M-1)/M) blocks — with N=10, M=5 that
 * happens to equal the required count, but the expression is wrong in
 * general (precedence: (M-1)/M == 0). Use an explicit ceiling division
 * over the N*M element count. */
__host__ int main(void)
{
    int a[N*M], b[N*M], c[N*M];
    // Inputs: a[i] = -i, b[i] = i*i, so the expected result is i*i - i.
    for (int i = 0; i<N*M; i++)
    {
        a[i] = -i;
        b[i] = i * i;
    }
    int* devA;
    int* devB;
    int* devC;
    cudaMalloc((void**)&devA, sizeof(int) * N*M);
    cudaMalloc((void**)&devB, sizeof(int) * N*M);
    cudaMalloc((void**)&devC, sizeof(int) * N*M);
    cudaMemcpy(devA, a, sizeof(int) * N*M, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, b, sizeof(int) * N*M, cudaMemcpyHostToDevice);
    // Ceiling division: enough M-thread blocks to cover all N*M elements.
    add <<<(N*M + M - 1) / M, M>>> (devA, devB, devC);
    // Record-and-wait on an event to ensure the kernel has finished before
    // the result is copied back.
    cudaEvent_t syncEvent;
    cudaEventCreate(&syncEvent);
    cudaEventRecord(syncEvent, 0);
    cudaEventSynchronize(syncEvent);
    cudaMemcpy(c, devC, sizeof(int) * N*M, cudaMemcpyDeviceToHost);
    for (int i = 0; i<N*M; i++)
    {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    cudaEventDestroy(syncEvent);
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
    // NOTE(review): std::system is declared in <cstdlib>, which is not
    // included here — it may only compile via a transitive include; confirm.
    std::system("pause");
    return 0;
}
9,293 | #include <cufft.h>
#include <iostream>
#include <fstream>
#define BSZ 32
using namespace std;
// Solve the Poisson equation in Fourier space: scale every Fourier
// coefficient of the RHS by -1/(kx^2 + ky^2). One thread per (i, j) mode.
__global__ void solve_poisson(cufftComplex *ft, cufftComplex *ft_k, float*k, int N){
    int i = threadIdx.x + blockIdx.x*BSZ;
    int j = threadIdx.y + blockIdx.y*BSZ;
    int index = j*N+i;
    if (i<N && j<N)
    {
        float k2 = k[i] * k[i] + k[j]*k[j];
        // The (0,0) DC mode has k2 == 0; clamp it to 1 to avoid dividing by
        // zero (the solution's constant offset is arbitrary anyway and is
        // fixed up by the caller).
        if (i==0 && j == 0) {k2 = 1.0f;}
        ft_k[index].x = -ft[index].x/k2;
        ft_k[index].y = -ft[index].y/k2;
    }
}
// Expand a real N x N field into cufftComplex form: the value becomes the
// real part and the imaginary part is zeroed. One thread per element.
__global__ void real2complex(float *f, cufftComplex *fc, int N){
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col < N && row < N)
    {
        const int index = row * N + col;
        fc[index].x = f[index];   // real part
        fc[index].y = 0.0f;       // imaginary part
    }
}
// Collapse a cufftComplex N x N field back to real values after an inverse
// FFT. cuFFT's inverse transform is unnormalized, so each value is divided
// by N*N to recover the original scale. One thread per element.
__global__ void complex2real(cufftComplex *fc, float *f, int N){
    const int col = threadIdx.x + blockIdx.x * BSZ;
    const int row = threadIdx.y + blockIdx.y * BSZ;
    if (col < N && row < N)
    {
        const int index = row * N + col;
        f[index] = fc[index].x / ((float)N * (float)N);
    }
}
// Dump an N x N field to a text file, one value per line, iterating the
// fast index (i) over rows of the output, i.e. column-major order of f.
void write_to_file(char* filename, float* f, int N){
    std::ofstream file(filename);
    for(int i = 0; i<N; i++){
        for (int j = 0; j<N; j++){
            file << f[N*j+i] << "\n";
        }
    }
    file.close();
}
/* Solve a 2D Poisson problem spectrally: build a Gaussian-derived RHS,
 * FFT it, divide by -k^2 in Fourier space, inverse-FFT, normalize, and
 * write the fields to .dat files for comparison with the analytic solution.
 * BUGFIX: the original leaked every new[]-allocated host array and the four
 * cufftComplex device buffers (only k_d, f_d, u_d were freed); all
 * allocations are now released before exit. */
int main(){
    int N = 64;
    float xmax = 1.0f, xmin = 0.0f, ymin = 0.0f,
        h = (xmax-xmin)/((float)N), s = 0.1, s2 = s*s;
    float *x = new float[N*N], *y = new float[N*N], *u = new float[N*N],
        *f = new float[N*N], *u_a = new float[N*N], *err = new float[N*N];
    float r2;
    // RHS f is the Laplacian of a Gaussian centered at (0.5, 0.5); u_a is
    // the corresponding analytic solution.
    for (int j = 0; j<N; j++)
        for (int i=0; i<N; i++)
        {
            x[N*j + i] = xmin + i*h; //allocate position matrix
            y[N*j + i] = ymin + j*h;
            r2 = (x[N*j+i]-0.5)*(x[N*j+i]-0.5) + (y[N*j+i]-0.5)*(y[N*j+i]-0.5);
            f[N*j+i] = (r2-2*s2)/(s2*s2)*exp(-r2/(2*s2));
            u_a[N*j+i] = exp(-r2/(2*s2));
        }
    // Wavenumbers in FFT order: non-negative first, then negative.
    float *k = new float[N];
    for (int i = 0; i<=N/2; i++)
    {
        k[i] = i*2*M_PI;
    }
    for(int i = N/2+1; i<N; i++)
    {
        k[i] = (i-N)*2*M_PI;
    }
    // Device allocations and upload of the wavenumbers and RHS.
    float *k_d, *f_d, *u_d;
    cudaMalloc((void**)&k_d, sizeof(float)*N);
    cudaMalloc((void**)&f_d, sizeof(float)*N*N);
    cudaMalloc((void**)&u_d, sizeof(float)*N*N);
    cudaMemcpy(k_d, k, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(f_d, f, sizeof(float)*N*N, cudaMemcpyHostToDevice);
    cufftComplex *ft_d, *f_dc, *ft_d_k, *u_dc;
    cudaMalloc ((void**)&ft_d, sizeof(cufftComplex)*N*N);
    cudaMalloc ((void**)&ft_d_k, sizeof(cufftComplex)*N*N);
    cudaMalloc ((void**)&f_dc, sizeof(cufftComplex)*N*N);
    cudaMalloc ((void**)&u_dc, sizeof(cufftComplex)*N*N);
    cout << "N-0.5)/BSZ: " << int((N-0.5)/BSZ) + 1 << endl;
    dim3 dimGrid(int((N-0.5)/BSZ) + 1, int((N-0.5)/BSZ) + 1);
    dim3 dimBlock(BSZ, BSZ);
    // Pipeline: real -> complex -> FFT -> divide by -k^2 -> IFFT -> real.
    real2complex<<<dimGrid, dimBlock>>>(f_d, f_dc, N);
    cufftHandle plan;
    cufftPlan2d(&plan, N, N, CUFFT_C2C);
    cufftExecC2C(plan, f_dc, ft_d, CUFFT_FORWARD);
    solve_poisson<<<dimGrid, dimBlock>>>(ft_d, ft_d_k, k_d, N);
    cufftExecC2C(plan, ft_d_k, u_dc, CUFFT_INVERSE);
    complex2real<<<dimGrid, dimBlock>>>(u_dc, u_d, N);
    cudaMemcpy(u, u_d, sizeof(float)*N*N, cudaMemcpyDeviceToHost);
    cufftDestroy(plan);
    cudaFree(k_d);
    cudaFree(f_d);
    cudaFree(u_d);
    cudaFree(ft_d);
    cudaFree(ft_d_k);
    cudaFree(f_dc);
    cudaFree(u_dc);
    // Pin the arbitrary constant of the Poisson solution to u[0] = 0.
    float constant = u[0];
    for (int i =0; i<N*N; i++)
    {
        u[i] -= constant; //subtract u[0] to force arbitrary constant to be 0
    }
    write_to_file("x.dat", x, N);
    write_to_file("y.dat", y, N);
    write_to_file("u.dat", u, N);
    write_to_file("u_a.dat", u_a, N);
    write_to_file("f.dat", f, N);
    delete[] x;
    delete[] y;
    delete[] u;
    delete[] f;
    delete[] u_a;
    delete[] err;
    delete[] k;
}
|
9,294 | /***************************************************************************
**************************************************************************
Spherical Harmonic Transform Kit 2.7
Copyright 1997-2003 Sean Moore, Dennis Healy,
Dan Rockmore, Peter Kostelec
Copyright 2004 Peter Kostelec, Dan Rockmore
This file is part of SpharmonicKit.
SpharmonicKit is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
SpharmonicKit is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
See the accompanying LICENSE file for details.
************************************************************************
************************************************************************/
/* these are the bit-reverse, 4096 roots of unity, and by subrecursion,
contains all Nth roots of unity (N < 4096) in bit-reversed order */
|
9,295 | #include "includes.h"
// Skeleton for a global-memory offset/stride exercise kernel.
// NOTE(review): parameters and body are still TODO placeholders — the
// kernel currently does nothing.
__global__ void globalMemOffsetKernel(/*TODO Parameters*/)
{
    /*TODO Kernel Code*/
}
9,296 | #include <stdio.h>
#include <string.h>
#include <time.h>
#include <stdlib.h>
// Grid-stride element-wise add: C = A + B over N floats. (Despite the
// name, this is a device kernel.) The stride loop makes the kernel correct
// for any grid/block configuration.
__global__ void sum_arr_on_host(float *A, float *B, float *C, const int N) {
    const int step = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += step) {
        C[i] = A[i] + B[i];
    }
}
/* Fill arr with size pseudo-random floats in [0.0, 25.5].
 * BUGFIX: the original re-seeded rand() with time() on every call, so two
 * calls within the same second produced identical arrays; the PRNG is now
 * seeded only once per process. */
void init_data(float *arr, int size) {
    static int seeded = 0;
    if (!seeded) {
        time_t t;
        srand((unsigned int)time(&t));
        seeded = 1;
    }
    for (int i = 0; i < size; i++) {
        // rand() & 0xFF yields 0..255, scaled down to 0.0..25.5.
        arr[i] = (float)(rand() & 0xFF) / 10.0f;
    }
}
/* Driver: adds two 16M-element managed arrays on the GPU.
 * BUGFIX: the kernel launch is asynchronous and the original tore down the
 * managed buffers with no synchronization point; cudaDeviceSynchronize()
 * now ensures the kernel has completed (and surfaces any execution error)
 * before cleanup. */
int main(int argc, char **argv) {
    int num_elems = 16000000;
    int num_bytes = num_elems * sizeof(float);
    float *A, *B, *C;
    int blockSize = 256;
    // Ceiling division so every element is covered.
    int numBlocks = (num_elems + blockSize -1) / blockSize;
    cudaMallocManaged(&A, num_bytes);
    cudaMallocManaged(&B, num_bytes);
    cudaMallocManaged(&C, num_bytes);
    init_data(A, num_elems);
    init_data(B, num_elems);
    init_data(C, num_elems);
    int device = -1;
    cudaGetDevice(&device);
    // Prefetch inputs to the GPU so the kernel does not page-fault on first
    // touch of the managed memory.
    cudaMemPrefetchAsync(A, num_bytes, device, NULL);
    cudaMemPrefetchAsync(B, num_bytes, device, NULL);
    sum_arr_on_host<<<numBlocks, blockSize>>>(A, B, C, num_elems);
    cudaDeviceSynchronize();
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
    return 0;
}
9,297 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Matrix multiplication: AxB=C
//CUDA kernel. Each thread takes care of one cell of C matrix
// Matrix multiplication C = A * B for square n x n row-major matrices.
// One thread computes one output cell; threads outside the matrix exit.
__global__ void matmul(double *a, double *b, double *c, int n)
{
    const int Col = blockIdx.x*blockDim.x+threadIdx.x;
    const int Row = blockIdx.y*blockDim.y+threadIdx.y;
    if (Row >= n || Col >= n)
        return;
    // Accumulate the dot product in a register; single store at the end.
    double acc = 0.0;
    for (int k = 0; k < n; k++) {
        acc += a[Row*n + k] * b[k*n + Col];
    }
    c[Row*n + Col] = acc;
}
/* Host wrapper (C linkage, callable from C/Fortran): computes
 * h_c = h_a * h_b for n x n row-major double matrices on the GPU.
 * Allocates device buffers, uploads the inputs, launches the matmul
 * kernel on a 2D grid of 32x32-thread blocks, and downloads the result. */
extern "C" void matmul_wrapper(int n, double h_a[], double h_b[], double h_c[])
{
    size_t bytes = n*n*sizeof(double);
    // Device-side copies of the operands and the result.
    double *d_a;
    double *d_b;
    double *d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    printf(" C Memory allocated \n");
    cudaMemcpy(d_a,h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,h_b, bytes, cudaMemcpyHostToDevice);
    printf(" C Data sent to GPU \n");
    // 32x32 threads per block; enough blocks per axis to cover n.
    const int blockSize = 32;
    const int gridSize = (int)ceil((double)n/blockSize);
    dim3 dimBlock(blockSize,blockSize);
    dim3 dimGrid(gridSize,gridSize);
    printf(" GridSize: %d\n", gridSize);
    printf(" BlockSize: %d\n", blockSize);
    matmul<<<dimGrid, dimBlock>>>(d_a,d_b,d_c, n);
    printf(" C Kernel executed \n");
    // Blocking copy back to the host (also synchronizes with the kernel).
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    printf(" C =============== \n");
}
|
9,298 | #include "includes.h"
// Toy workload: for each of N_inst instances, repeatedly sum a 12-entry
// sliding window (wrapping modulo VEC_SIZE) of that instance's input
// vector over N_step time steps. One thread per instance.
__global__ void process(int N_step, int N_inst, float *input, float *output){
    const int g_id = blockIdx.x * blockDim.x + threadIdx.x;
    if (g_id >= N_inst) return;
    const float *vec = input + VEC_SIZE * g_id;
    float acc = 0.;
    for (int t = 0; t < N_step; ++t) {
        for (int i = 0; i < 12; ++i) {
            acc += vec[(i + t) % VEC_SIZE];
        }
    }
    output[g_id] = acc;
    return;
}
9,299 | // nvcc sum_matrix.cu -o sum_matrix.out
#include <stdio.h>
#define N 10
void print_matrix(int** mat);
int **make_matrix(int size);
void matrix_sum_serial(int **A, int **B, int **C);
__global__ void matrix_sum_cuda(int** a, int** b, int** c);
/* Driver: sums two random N x N matrices on the GPU.
 * BUGFIX: the original (a) allocated only N bytes per device matrix,
 * (b) cudaMemcpy'd the host row-pointer table verbatim so the kernel would
 * have dereferenced host pointers on the device, and (c) launched a 1D
 * <<<N,N>>> grid while the kernel indexes with threadIdx.y. This version
 * builds a proper device layout (a flat N*N payload plus a device-resident
 * table of row pointers, mirroring make_matrix's contiguous layout) and
 * launches a single N x N thread block so the kernel's 2D indexing works
 * unchanged. */
int main(void){
    int **a, **b, **c;
    int **d_a, **d_b, **d_c;     /* device row-pointer tables */
    int *d_a0, *d_b0, *d_c0;     /* device flat N*N payloads  */
    int *rows[N];
    a = make_matrix(N);
    b = make_matrix(N);
    c = make_matrix(N);
    cudaMalloc( (void**) &d_a0, N * N * sizeof(int));
    cudaMalloc( (void**) &d_b0, N * N * sizeof(int));
    cudaMalloc( (void**) &d_c0, N * N * sizeof(int));
    cudaMalloc( (void**) &d_a, N * sizeof(int*));
    cudaMalloc( (void**) &d_b, N * sizeof(int*));
    cudaMalloc( (void**) &d_c, N * sizeof(int*));
    /* Build each device row-pointer table on the host, then upload it. */
    for(int i = 0; i < N; i++) rows[i] = d_a0 + i * N;
    cudaMemcpy(d_a, rows, N * sizeof(int*), cudaMemcpyHostToDevice);
    for(int i = 0; i < N; i++) rows[i] = d_b0 + i * N;
    cudaMemcpy(d_b, rows, N * sizeof(int*), cudaMemcpyHostToDevice);
    for(int i = 0; i < N; i++) rows[i] = d_c0 + i * N;
    cudaMemcpy(d_c, rows, N * sizeof(int*), cudaMemcpyHostToDevice);
    /* make_matrix stores the payload contiguously starting at mat[0]. */
    cudaMemcpy(d_a0, a[0], N * N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b0, b[0], N * N * sizeof(int), cudaMemcpyHostToDevice);
    /* One block of N x N threads drives the kernel's 2D thread indexing. */
    matrix_sum_cuda<<<dim3(1, 1), dim3(N, N)>>>(d_a, d_b, d_c);
    cudaMemcpy(c[0], d_c0, N * N * sizeof(int), cudaMemcpyDeviceToHost);
    printf("a: \n"); print_matrix(a);
    printf("b: \n"); print_matrix(b);
    printf("c: \n"); print_matrix(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    cudaFree(d_a0); cudaFree(d_b0); cudaFree(d_c0);
    exit(0);
}
/* Allocate a size x size matrix with a contiguous backing store: all row
 * pointers in the returned table point into the single block anchored at
 * mat[0]. Cells are filled row-by-row with pseudo-random values in
 * [0, size-1]. */
int **make_matrix(int size){
    int **mat = (int**) malloc(sizeof(int*) * size);
    int *cells = (int*) malloc(sizeof(int) * size * size);
    mat[0] = cells;
    for(int r = 0; r < size; r++){
        mat[r] = cells + size * r;
        for(int col = 0; col < size; col++){
            mat[r][col] = rand() % size;
        }
    }
    return mat;
}
/* Print an N x N matrix, one row per line, cells right-aligned to width 3. */
void print_matrix(int** mat){
    for(int r = 0; r < N; r++){
        for(int c = 0; c < N; c++)
            printf("%3d ", mat[r][c]);
        printf("\n");
    }
}
/* CPU reference implementation: C = A + B element-wise over N x N. */
void matrix_sum_serial(int **A, int **B, int **C){
    for(int r = 0; r < N; r++)
        for(int col = 0; col < N; col++)
            C[r][col] = A[r][col] + B[r][col];
}
/* GPU element-wise matrix add: c = a + b. The pointer tables a, b, c must
 * be device-resident (device pointers to device rows). Expects a 2D launch
 * whose total x/y extent matches the matrix dimensions exactly — there is
 * no bounds guard, so excess threads would access out of bounds. */
__global__ void matrix_sum_cuda(int** a, int** b, int** c){
    int j = blockDim.x * blockIdx.x + threadIdx.x;
    int i = blockDim.y * blockIdx.y + threadIdx.y;
    c[i][j] = a[i][j] + b[i][j];
}
|
9,300 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <stdbool.h>
#include <queue>
#include <ctime>
#include "kernels.cuh"
#define H_max 1000
using namespace std;
/* Reads an .ac file by argument and calculates the circuit output and
* partial derivatives for every node.
* The circuit is stored in an adjacency list.
 * Each non-leaf node stores its children in a separate array.
*/
/* Node Structure
   Children and flag only apply to non-leaf nodes*/
struct node {
    /*Node type can be 'n' (constant) or 'v' (variable) for leaf nodes;
      the parser in main() accepts '+' or '*' for non-leaf nodes */
    char nodeType;
    /*Variable index, e.g. "third variable: index = 2"; only set for
      'v' leaves (see the 'v' parse branch in main)*/
    int index;
    /*Value of the node (computed bottom-up while parsing)*/
    double vr;
    /*Partial derivative of the circuit output w.r.t. this node;
      seeded to 1 at the root, filled in by backpropagation*/
    double dr;
    /*Children nodes*/
    /*Currently assumes a binary AC (only two children per node);
      stores circuit-array indices, -1 means "no child"*/
    //struct node **child;
    int child[2];
    /*Bit flag, true means there is exactly one child that is zero*/
    bool flag;
    /*Zero counter, if this counter is 1, set flag to true*/
    /*Currently not used (assuming binary AC structure)*/
    //int counter;
};
// Number of BFS levels recorded so far; levelOrder() bumps it once per level.
int h;
// num[l] = node count at BFS level l; allocated (H_max ints) in main().
int *num;
// index is the number nodes: final value has the Root
/* Reorder `circuit` (nodes 0..index, root at position `index`) into BFS
 * (level) order starting from the root.
 *
 * Returns a freshly malloc'd array of node pointers in BFS order; the array
 * ALIASES the nodes of `circuit` (no deep copy), so only the pointer tables
 * are separately owned.
 *
 * Side effects:
 *  - records each level's node count in num[h++]; the caller must have
 *    allocated num and guarantee at most H_max levels (not checked here);
 *  - rewrites every node's child[] indices IN PLACE so they refer to
 *    positions in the returned BFS array rather than the original one.
 *
 * NOTE(review): in a DAG a shared child is pushed once per parent, so the
 * per-level counts in num[] include revisits — confirm the GPU kernel
 * expects that.
 *
 * Fix over the original: `current` was malloc'd and then immediately
 * overwritten inside the loop, leaking one node-sized allocation. */
struct node** levelOrder(struct node** circuit, int index){
    queue<int> q;
    q.push(index);                       /* start BFS from the root */
    printf("value of index is %d\n",index);
    struct node** result_circuit =
        (struct node**)malloc(sizeof(struct node*) * (index+1));
    struct node *current;                /* BUGFIX: no longer malloc'd (leak) */
    /* map[old index] -> position in BFS order; -1 = not yet placed */
    int map[index+1];
    for (int i=0; i<index+1; i++){
        map[i] = -1;
    }
    int i = 0;                           /* next free slot in result_circuit */
    while(1){
        int nodeCount = q.size();        /* width of the current level */
        if(nodeCount == 0)
            break;
        num[h++] = nodeCount;            /* record the level width */
        while(nodeCount > 0){
            current = circuit[q.front()];
            if(map[q.front()]==-1) {     /* first visit: assign a BFS slot */
                result_circuit[i++] = current;
                map[q.front()] = i-1;
            }
            q.pop();
            if(current->child[0]!=-1){
                q.push(current->child[0]);
            }
            if(current->child[1]!=-1){
                q.push(current->child[1]);
            }
            nodeCount--;
        }
    }
    /* Translate child links from old circuit indices to BFS positions. */
    for (int i=0;i<=index;i++){
        result_circuit[i]->child[0] = result_circuit[i]->child[0]==-1 ? -1 : map[result_circuit[i]->child[0]];
        result_circuit[i]->child[1] = result_circuit[i]->child[1]==-1 ? -1 : map[result_circuit[i]->child[1]];
    }
    return result_circuit;
}
/* Entry point: parse an .ac arithmetic-circuit file (argv[1]), evaluating
 * each node bottom-up as it is read; reorder the circuit into BFS level
 * order; then run bit-encoded backpropagation once on the GPU and once on
 * the CPU, printing every node's value/derivative and the GPU timing.
 *
 * Fixes over the original:
 *  - sscanf used "%s" to read the one-character node type, writing a NUL
 *    past the single `char` field (UB) — now "%c";
 *  - circuit_level_order and h_circuit were malloc'd and immediately
 *    overwritten by other pointers (leaks) — the dead allocations are gone;
 *  - h_circuit aliases circuit_level_order, so the old free(h_circuit)
 *    before the CPU pass was a use-after-free and the final
 *    free(circuit_level_order) a double free — it is now freed once, at
 *    the end;
 *  - num is freed before exit.
 */
int main(int argc, char** argv) {
    FILE *ac_file;
    char lineToRead[5000];
    struct node **circuit;
    struct node *n;
    int index = 0;
    /*Try to open the AC file*/
    if (argc < 2) {
        /*No file has been passed - error*/
        fprintf(stderr, "Must pass AC file\n");
        return(EXIT_FAILURE);
    }
    ac_file = fopen(argv[1], "r");
    if (!ac_file) {
        /* File does not exist*/
        fprintf(stderr, "Unable to read file %s\n", argv[1]);
        return(EXIT_FAILURE);
    }
    /*Parse line by line; each node's value is computed as it is read*/
    while (fgets(lineToRead, 5000, ac_file) != NULL) {
        if (*lineToRead == '(') {
            printf("\t... reading file ...\n");
            /*Allocate memory for the circuit*/
            /*NOTE(review): fixed capacity of 1000 nodes — larger circuits
              overflow this array; confirm input sizes or grow dynamically*/
            circuit = (struct node**)malloc(sizeof(struct node*) * 1000);
        }
        else if (*lineToRead == 'E'){
            printf("\t... done reading file ... \n");
            index--;      /* make `index` point at the last (root) node */
            n->dr = 1;    /* seed backprop: d(root)/d(root) = 1 */
        }
        else{
            if (*lineToRead == 'n') {
                /*Leaf node (Constant): "n <value>"*/
                n = (struct node*)malloc(sizeof(struct node));
                sscanf(lineToRead, "%c %lf", &(n->nodeType), &(n->vr));
                n->dr = 0;
                n->flag = false;
                n->child[0] = -1;
                n->child[1] = -1;
            }
            else if (*lineToRead == 'v') {
                /*Leaf node (Variable): "v <index> <value>"*/
                n = (struct node*)malloc(sizeof(struct node));
                sscanf(lineToRead, "%c %d %lf", &(n->nodeType), &(n->index), &(n->vr));
                n->dr = 0;
                n->flag = false;
                n->child[0] = -1;
                n->child[1] = -1;
            }
            else if (*lineToRead == '+') {
                /*Non-leaf (Addition): "+ <childA> <childB>"*/
                n = (struct node*)malloc(sizeof(struct node));
                /*"n->child" stores the index of the children nodes in the circuit*/
                sscanf(lineToRead, "%c %d %d", &(n->nodeType), &(n->child[0]), &(n->child[1]));
                n->flag = false;
                n->vr = 0;
                n->dr = 0;
                /*Only add values if the flag is down*/
                if (!circuit[n->child[0]]->flag) {
                    n->vr += circuit[n->child[0]]->vr;
                }
                if (!circuit[n->child[1]]->flag) {
                    n->vr += circuit[n->child[1]]->vr;
                }
            }
            else if (*lineToRead == '*') {
                /*Non-leaf (Multiplication): "* <childA> <childB>"*/
                n = (struct node*)malloc(sizeof(struct node));
                sscanf(lineToRead, "%c %d %d", &(n->nodeType), &(n->child[0]), &(n->child[1]));
                n->vr = 1;
                n->dr = 0;
                /*Raise bit flag if there is exactly one child with value equal to 0*/
                if (circuit[n->child[0]]->vr == 0 && circuit[n->child[1]]->vr != 0) {
                    n->flag = true;
                    /*Set value to product of all other non-zero child nodes*/
                    if (!circuit[n->child[1]]->flag) {
                        n->vr = circuit[n->child[1]]->vr;
                    }
                    else {
                        n->vr = 0;
                    }
                }
                else if (circuit[n->child[0]]->vr != 0 && circuit[n->child[1]]->vr == 0) {
                    n->flag = true;
                    /*Set value to product of all other non-zero child nodes*/
                    if (!circuit[n->child[0]]->flag) {
                        n->vr = circuit[n->child[0]]->vr;
                    }
                    else {
                        n->vr = 0;
                    }
                }
                else {
                    n->flag = false;
                    if (!circuit[n->child[0]]->flag) {
                        n->vr *= circuit[n->child[0]]->vr;
                    }
                    else {
                        n->vr = 0;
                    }
                    if (!circuit[n->child[1]]->flag) {
                        n->vr *= circuit[n->child[1]]->vr;
                    }
                    else {
                        n->vr = 0;
                    }
                }
            }
            circuit[index] = n;
            index++;
        }
    }
    /*Print out circuit output (root value)*/
    printf("output %lf\n\n", circuit[index]->vr);
    /*BFS reordering; levelOrder also fills num[] with per-level widths*/
    h = 0;
    num = (int*)malloc(sizeof(int)*H_max);
    struct node **circuit_level_order = levelOrder(circuit, index);
    free(circuit);   /* frees only the pointer table; the nodes stay alive */
    /*Bit-encoded backpropagation in GPU*/
    printf("\t... starting backpropagation in gpu...\n");
    struct node** h_circuit = circuit_level_order;  /* host view (aliased) */
    struct node** d_circuit;
    cudaMalloc((void***)&d_circuit, sizeof(struct node*) * (index+1));
    /*set up timing variables*/
    float gpu_elapsed_time;
    cudaEvent_t gpu_start, gpu_stop;
    cudaEventCreate(&gpu_start);
    cudaEventCreate(&gpu_stop);
    cudaEventRecord(gpu_start,0);
    /*NOTE(review): this copies only the array of HOST node pointers — the
      node structs themselves are never transferred, and `num` below is a
      host pointer too. Confirm build_circuit expects this (e.g. under
      unified memory); otherwise the nodes must be deep-copied to device*/
    cudaMemcpy(d_circuit, h_circuit, sizeof(struct node*)*(index+1), cudaMemcpyHostToDevice);
    /*call kernel*/
    dim3 gridSize = 256;
    dim3 blockSize = 256;
    build_circuit<<< gridSize, blockSize >>>(d_circuit, index+1, h, num);
    /*copy from device to host*/
    cudaMemcpy(h_circuit, d_circuit, sizeof(struct node*)*(index+1), cudaMemcpyDeviceToHost);
    cudaEventRecord(gpu_stop, 0);
    cudaEventSynchronize(gpu_stop);
    cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
    cudaEventDestroy(gpu_start);
    cudaEventDestroy(gpu_stop);
    /*report results*/
    cout<<"The gpu took: "<<gpu_elapsed_time<<" milli-seconds"<<std::endl;
    for (int i = 0; i <= index; i++) {
        printf("n%d t: %c, dr: %lf vr: %lf\n", i, h_circuit[i]->nodeType, h_circuit[i]->dr, h_circuit[i]->vr);
    }
    /*(h_circuit aliases circuit_level_order, which the CPU pass below still
      reads — it is freed once, at the end)*/
    /*Bit-encoded backpropagation on the CPU: BFS order guarantees every
      parent is processed before its children*/
    printf("\t... starting backpropagation in cpu...\n");
    struct node* parent;
    for (int i = 0; i <= index; i++) {
        parent = circuit_level_order[i];
        /*Assign dr values depending on parent node*/
        if (parent->nodeType == '+') {
            circuit_level_order[parent->child[0]]->dr = parent->dr;
            circuit_level_order[parent->child[1]]->dr = parent->dr;
        }
        else if (parent->nodeType == '*') {
            /*if bit flag is down, and parent is non-zero, dr(c) = dr(p)*vr(p)/vr(c)*/
            if (parent->dr == 0) {
                /*Set all child nodes dr to zero*/
                circuit_level_order[parent->child[0]]->dr = 0;
                circuit_level_order[parent->child[1]]->dr = 0;
            }
            else if (parent->flag) {
                /*Check value of all child nodes*/
                /*if flag is up and child is zero, then dr(c) = dr(p) * vr(p)*/
                if (circuit_level_order[parent->child[0]]->vr == 0) {
                    circuit_level_order[parent->child[0]]->dr = parent->dr * parent->vr;
                    /*Set all other children dr to zero*/
                    circuit_level_order[parent->child[1]]->dr = 0;
                }
                else {
                    circuit_level_order[parent->child[1]]->dr = 0;
                    circuit_level_order[parent->child[0]]->dr = parent->dr *
                            (parent->vr / circuit_level_order[parent->child[0]]->vr);
                }
            }
            else {
                circuit_level_order[parent->child[1]]->dr = parent->dr *
                        (parent->vr / circuit_level_order[parent->child[1]]->vr);
                circuit_level_order[parent->child[0]]->dr = parent->dr *
                        (parent->vr / circuit_level_order[parent->child[0]]->vr);
            }
        }
    }
    /*Print final per-node results*/
    for (int i = 0; i <= index; i++) {
        printf("n%d t: %c, dr: %lf vr: %lf\n", i, circuit_level_order[i]->nodeType, circuit_level_order[i]->dr, circuit_level_order[i]->vr);
    }
    free(circuit_level_order);
    free(num);
    /*Close file*/
    fclose(ac_file);
    return (EXIT_SUCCESS);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.