serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
23,001 | //##########################################################//
// Name: Kirtan Mali //
// Roll no: 18AG10016 //
// Question 3: Matrix Transpose using Dynamic Shared Mem //
//##########################################################//
#include <stdio.h>
#include <stdlib.h>
// Cuda Libraries
#include <cuda.h>
#include <cuda_runtime.h>
// Macro for error checking and debugging
#define CHECK(call) { \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
typedef long long int lli;
#define MAX_VAL 100
#define TILEDIM 32
#define BLOCK_ROWS 32
#define PAD 1
void printMat(float *matrix, lli n);
void transpose_CPU(float *matrix, float *output, int n);
float *createMat(lli n, int isempty, int seed);
// Tiled matrix transpose using dynamically allocated shared memory.
// Launch with blockDim = (BLOCK_ROWS, BLOCK_ROWS) and dynamic shared
// memory of TILEDIM*(TILEDIM+PAD)*sizeof(float) bytes; PAD avoids
// shared-memory bank conflicts on the transposed read.
// Fix: guard every global load/store so matrix sizes that are not a
// multiple of TILEDIM no longer read/write out of bounds (the grid is
// sized with ceil, so edge blocks overhang the matrix).
__global__ void transposeCoalesced(float *matrix, float *output, lli n)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int x = blockIdx.x * TILEDIM + tx;
    int y = blockIdx.y * TILEDIM + ty;
    extern __shared__ float tile[];

    // Load a TILEDIM x TILEDIM tile with coalesced global reads.
    for (int j = 0; j < TILEDIM; j += BLOCK_ROWS)
    {
        if (x < n && (y + j) < n)
            tile[(ty + j) * (TILEDIM + PAD) + tx] = matrix[(y + j) * n + x];
    }
    // Barrier stays outside any divergent branch: all threads reach it.
    __syncthreads();

    // Write the transposed tile to the mirrored block (coalesced writes).
    x = blockIdx.y * TILEDIM + tx;
    y = blockIdx.x * TILEDIM + ty;
    for (int j = 0; j < TILEDIM; j += BLOCK_ROWS)
    {
        if (x < n && (y + j) < n)
            output[(y + j) * n + x] = tile[tx * (TILEDIM + PAD) + ty + j];
    }
}
// Reads t test cases; for each, generates a random n x n matrix,
// transposes it on the GPU, and optionally prints both matrices.
// Any command-line argument suppresses printing.
int main(int argc, char **argv)
{
    cudaError_t err = cudaSuccess;
    int isprint = 1;
    if (argc > 1)
        isprint = 0;
    lli t;
    scanf("%lld", &t);
    while (t--)
    {
        lli n;
        scanf("%lld", &n);
        size_t size = sizeof(float) * n * n;
        float *h_matrix = createMat(n, 0, t);
        float *h_output = createMat(n, 1, t);
        float *h_output_check = createMat(n, 1, t);
        float *d_matrix = NULL;
        float *d_output = NULL;
        CHECK(cudaMalloc((void **)&d_matrix, size));
        CHECK(cudaMalloc((void **)&d_output, size));
        CHECK(cudaMemcpy(d_matrix, h_matrix, size, cudaMemcpyHostToDevice));
        dim3 dimGrid(ceil((float)n/BLOCK_ROWS), ceil((float)n/BLOCK_ROWS), 1);
        dim3 dimBlock(BLOCK_ROWS, BLOCK_ROWS, 1);
        // Third launch argument: dynamic shared memory for one padded tile.
        transposeCoalesced<<<dimGrid, dimBlock, TILEDIM*(TILEDIM+PAD)*sizeof(float)>>>(d_matrix, d_output, n);
        err = cudaGetLastError();
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to launch transposeCoalesced kernel (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        // Blocking copy also synchronizes with the kernel.
        CHECK(cudaMemcpy(h_output, d_output, size, cudaMemcpyDeviceToHost));
        // transpose_CPU(h_matrix, h_output_check, n);
        if (isprint == 1)
        {
            printf("\n\n***** Original Matrix *****\n\n");
            printMat(h_matrix, n);
            printf("\n\n***** Transposed Matrix using GPU *****\n\n");
            printMat(h_output, n);
            // printf("\n\n***** Transposed Matrix using CPU *****\n\n");
            // printMat(h_output_check, n);
        }
        // Fix: release per-test-case allocations (previously leaked on
        // every loop iteration).
        free(h_matrix);
        free(h_output);
        free(h_output_check);
        CHECK(cudaFree(d_matrix));
        CHECK(cudaFree(d_output));
    }
    return 0;
}
// Utility Functions
// Allocates an n*n row-major matrix. isempty==1 -> zero-filled;
// otherwise pseudo-random floats in [0, MAX_VAL], seeded from `seed`.
// Returns NULL when the allocation fails.
float *createMat(lli n, int isempty, int seed)
{
    srand(seed + 1);
    size_t size = sizeof(float) * n * n;
    float *matrix = (float *)malloc(size);
    if (matrix == NULL)   // fix: don't write through a failed allocation
        return NULL;
    // fix: lli index — `int i` overflows once n*n exceeds INT_MAX
    for (lli i = 0; i < n * n; i++)
    {
        if (isempty == 1)
            matrix[i] = 0.0f;
        else
            matrix[i] = (float)rand() / ((float)RAND_MAX / MAX_VAL);
    }
    return matrix;
}
// Prints an n x n row-major matrix, one row per line, fixed width.
void printMat(float *matrix, lli n)
{
    for (lli row = 0; row < n; row++)
    {
        for (lli col = 0; col < n; col++)
            printf("% 6.2f ", matrix[row * n + col]);
        printf("\n");
    }
}
// CPU reference transpose: output = matrix^T for an n x n row-major matrix.
void transpose_CPU(float *matrix, float *output, int n)
{
    for (int r = 0; r < n; ++r)
        for (int c = 0; c < n; ++c)
            output[r * n + c] = matrix[c * n + r];
}
23,002 | #include "includes.h"
// Gathers every second element of `in` into `out`: out[i] = in[2*i].
// Fix: bound the write by `size` — the parameter was previously unused,
// so launches with more threads than elements wrote out of bounds.
// NOTE(review): `size` is taken to be the length of `out`; `in` must
// then hold at least 2*size elements — confirm against callers.
__global__ void vecmabite( int *out, int *in, std::size_t size )
{
    auto tid = threadIdx.x;
    if (tid < size)
        out[ tid ] = in[ 2 * tid ];
}
23,003 | #include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_real_distribution.h>
// Functor mapping index i to the i-th draw of a seeded minstd engine,
// uniformly distributed over [25, 40). Rebuilding the engine and
// discarding i draws on every call makes the mapping independent of
// evaluation order, so it is safe inside thrust::transform.
struct rng_transform{
    int SEED;   // engine seed shared by all invocations
    __device__ __host__
    double operator() (const int &i){
        thrust::minstd_rand engine(SEED);
        thrust::uniform_real_distribution<double> dist(25, 40);
        engine.discard(i);   // advance to the i-th draw
        return dist(engine);
    }
};
// Demonstrates reproducible device-side RNG with thrust:
// 1) draws 10 uniform(25,40) doubles on the host from a seeded minstd
//    engine and prints them;
// 2) fills a device_vector with draws produced by rng_transform over a
//    counting iterator and prints those.
// Both use the same seed/engine/distribution and rng_transform discards
// i draws per index, so the two printed sequences should match.
int main()
{
    int seed;
    std::cin >> seed;
    // Host reference sequence.
    thrust::minstd_rand eng(seed);
    thrust::uniform_real_distribution<double> d(25, 40);
    for(int i = 0; i< 10; i ++)
    {
        std::cout << d(eng) << "\n";
    }
    // Device sequence: index i -> i-th draw of the seeded engine.
    rng_transform rt = {.SEED = seed};
    thrust::device_vector <double> vec(10);
    thrust::transform(thrust::make_counting_iterator<int>(0),
    thrust::make_counting_iterator<int>(10),
    vec.begin(),
    rt);
    for(int i = 0; i< 10; i ++)
    {
        // Each vec[i] read is an implicit device-to-host copy.
        std::cout << vec[i] << "\n";
    }
    std::cout << "\n";
}
23,004 | //
// Created by zhaoxuanzhu on 3/21/21.
//
#include "problem.cuh"
|
// Device constant-memory coefficient table; the defining (non-extern)
// declaration lives in another translation unit.
extern __device__ __constant__ char d_coef[2];
// Host-side values used to seed d_coef.
char g_coef[2]={'a','b'};
// Uploads g_coef into the constant-memory symbol d_coef. Must run
// before any kernel that reads d_coef.
// NOTE(review): the cudaMemcpyToSymbol return code is ignored — a
// failed copy would go unnoticed; consider checking it.
void pre()
{
    cudaMemcpyToSymbol(d_coef,g_coef,sizeof(char)*2);
}
|
23,006 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define N 1024
// One thread per character of A. A thread whose index starts a word
// (index 0, or preceded by a space) compares the next *sL characters of
// A against C and atomically counts a match into *pFound.
// Fix: break out of the comparison loop on the first mismatch instead
// of always scanning all *sL characters (result is unchanged).
// NOTE(review): A[id + i] can read past the end of A when the candidate
// word is shorter than C — the kernel never receives A's length;
// confirm the host buffer is padded, or pass the length and guard.
__global__ void CUDASubstring(char *A, char *C, int *sL, int *pFound)
{
    int id = threadIdx.x;
    if (id == 0 || A[id - 1] == ' ')
    {
        int fMatch = 1;
        for (int i = 0; i < *sL; i++)
        {
            if (A[id + i] != C[i])
            {
                fMatch = 0;
                break;
            }
        }
        if (fMatch == 1)
        {
            atomicAdd(pFound, 1);
        }
    }
}
// Reads a string and a pattern, counts word-start occurrences of the
// pattern on the GPU, and reports the count plus elapsed time.
int main(int argc, char const *argv[])
{
    char A[N];
    char C[N];
    char *pA, *pC;
    printf("Enter string(A): ");
    // Fix: bound both reads (buffers hold N==1024 chars) so long input
    // can no longer overflow the stack buffers.
    scanf("%1023[^\n]%*c", A);
    printf("Enter substring(C): ");
    scanf("%1023[^\n]%*c", C);
    int L = strlen(A);
    int sL = strlen(C);
    int *pFound, *psL;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMalloc((void**)&pA, L * sizeof(char));
    cudaMalloc((void**)&pC, sL * sizeof(char));
    cudaMalloc((void**)&psL, sizeof(int));
    cudaMalloc((void**)&pFound, sizeof(int));
    int t = 0;   // zero-initializes the device match counter
    cudaMemcpy(pA, A, L * sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(pC, C, sL * sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(psL, &sL, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(pFound, &t, sizeof(int), cudaMemcpyHostToDevice);
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("CUDA Error1: %s\n", cudaGetErrorString(error));
    }
    // One thread per character of A.
    CUDASubstring<<<1, L>>>(pA, pC, psL, pFound);
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("CUDA Error2: %s\n", cudaGetErrorString(error));
    }
    int found;
    // Blocking copy: synchronizes with the kernel before reading.
    cudaMemcpy(&found, pFound, sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    if(found != 0)
        printf("Found %d times\n", found);
    else
        printf("Not Found\n");
    printf("Time Taken = %f\n", elapsedTime);
    // Fix: release everything allocated (psL and pFound previously
    // leaked) and destroy the timing events.
    cudaFree(pA);
    cudaFree(pC);
    cudaFree(psL);
    cudaFree(pFound);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
23,007 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Numerically stable log(exp(a) + exp(b)).
// Operands at or below -1e20 are treated as log(0), so the other
// operand is returned unchanged.
__device__ double logsumexp(double a, double b)
{
    if (a <= -1e20)
        return b;
    if (b <= -1e20)
        return a;
    // Factor out the larger term so the exponent is never positive.
    if (a > b)
        return a + log(1.0 + exp(b - a));
    return b + log(1.0 + exp(a - b));
}
// Addition where values at or below -1e20 act as log(0): if either
// operand is "minus infinity", the other is returned unchanged.
__device__ double safeadd(double a, double b)
{
    if (a <= -1e20)
        return b;
    if (b <= -1e20)
        return a;
    return a + b;
}
/*
push!(rules, Rule('S', "LS",0.868534, 3))
push!(rules, Rule('S', "s",0.117609877998, 1))
push!(rules, Rule('S', "dFd",0.013856122002, 2))
push!(rules, Rule('F', "dFd",0.787640, 2))
push!(rules, Rule('F', "LS",0.21236, 3))
push!(rules, Rule('L', "s",0.894603, 1))
push!(rules, Rule('L', "dFd",0.105397, 2))
type1rules = Rule[rules[2],rules[6]]
type2rules = Rule[rules[3],rules[4],rules[7]]
type3rules = Rule[rules[1],rules[5]]
ruleindex = Dict('S' => 1, 'F' => 2, 'L' => 3)
*/
// Nonterminal indices: each selects a len*len plane of the inside /
// outside tables (offset NT*len*len).
__constant__ int S = 0;
__constant__ int F = 1;
__constant__ int L = 2;
// Log-probabilities of the seven grammar rules listed in the comment
// block above (e.g. r1logprob = log(0.868534) for S -> LS).
__constant__ double r1logprob = -0.14094854611;
__constant__ double r2logprob = -2.14038225046;
__constant__ double r3logprob = -4.27902812221;
__constant__ double r4logprob = -0.2387141463;
__constant__ double r5logprob = -1.549472331;
__constant__ double r6logprob = -0.11137523453;
__constant__ double r7logprob = -2.25002110628;
// One diagonal step of the inside (CYK-style) algorithm for the SCFG
// described above. `inside` is three len*len planes indexed as
// inside[NT*len*len + i*len + j]: the inside log-probability that
// nonterminal NT derives the span [i, j]. `b` is the span length being
// filled; one thread handles one span start j (span [j, j+b]). BT
// scales every rule/emission log-probability (inverse temperature —
// presumably for tempering; confirm with callers).
__global__ void insidealgorithm(double* inside, const double* pairedlogprobs, const double* unpairedlogprobs, const int b, const int len, const double BT)
{
    int j = blockIdx.x*blockDim.x + threadIdx.x;
    if (j < len-b)
    {
        int index = 0;
        // --- type 3 rules (binary split X -> L S) ---
        // rule 1: S -> LS. Log-sum over split points h of
        // L[j..h] * S[h+1..j+b].
        double tmp = -1e20;
        for(int h=j ; h < j+b ; h++)
        {
            tmp = logsumexp(tmp, inside[L*len*len + j*len + h] + inside[S*len*len + (h+1)*len + (j+b)]);
        }
        index = S*len*len + j*len + j+b;
        inside[index] = logsumexp(inside[index], r1logprob*BT + tmp);
        // rule 5: F -> LS reuses the same split sum `tmp`.
        /*tmp = -1e20f;
        for(int h=j ; h < j+b ; h++)
        {
        double prob1 = inside[L*len*len + j*len + h];
        double prob2 = inside[S*len*len + (h+1)*len + j+b];
        tmp = logsumexp(tmp, prob1 + prob2);
        }*/
        index = F*len*len + j*len + j+b;
        inside[index] = logsumexp(inside[index], r5logprob*BT + tmp);
        // --- type 2 rules (paired X -> d F d) ---
        // v = log P(pair at j, j+b) + inside F over the enclosed span.
        double v = pairedlogprobs[j*len+j+b]*BT + inside[F*len*len+(j+1)*len+ (j+b-1)];
        // rule 3: S -> dFd
        index = S*len*len + j*len + j+b;
        inside[index] = logsumexp(inside[index], r3logprob*BT + v);
        // rule 4: F -> dFd
        index = F*len*len + j*len + j+b;
        inside[index] = logsumexp(inside[index], r4logprob*BT + v);
        // rule 7: L -> dFd
        index = L*len*len + j*len + j+b;
        inside[index] = logsumexp(inside[index], r7logprob*BT + v);
    }
}
/*
push!(rules, Rule('S', "LS",0.868534, 3))
push!(rules, Rule('S', "s",0.117609877998, 1))
push!(rules, Rule('S', "dFd",0.013856122002, 2))
push!(rules, Rule('F', "dFd",0.787640, 2))
push!(rules, Rule('F', "LS",0.21236, 3))
push!(rules, Rule('L', "s",0.894603, 1))
push!(rules, Rule('L', "dFd",0.105397, 2))
type1rules = Rule[rules[2],rules[6]]
type2rules = Rule[rules[3],rules[4],rules[7]]
type3rules = Rule[rules[1],rules[5]]
ruleindex = Dict('S' => 1, 'F' => 2, 'L' => 3)
*/
// One diagonal step of the outside algorithm, mirroring
// insidealgorithm: `outside[NT*len*len + i*len + j]` is the outside
// log-probability of nonterminal NT spanning [i, j]. `b` is the span
// length; one thread per span start j. For each rule X -> Y Z the span
// contributes as either child, combining the parent's outside value
// with the sibling's inside value.
__global__ void outsidealgorithm(double* outside, const double* inside, const double* pairedlogprobs, const double* unpairedlogprobs, const int b, const int len, const double BT)
{
    int j = blockIdx.x*blockDim.x + threadIdx.x;
    if (j < len - b)
    {
        int index = 0;
        // rule 1: S -> LS. First: [j, j+b] as the L child, with the S
        // sibling to the right ([j+b+1, k]) and parent S spanning [j, k].
        double tmp = -1e20;
        for (int k = j + b + 1; k < len; k++)
        {
            tmp = logsumexp(tmp, outside[S*len*len + j*len + k] + inside[S*len*len + (j+b+1)*len + k]);
        }
        index = L*len*len + j*len + j+b;
        outside[index] = logsumexp(outside[index], r1logprob*BT + tmp);
        // Then: [j, j+b] as the S child, with the L sibling to the left
        // ([k, j-1]) and parent S spanning [k, j+b].
        tmp = -1e20;
        for (int k = 0 ; k < j ; k++)
        {
            tmp = logsumexp(tmp, outside[S*len*len + k*len + j+b] + inside[L*len*len + k*len + j-1]);
        }
        index = S*len*len + j*len + j+b;
        outside[index] = logsumexp(outside[index], r1logprob*BT + tmp);
        // rule 5: F -> LS — same two cases with an F parent.
        tmp = -1e20;
        for (int k = j + b + 1; k < len; k++)
        {
            tmp = logsumexp(tmp, outside[F*len*len + j*len + k] + inside[S*len*len + (j+b+1)*len + k]);
        }
        index = L*len*len + j*len + j+b;
        outside[index] = logsumexp(outside[index], r5logprob*BT + tmp);
        tmp = -1e20;
        for (int k = 0 ; k < j ; k++)
        {
            tmp = logsumexp(tmp, outside[F*len*len + k*len + j+b] + inside[L*len*len + k*len + j-1]);
        }
        index = S*len*len + j*len + j+b;
        outside[index] = logsumexp(outside[index], r5logprob*BT + tmp);
        // type 2 rules (X -> d F d): the enclosed F spans [j, j+b] and
        // the parent spans [j-1, j+b+1]; only valid away from the edges.
        if ((j>=1) && (j+b+1<len))
        {
            double v = pairedlogprobs[(j-1)*len+(j+b+1)]*BT;
            index = F*len*len + j*len + j+b;
            // rule 3: S -> dFd
            outside[index] = logsumexp(outside[index], r3logprob*BT + outside[S*len*len + (j-1)*len + j+b+1] + v);
            // rule 4: F -> dFd
            outside[index] = logsumexp(outside[index], r4logprob*BT + outside[F*len*len + (j-1)*len + j+b+1] + v);
            // rule 7: L -> dFd
            outside[index] = logsumexp(outside[index], r7logprob*BT + outside[L*len*len + (j-1)*len + j+b+1] + v);
        }
    }
}
|
23,008 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Reverses a into b: b[N-1-i] = a[i]. Bounds-guarded for grids that
// overshoot N.
__global__ void revArray(int N, float *a, float *b) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N)
        b[N - 1 - idx] = a[idx];
}
// Fills h_a with 1..N, reverses it on the GPU, and prints the result.
int main(int argc, char **argv) {
    int N = 100;
    // Host buffers.
    float *h_a = (float*) malloc(N*sizeof(float));
    float *h_b = (float*) malloc(N*sizeof(float));
    int n;
    for(n=0;n<N;n++) {
        h_a[n] = 1+n;
    }
    // Device buffers.
    float *d_a, *d_b;
    cudaMalloc(&d_a, N*sizeof(float));
    cudaMalloc(&d_b, N*sizeof(float));
    cudaMemcpy(d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice);
    // Ceiling division so a partial final block covers the tail.
    int NthreadsPerBlock = 10;
    int NthreadBlocks = (N+NthreadsPerBlock-1)/NthreadsPerBlock ;
    revArray<<<NthreadBlocks, NthreadsPerBlock>>>(N,d_a,d_b);
    // Fix: surface kernel-launch errors (previously ignored entirely).
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "revArray launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Blocking copy synchronizes with the kernel before h_b is read.
    cudaMemcpy(h_b, d_b, N*sizeof(float), cudaMemcpyDeviceToHost);
    for(n=0;n<N;++n) {
        printf("h_b[%d] = %g\n",n,h_b[n]);
    }
    free(h_a);
    free(h_b);
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
|
23,009 | #include "includes.h"
// Backward pass of weighted K-nearest-neighbor interpolation: scatters
// each output gradient back to its neighbor source points, scaled by
// the interpolation weight.
//   B: batches, N: query points, M: source points, C: channels,
//   K: max neighbors per query.
//   nnIndex    [B,N,K]: neighbor indices into the M source points
//   nnCount    [B,N]  : valid neighbors per query (<= K)
//   gradOutput [B,N,C], weight [B,N,K], gradInput [B,M,C] (accumulated)
// Launch: blocks grid-stride over batches; threads stride over the
// flattened (point, channel) pairs. atomicAdd is needed because many
// queries can share the same source point m.
// NOTE(review): gradInput is accumulated into, so the caller must
// zero it before launch — confirm.
__global__ void weighted_interpolate_backward(int B, int N, int M, int C, int K, const int* nnIndex, const int* nnCount, const float* gradOutput, const float* weight, float* gradInput)
{
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=threadIdx.x;j<N*C;j+=blockDim.x)
        {
            int n = j/C;   // query point index
            int c = j%C;   // channel index
            int nnSize = nnCount[i*N+n];
            for(int k=0;k<nnSize;k++)
            {
                int m = nnIndex[i*N*K+n*K+k];
                float w = weight[i*N*K+n*K+k];
                // Scatter grad to source point m, channel c.
                atomicAdd(&gradInput[i*M*C+m*C+c],gradOutput[i*N*C+j]*w);
            }
        }
    }
}
23,010 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
#define N 6000 /* Matrix size */
float A[N][N], B[N][N];
int threadPerBlock=256;
int block=(int)N/threadPerBlock;
/* Initialize A and B*/
/* Fill A with pseudo-random floats (seeded from the wall clock) and
   zero out B. */
void initialize_inputs() {
    int r, c;
    srand((unsigned)time(NULL));
    for (r = 0; r < N; r++) {
        for (c = 0; c < N; c++) {
            A[r][c] = (float)rand() / 32768.0;
            B[r][c] = 0.0;
        }
    }
}
//Kernel Processing
/*
 * Standardizes each column of d_A to zero mean / unit variance, one
 * thread per column. Values are read as d_A[col*n + row] and written
 * transposed to d_B[row*n + col] (layout preserved from the original).
 * Fixes:
 *  - removed the __syncthreads() calls: they sat inside the divergent
 *    `if (col < n)` branch (undefined behavior when n is not a multiple
 *    of blockDim.x) and were unnecessary anyway — no shared memory is
 *    used and each thread works only on its own column;
 *  - sqrtf instead of sqrt to avoid a float->double->float round trip.
 */
__global__ void matrixNorm(float *d_A, float *d_B, int n) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row;
    float mu, sigma;
    if (col < n) {
        /* Column mean. */
        mu = 0.0f;
        for (row = 0; row < n; row++)
            mu += d_A[col*n+row];
        mu /= (float) n;
        /* Column variance, then standard deviation. */
        sigma = 0.0f;
        for (row = 0; row < n; row++)
            sigma += powf(d_A[col*n+row] - mu, 2.0f);
        sigma /= (float) n;
        sigma = sqrtf(sigma);
        /* Standardize; a zero-variance column becomes all zeros. */
        for (row = 0; row < n; row++) {
            if (sigma == 0.0f)
                d_B[row*n+col] = 0.0f;
            else
                d_B[row*n+col] = (d_A[col*n+row] - mu) / sigma;
        }
    }
}
// Initializes A, normalizes it on the GPU into B, and reports runtime.
int main(int argc, char **argv) {
    float *d_A, *d_B;
    /* Timing variables */
    struct timeval start, stop;
    struct timezone tzdummy;
    unsigned long long runtime;
    /* Fix: initialize A BEFORE copying it to the device — the original
     * copied A while it was still all zeros, so the kernel normalized
     * uninitialized data and initialize_inputs() ran too late. */
    initialize_inputs();
    if(cudaMalloc((void **) &d_A, sizeof(float)*N*N)!=cudaSuccess){
        return 0;
    }
    if(cudaMalloc((void **) &d_B, sizeof(float)*N*N)!=cudaSuccess){
        cudaFree(d_A);
        return 0;
    }
    if(cudaMemcpy(d_A, A, sizeof(float)*N*N, cudaMemcpyHostToDevice)!=cudaSuccess){
        printf("inside this");
        cudaFree(d_A);
        cudaFree(d_B);
        return 0;
    }
    /* Start Clock */
    printf("\n---------------------------------------------\n");
    printf("Matrix size N = %d", N);
    printf("\nStarting clock.\n\n");
    gettimeofday(&start, &tzdummy);
    /* Fix: round the block count UP so every column gets a thread.
     * The global `block = N/threadPerBlock` truncates (6000/256 = 23
     * blocks = 5888 threads), leaving the last columns untouched. */
    int grid = (N + threadPerBlock - 1) / threadPerBlock;
    matrixNorm<<<grid,threadPerBlock>>>(d_A,d_B,N);
    /* Blocking copy synchronizes with the kernel. */
    if(cudaMemcpy(B,d_B, sizeof(float)*N*N,cudaMemcpyDeviceToHost)!=cudaSuccess){
        cudaFree(d_A);
        cudaFree(d_B);
        return 0;
    }
    else{
        cudaFree(d_A);
        cudaFree(d_B);
    }
    /* Stop Clock */
    gettimeofday(&stop, &tzdummy);
    runtime = (unsigned long long)(stop.tv_sec - start.tv_sec) * 1000000 + (stop.tv_usec - start.tv_usec);
    printf("Runtime = %g ms.\n", (float)runtime/(float)1000);
    printf("\nStopped clock.");
    printf("\n---------------------------------------------\n");
    exit(0);
}
|
23,011 | //
// Created by gautam on 02/05/20.
//
#include "ColType.cuh"
// Builds the sentinel "invalid" column type: TYPE_INVALID, zero size,
// empty type string.
ColType newColType() {
    ColType c;
    c.type = TYPE_INVALID;
    c.size = 0;
    c.str[0] = '\0';   // empty string, equivalent to strcpy(c.str, "")
    return c;
}
// Parses a SQL-like type string ("int", "float", "boolean", "datetime",
// or "varchar(n)") into a ColType. Malformed or unknown strings yield
// TYPE_INVALID with size 0.
ColType newColType(std::string typeString) {
    ColType c;
    strcpy(c.str, typeString.c_str());
    if (typeString == "int") {
        c.type = TYPE_INT;
        c.size = 4;
    } else if (typeString == "float") {
        c.type = TYPE_FLOAT;
        c.size = 4;
    } else if (typeString == "boolean") {
        c.type = TYPE_BOOL;
        c.size = 1;
    } else if (typeString == "datetime") {
        c.type = TYPE_DATETIME;
        c.size = 8;
    } else {
        // Fix: default both fields first — the original left c.type and
        // c.size uninitialized whenever the string looked like
        // "xxxxxxx(...)" but the prefix was not "varchar".
        c.type = TYPE_INVALID;
        c.size = 0;
        if (typeString.length() >= 10 && typeString[7] == '(' &&
            typeString[typeString.length() - 1] == ')') {
            std::string wd = typeString.substr(0, 7);
            if (wd == "varchar") {
                c.type = TYPE_VARCHAR;
                // Parse the declared length; the trailing ')' stops >>.
                std::stringstream val(typeString.substr(8, typeString.length()));
                val >> c.size;
                ++c.size;   // +1 for the terminating NUL
            }
        }
    }
    return c;
}
|
23,012 | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it acheives this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o crackpwd_2alph2digt crackpwd_2alpha_2digit.cu
To Run:
./crackpwd_2alph2digt > resultscuda_2alp2dig.txt
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise,it returns 0.
*****************************************************************************/
/****************************************************************************
 Compares `attempt` against four hard-coded passwords. Each loop walks
 attempt and one password in lockstep while the characters match;
 reaching '\0' on both strings is a full match, reported via printf.
 NOTE(review): only a match against password4 ("SD49") returns 1 —
 matches against password1-3 print but still fall through to return 0.
 That asymmetry looks unintentional; confirm whether every match should
 return 1.
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
    // The four plaintext passwords being "cracked".
    char password1[] = "DV72";
    char password2[] = "ET21";
    char password3[] = "IR24";
    char password4[] = "SD49";
    // Independent cursors over `attempt`, one per password comparison.
    char *a = attempt;
    char *y = attempt;
    char *u = attempt;
    char *s = attempt;
    char *pass1 = password1;
    char *pass2 = password2;
    char *pass3 = password3;
    char *pass4 = password4;
    // Compare against password1 (match only prints).
    while(*a == *pass1) {
        if(*a == '\0')
        {
            printf("Found password: %s\n",password1);
            break;
        }
        a++;
        pass1++;
    }
    // Compare against password2 (match only prints).
    while(*y == *pass2) {
        if(*y == '\0')
        {
            printf("Found password: %s\n",password2);
            break;
        }
        y++;
        pass2++;
    }
    // Compare against password3 (match only prints).
    while(*u == *pass3) {
        if(*u == '\0')
        {
            printf("Found password: %s\n",password3);
            break;
        }
        u++;
        pass3++;
    }
    // Compare against password4 — the only case that returns 1.
    while(*s == *pass4) {
        if(*s == '\0')
        {
            printf("Found password: %s\n",password4);
            return 1;
        }
        s++;
        pass4++;
    }
    return 0;
}
/****************************************************************************
The kernel function assume that there will be only one thread and uses
nested loops to generate all possible passwords and test whether they match
the hidden password.
*****************************************************************************/
// One thread per (first letter, second letter) pair: blockIdx.x and
// threadIdx.x each map 0-25 onto 'A'-'Z'. Each thread then brute-forces
// the two trailing digits 00-99 and tests every 4-character candidate.
__global__ void kernel() {
    char password[5];
    password[4] = '\0';
    password[0] = (char)(blockIdx.x + 65);   // 'A' + block index
    password[1] = (char)(threadIdx.x + 65);  // 'A' + thread index
    for (char d1 = '0'; d1 <= '9'; d1++) {
        for (char d2 = '0'; d2 <= '9'; d2++) {
            password[2] = d1;
            password[3] = d2;
            if (is_a_match(password)) {
                // is_a_match reports hits via printf; nothing else to do.
            }
        }
    }
}
// Stores finish - start, in nanoseconds, into *difference.
// Returns 0 when the interval is positive, 1 otherwise.
int time_difference(struct timespec *start,
                    struct timespec *finish,
                    long long int *difference) {
    long long int secs  = finish->tv_sec  - start->tv_sec;
    long long int nsecs = finish->tv_nsec - start->tv_nsec;
    if (nsecs < 0) {   // borrow one second
        secs--;
        nsecs += 1000000000;
    }
    *difference = secs * 1000000000 + nsecs;
    return !(*difference > 0);
}
// Launches the 26x26 cracking kernel and reports wall-clock time.
int main() {
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    kernel <<<26,26>>>();
    // Fix: cudaThreadSynchronize() is long deprecated (and removed in
    // CUDA 12); cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
    return 0;
}
|
23,013 | #include <stdio.h>
// Busy-loop kernel used only to occupy the GPU; the counter is unused
// (the compiler may optimize the loop away entirely).
__global__ void dummy()
{
    int counter = 0;
    for (int iter = 0; iter < 1000000; iter++)
        counter++;
}
// Exercises peer-to-peer async copies: device 1 pushes two 800 MB
// buffers to devices 2 and 0 on separate streams, then all three
// devices are synchronized.
int main()
{
    cudaStream_t stream1, stream2;
    double *A, *B, *C, *D;
    cudaSetDevice(2);
    cudaMalloc((void **) &C, 100000000 * sizeof(double));
    cudaSetDevice(0);
    cudaMalloc((void **) &D, 100000000 * sizeof(double));
    cudaSetDevice(1);
    cudaMalloc((void **) &A, 100000000 * sizeof(double));
    cudaMalloc((void **) &B, 100000000 * sizeof(double));
    cudaStreamCreate(&stream1);
    cudaStreamCreate(&stream2);
    // Enable access from the current device (1) to devices 2 and 0.
    cudaDeviceEnablePeerAccess(2, 0);
    cudaDeviceEnablePeerAccess(0, 0);
    cudaMemcpyPeerAsync(C, 2, A, 1, 100000000 * sizeof(double), stream1);
    cudaMemcpyPeerAsync(D, 0, B, 1, 100000000 * sizeof(double), stream2);
    for(int i = 0; i < 3; i++)
    {
        cudaSetDevice(i);
        cudaDeviceSynchronize();
    }
    // Fix: destroy the streams and release all four 800 MB buffers
    // (previously leaked; streams were created on device 1).
    cudaSetDevice(1);
    cudaStreamDestroy(stream1);
    cudaStreamDestroy(stream2);
    cudaFree(A);
    cudaFree(B);
    cudaSetDevice(2);
    cudaFree(C);
    cudaSetDevice(0);
    cudaFree(D);
    return 0;
}
|
23,014 | /*
Author: Su Ming Yi
Date: 11/16/2018
Goal:
Add 2D array by cuda
How to compile it:
nvcc -O -o example_3 example_3.cu
How to run it:
./example_3
*/
#include "stdio.h"
#define COLUMNS 3
#define ROWS 2
// Element-wise addition of two ROWS x COLUMNS matrices; one block per
// element, with blockIdx.(x, y) selecting the (column, row).
__global__ void add(int *a, int *b, int *c)
{
    int col = blockIdx.x;
    int row = blockIdx.y;
    int idx = (COLUMNS * row) + col;
    c[idx] = a[idx] + b[idx];
}
// Adds two 2x3 matrices on the GPU (a[y][x]=x, b[y][x]=y) and prints c.
int main()
{
    int a[ROWS][COLUMNS], b[ROWS][COLUMNS], c[ROWS][COLUMNS];
    int *dev_a, *dev_b, *dev_c;
    // Device allocations for the three matrices.
    cudaMalloc((void **) &dev_a, ROWS*COLUMNS*sizeof(int));
    cudaMalloc((void **) &dev_b, ROWS*COLUMNS*sizeof(int));
    cudaMalloc((void **) &dev_c, ROWS*COLUMNS*sizeof(int));
    // Fill the host matrices: a holds column indices, b holds row indices.
    for (int y=0; y<ROWS; y++)
    {
        for (int x=0; x<COLUMNS;x++)
        {
            a[y][x] = x;
            b[y][x] = y;
        }
    }
    cudaMemcpy(dev_a, a, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice);
    // One single-thread block per matrix element: grid is COLUMNS x ROWS,
    // so blockIdx.x is the column and blockIdx.y the row.
    dim3 grid(COLUMNS, ROWS);
    add<<<grid,1>>>(dev_a, dev_b, dev_c);
    // Blocking copy synchronizes with the kernel.
    cudaMemcpy(c, dev_c, ROWS*COLUMNS*sizeof(int), cudaMemcpyDeviceToHost);
    for (int y=0; y<ROWS;y++)
    {
        for( int x =0; x<COLUMNS;x++)
        {
            printf("[%d][%d] = %d", y, x, c[y][x]);
        }
        printf("\n");
    }
    // Fix: release the device buffers (previously leaked).
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
23,015 |
int lenInts, lenFloats, numPrototypes, numCoordinates;
int *d_ints;
float *d_floats;
float *d_prototypes;
float *d_activationRadii;
int *d_features;
// Allocates and seeds the module's device-side state: input buffers for
// one sample, random prototypes in [0,1), the feature-flag array, and
// the per-float-coordinate activation radii.
void initialize(int _lenInts, int _lenFloats, int _numPrototypes, float *h_activationRadii){
    lenInts = _lenInts;
    lenFloats = _lenFloats;
    numPrototypes = _numPrototypes;
    numCoordinates = _lenInts + _lenFloats;
    cudaMalloc((void **) &d_ints, lenInts*sizeof(int));
    cudaMalloc((void **) &d_floats, lenFloats * sizeof(float));
    // Fix: heap-allocate the staging buffer — the previous
    // variable-length stack array is non-standard C++ and overflows the
    // stack for large prototype counts; it was also never reclaimed.
    float *h_prototypes = new float[numPrototypes * numCoordinates];
    for (int i = 0; i < numPrototypes*numCoordinates; i++){
        h_prototypes[i] = (float)rand()/(float)(RAND_MAX/1.0);
    }
    cudaMalloc((void **) &d_prototypes, numPrototypes*numCoordinates*sizeof(float));
    cudaMemcpy(d_prototypes, h_prototypes, numPrototypes * numCoordinates * sizeof(float), cudaMemcpyHostToDevice);
    delete[] h_prototypes;   // staging copy no longer needed
    cudaMalloc((void **) &d_features, numPrototypes*sizeof(int));
    cudaMalloc((void **) &d_activationRadii, lenFloats * sizeof(float));
    cudaMemcpy(d_activationRadii, h_activationRadii, lenFloats * sizeof(float), cudaMemcpyHostToDevice);
}
// Launch: one block per prototype (blockIdx.x), one thread per
// coordinate (threadIdx.x; float coordinates first, then ints).
// Each thread computes a triangular activation in [0,1] from the
// distance between its coordinate and the prototype's, then ANDs
// "activated at all" (val > 0) into the prototype's feature flag —
// so a prototype's flag survives only if EVERY coordinate activates.
// NOTE(review): in the int branch (threadIdx.x >= lenFloats) the radius
// lookup d_activationRadii[threadIdx.x] indexes past the lenFloats
// elements initialize() allocates — looks out of bounds; confirm what
// radius integer features are supposed to use.
__global__ void calcFeatures(float *d_prototypes, float *d_floats, int lenFloats, int *d_ints, int lenInts, float *d_activationRadii, int *d_features){
    float val = 0.0;
    if (threadIdx.x < lenFloats){
        // Float coordinate: distance to the prototype's matching coordinate.
        float distance = fabsf(d_floats[threadIdx.x] - d_prototypes[blockIdx.x * (lenFloats + lenInts) + threadIdx.x]);
        val = distance <= d_activationRadii[threadIdx.x] ? 1 - distance/d_activationRadii[threadIdx.x] : 0;
    } else {
        // Int coordinate, cast to float for the same distance test.
        float distance = fabsf(((float) d_ints[threadIdx.x - lenFloats]) - d_prototypes[blockIdx.x * (lenFloats + lenInts) + threadIdx.x]);
        val = distance <= d_activationRadii[threadIdx.x] ? 1 - distance/d_activationRadii[threadIdx.x] : 0;
    }
    // AND across all coordinates of this prototype's flag.
    atomicAnd(&d_features[blockIdx.x], val > 0 ? 1 : 0);
}
// Runs feature extraction for one sample: uploads the sample's float
// and int coordinates, launches one block per prototype (one thread per
// coordinate), and downloads the per-prototype feature flags.
void parallel_getFeaturesActivationRadii(float *h_floatArr, int *h_intArr, int *h_features){
    // Non-zero byte fill so the kernel's atomicAnd can clear flags.
    cudaMemset(d_features, 0xF, numPrototypes*sizeof(int));
    cudaMemcpy(d_floats, h_floatArr, lenFloats*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ints, h_intArr, lenInts * sizeof(int), cudaMemcpyHostToDevice);
    calcFeatures<<<numPrototypes, numCoordinates>>>(d_prototypes, d_floats, lenFloats, d_ints, lenInts, d_activationRadii, d_features);
    // Fix: d_features/h_features are int arrays, so size the copy with
    // sizeof(int) — sizeof(float) happened to match in width but was
    // semantically wrong and fragile.
    cudaMemcpy(h_features, d_features, numPrototypes * sizeof(int), cudaMemcpyDeviceToHost);
}
|
23,016 | #include "includes.h"
#define _USE_MATH_DEFINES
static void CheckCudaErrorAux(const char *, unsigned, const char *,
cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
// 2D multi-channel convolution without shared-memory tiling.
// Launch: 2D grid over (col, row); threadIdx.z selects the channel.
// The mask lives in constant memory (deviceMaskData); maskRows /
// maskColumns / maskRowsRadius / maskColumnsRadius come from enclosing
// scope. Out-of-image ("ghost") samples contribute 0.
// NOTE(review): the mask is indexed as deviceMaskData[i * maskRows + j]
// while i walks rows and j walks columns — for a non-square mask a
// row-major layout would need i * maskColumns + j. Harmless only if
// the mask is square; confirm the layout.
__global__ void convolutionNoTiling(float *I, float *P, int channels, int width, int height) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int depth = threadIdx.z;
    if (col < width && row < height && depth < channels) {
        // Accumulate the convolution sum for this pixel/channel.
        float pValue = 0;
        int startRow = row - maskRowsRadius;
        int startCol = col - maskColumnsRadius;
        for (int i = 0; i < maskRows; i++) {
            for (int j = 0; j < maskColumns; j++) {
                int currentRow = startRow + i;
                int currentCol = startCol + j;
                float iValue;
                // Ghost elements outside the image contribute zero.
                if (currentRow >= 0 && currentRow < height && currentCol >= 0 && currentCol < width) {
                    iValue = I[(currentRow * width + currentCol) * channels + depth];
                }
                else {
                    iValue = 0.0f;
                }
                pValue += iValue * deviceMaskData[i * maskRows + j];
            }
        }
        // Write the accumulated value back to global memory.
        P[(row * width + col) * channels + depth] = pValue;
    }
}
23,017 | #include <stdio.h>
#include <unistd.h>
#define HANDLE_ERROR(x) {\
cudaError_t status = x;\
if (status) {\
printf("Error %d line %d\n", status, __LINE__);\
}\
}
const int chunk = 10;
const int limit = 100;
const int target = 2000;
#define check_and_add(M) {\
mult(X, M, Y);\
if (!dup(&queue, Y)) {\
addToArray(&queue, Y);\
}\
}
struct array {
int **data;
int length;
int cap;
};
// Prepares an empty dynamic array with an initial capacity of 4 slots.
__device__ void initArray(array *arr) {
    arr->length = 0;
    arr->cap = 4;
    arr->data = (int**) malloc(sizeof(int*) * arr->cap);
}
// Frees every stored matrix and then the slot table. Safe on NULL.
__device__ void deinitArray(array *arr) {
    if (!arr)
        return;
    for (int i = 0; i < arr->length; ++i)
        free(arr->data[i]);
    free(arr->data);
}
// Appends a copy of the 5x5 matrix `add` (25 ints), doubling the slot
// table when full.
__device__ void addToArray(array *arr, int *add) {
    if (arr->length == arr->cap) {
        int newcap = arr->cap * 2;
        int **newdata = (int**) malloc(sizeof(int*)*newcap);
        memcpy(newdata, arr->data, sizeof(int*)*arr->cap);
        free(arr->data);   // fix: the old slot table leaked on every growth
        arr->cap = newcap;
        arr->data = newdata;
    }
    int *newcpy = (int*) malloc(sizeof(int)*25);
    memcpy(newcpy, add, sizeof(int)*25);
    arr->data[arr->length++] = newcpy;
}
// True iff the two 25-element (5x5) matrices are element-wise equal.
__device__ bool same(int *a, int *b) {
    for (int idx = 0; idx < 25; ++idx)
        if (a[idx] != b[idx])
            return false;
    return true;
}
// True iff `arr` already appears (element-wise) somewhere in `queue`.
__device__ bool dup(array *queue, int *arr) {
    for (int idx = 0; idx < queue->length; ++idx)
        if (same(queue->data[idx], arr))
            return true;
    return false;
}
// 5x5 integer matrix product: c = a * b (row-major, c overwritten).
__device__ void mult(int *a, int *b, int *c) {
    for (int row = 0; row < 5; ++row) {
        for (int col = 0; col < 5; ++col) {
            int acc = 0;
            for (int k = 0; k < 5; ++k)
                acc += a[row*5 + k] * b[k*5 + col];
            c[row*5 + col] = acc;
        }
    }
}
// One thread per (a+blockIdx.x, b+threadIdx.x) pair. Each base index is
// decoded base-3 into a 5x5 matrix with entries in {-1, 0, 1}. Starting
// from {A, B}, the thread closes the set under right-multiplication by
// A and B (breadth-first via `queue`), giving up if the set reaches
// `target`. If the set closed (idx caught up with queue.length) and is
// the largest seen so far, its size is written to *ans.
// NOTE(review): the read-compare-write on *ans is unsynchronized across
// threads, so the final *ans may under-report the true maximum; an
// atomicMax would make it exact.
__global__ void find(int a, int b, int *ans) {
    int baseA = a+blockIdx.x, baseB = b+threadIdx.x;
    if (baseA < limit && baseB < limit) {
        // Decode the two base indices into {-1,0,1}-valued 5x5 matrices.
        int A[25], B[25];
        for (int i=0; i<25; ++i) {
            A[i] = baseA % 3 - 1;
            B[i] = baseB % 3 - 1;
            baseA /= 3;
            baseB /= 3;
        }
        // Breadth-first closure: multiply each queued matrix by A and B
        // (via the check_and_add macro) and enqueue unseen products.
        array queue;
        initArray(&queue);
        addToArray(&queue, A);
        addToArray(&queue, B);
        int idx = 0;
        while (idx < queue.length && queue.length < target) {
            int *X = queue.data[idx];   // consumed by check_and_add
            int Y[25];                  // product scratch for check_and_add
            check_and_add(A);
            check_and_add(B);
            ++idx;
        }
        // Closed set (no cap hit) and a new record size -> publish it.
        if (idx == queue.length && queue.length > *ans) {
            *ans = queue.length;
        }
        deinitArray(&queue);
    }
}
// Sweeps all (i, j) base pairs in chunk x chunk kernel launches,
// tracking the largest closed matrix set found in managed memory *p.
int main() {
    int *p;
    HANDLE_ERROR(cudaMallocManaged(&p, sizeof(int)));
    *p = 0;   // fix: find() read/compared *p, which was never initialized
    for (int i = 0; i<limit; i+= chunk) {
        for (int j = 0; j<limit; j+= chunk) {
            find<<<chunk, chunk>>>(i, j, p);
            // Sync so the host can safely read the managed *p.
            cudaError_t status = cudaDeviceSynchronize();
            printf("status = %d\n", status);
            printf("ans = %d\n", *p);
            sleep(1);
        }
    }
    cudaFree(p);   // fix: release the managed allocation
}
|
23,018 | /**
* @Author: Giovanni Dalmasso <dalmasso>
* @Date: 14-Sep-2018
* @Email: giovanni.dalmasso@embl.es
* @Project: IntroToParallelProgramming
* @Last modified by: gioda
* @Last modified time: 14-Sep-2018
* @License: MIT
**/
#include <stdio.h>
// Squares each element: d_out[i] = d_in[i]^2. Intended for a
// single-block launch, so threadIdx.x is the element index.
__global__ void square(float *d_out, float *d_in)
{
    int i = threadIdx.x;
    float v = d_in[i];
    d_out[i] = v * v;
}
// Squares 64 floats on the GPU and prints the results four per line.
int main(int argc, char ** argv)
{
    const int ARRAY_SIZE = 64;
    const int ARRAY_BITES = ARRAY_SIZE * sizeof(float);
    // Build the input 0..63 on the host.
    float h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++)
        h_in[i] = float(i);
    float h_out[ARRAY_SIZE];
    // Device buffers.
    float *d_in;
    float *d_out;
    cudaMalloc((void **) &d_in, ARRAY_BITES);
    cudaMalloc((void **) &d_out, ARRAY_BITES);
    // Upload the input.
    cudaMemcpy(d_in, h_in, ARRAY_BITES, cudaMemcpyHostToDevice);
    // One block, one thread per element.
    square<<<1, ARRAY_SIZE>>>(d_out, d_in);
    // Blocking copy back also synchronizes with the kernel.
    cudaMemcpy(h_out, d_out, ARRAY_BITES, cudaMemcpyDeviceToHost);
    // Print four values per line, tab-separated.
    for (int i = 0; i < ARRAY_SIZE; i++)
    {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
23,019 | /*
* purpose: CUDA managed unified memory for >= pascal architectures;
* this version just uses cudaMallocManaged() on the host,
* then runs kernels on the GPU to add together two arrays
* of size 1 GB and save the results into a third array;
* n.b. here we want to stick to a separated initialization
* kernel, but then before running the actual compute
* kernel do the unified memory prefetching and see
* whether this will affect compute/memory bandwith/page
* faults performance;
* result: from profiling via 'nvprof ./a.out' we now see pretty
* much the best results so far, hence prefetching seems to
* really pay off ! interestingly the number of page faults
* has also decreased;
* compilation: nvcc ./unified_memory_example_4.cu
* usage: ./a.out
*/
#include <stdio.h>
#define ARRAYDIM 268435456
/*
 * Initialization kernel: element i gets x[i] = i and y[i] = i + 1.
 * z is unused here but keeps the signature uniform with KrnlDmmyCalc.
 * The grid is sized to cover the arrays exactly, so no bounds guard.
 */
__global__ void KrnlDmmyInit(float *x, float *y, float *z)
{
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    x[idx] = (float) idx;
    y[idx] = (float) (idx + 1);
}
/*
 * Compute kernel: z[i] = x[i] + y[i], one thread per element.
 * The grid is sized to cover the arrays exactly, so no bounds guard.
 */
__global__ void KrnlDmmyCalc(float *x, float *y, float *z)
{
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    z[idx] = x[idx] + y[idx];
}
/*
 * host main: allocates three 1 GB managed arrays, initializes them on
 * the GPU, prefetches all three to device 0, then runs the add kernel
 * 100 times.
 */
int main()
{
    int i, cudaRtrn;
    dim3 thrds_per_block, blcks_per_grid;
    float *a, *b, *c;
    /*
     * Fix: the original wrote `if (cudaRtrn = call != 0)`. Because !=
     * binds tighter than =, cudaRtrn received the comparison result
     * (0/1) instead of the CUDA error code, so the printed code was
     * always 1. Parenthesizing the assignment captures the real code.
     */
    if ((cudaRtrn = cudaMallocManaged(&a, ARRAYDIM * sizeof(float))) != 0) {
        printf("*** allocation failed for array a[], %d ***\n", cudaRtrn);
    }
    if ((cudaRtrn = cudaMallocManaged(&b, ARRAYDIM * sizeof(float))) != 0) {
        printf("*** allocation failed for array b[], %d ***\n", cudaRtrn);
    }
    if ((cudaRtrn = cudaMallocManaged(&c, ARRAYDIM * sizeof(float))) != 0) {
        printf("*** allocation failed for array c[], %d ***\n", cudaRtrn);
    }
    /*
     * Initialize on the GPU, then prefetch all three arrays to device 0
     * before the compute loop to avoid on-demand page migration.
     */
    thrds_per_block.x = 256;
    blcks_per_grid.x = ARRAYDIM / thrds_per_block.x;
    KrnlDmmyInit<<<blcks_per_grid, thrds_per_block>>>(a, b, c);
    cudaDeviceSynchronize();
    cudaMemPrefetchAsync(a, ARRAYDIM * sizeof(float), 0, NULL);
    cudaMemPrefetchAsync(b, ARRAYDIM * sizeof(float), 0, NULL);
    cudaMemPrefetchAsync(c, ARRAYDIM * sizeof(float), 0, NULL);
    for (i=0; i<100; i++) {
        KrnlDmmyCalc<<<blcks_per_grid, thrds_per_block>>>(a, b, c);
        cudaDeviceSynchronize();
    }
    cudaFree(c);
    cudaFree(b);
    cudaFree(a);
    return(0);
}
|
23,020 | #include "point.cuh"
// Default constructor: places the point at the origin.
__host__ __device__
Point::Point() : x(0), y(0), z(0)
{
}
// Component constructor: places the point at (x, y, z).
__host__ __device__
Point::Point(float x, float y, float z) : x(x), y(y), z(z)
{
}
// Returns the norm of a point treated like a vector,
// i.e. a copy of this vector scaled to unit length.
// NOTE(review): a zero vector gives mag == 0 and the division produces
// inf/NaN components -- callers must not normalize the zero vector.
__host__ __device__
Point Point::norm()
{
Point result;
result = *this;
float mag = sqrt(this->x * this->x +
this->y * this->y +
this->z * this->z);
result /= mag;
return result;
}
// Dot product of two points treated like vectors.
__host__ __device__
float Point::dot(Point p)
{
return this->x * p.x + this->y * p.y + this->z * p.z;
}
// Cross product of two points treated like vectors
// (this x p, right-handed).
__host__ __device__
Point Point::cross(Point p)
{
Point result = Point(this->y * p.z - this->z * p.y,
this->z * p.x - this->x * p.z,
this->x * p.y - this->y * p.x);
return result;
}
// Calculates the Euclidean distance between 2 points.
__host__ __device__
float Point::dist(Point p)
{
float dx = p.x - this->x;
float dy = p.y - this->y;
float dz = p.z - this->z;
return sqrt(dx * dx + dy * dy + dz * dz);
}
// Component-wise minimum of this point and p.
__host__ __device__
Point Point::cwiseMin(Point p)
{
    float px = p.X();
    float py = p.Y();
    float pz = p.Z();
    return Point((x < px) ? x : px,
                 (y < py) ? y : py,
                 (z < pz) ? z : pz);
}
// OPERATOR OVERLOADS
// --- Component-wise binary arithmetic against another Point ---
__host__ __device__
Point Point::operator+(Point p)
{
Point result = Point(this->x + p.x,
this->y + p.y,
this->z + p.z);
return result;
}
__host__ __device__
Point Point::operator-(Point p)
{
Point result = Point(this->x - p.x,
this->y - p.y,
this->z - p.z);
return result;
}
__host__ __device__
Point Point::operator*(Point p)
{
Point result = Point(this->x * p.x,
this->y * p.y,
this->z * p.z);
return result;
}
__host__ __device__
Point Point::operator/(Point p)
{
Point result = Point(this->x / p.x,
this->y / p.y,
this->z / p.z);
return result;
}
// --- Component-wise compound assignment against another Point.
// Note: these return a Point copy (by value), not a reference, per the
// class declaration in point.cuh. ---
__host__ __device__
Point Point::operator+=(Point p)
{
this->x += p.x;
this->y += p.y;
this->z += p.z;
return *this;
}
__host__ __device__
Point Point::operator-=(Point p)
{
this->x -= p.x;
this->y -= p.y;
this->z -= p.z;
return *this;
}
__host__ __device__
Point Point::operator*=(Point p)
{
this->x *= p.x;
this->y *= p.y;
this->z *= p.z;
return *this;
}
__host__ __device__
Point Point::operator/=(Point p)
{
this->x /= p.x;
this->y /= p.y;
this->z /= p.z;
return *this;
}
// --- Scalar arithmetic: the scalar is applied to every component ---
__host__ __device__
Point Point::operator+(float f)
{
Point result = Point(this->x + f,
this->y + f,
this->z + f);
return result;
}
__host__ __device__
Point Point::operator-(float f)
{
Point result = Point(this->x - f,
this->y - f,
this->z - f);
return result;
}
__host__ __device__
Point Point::operator*(float f)
{
Point result = Point(this->x * f,
this->y * f,
this->z * f);
return result;
}
__host__ __device__
Point Point::operator/(float f)
{
Point result = Point(this->x / f,
this->y / f,
this->z / f);
return result;
}
// --- Scalar compound assignment (same by-value return note as above) ---
__host__ __device__
Point Point::operator+=(float f)
{
this->x += f;
this->y += f;
this->z += f;
return *this;
}
__host__ __device__
Point Point::operator-=(float f)
{
this->x -= f;
this->y -= f;
this->z -= f;
return *this;
}
__host__ __device__
Point Point::operator*=(float f)
{
this->x *= f;
this->y *= f;
this->z *= f;
return *this;
}
__host__ __device__
Point Point::operator/=(float f)
{
this->x /= f;
this->y /= f;
this->z /= f;
return *this;
}
// Copy assignment (component-wise).
__host__ __device__
Point Point::operator=(Point p)
{
this->x = p.x;
this->y = p.y;
this->z = p.z;
return *this;
}
// Exact component equality (float == float; no epsilon tolerance).
__host__ __device__
bool Point::operator==(Point p)
{
if (this->x == p.x && this->y == p.y && this->z == p.z)
{
return true;
}
return false;
}
// Stream output operator: tab-separated components, newline-terminated.
__host__
std::ostream& operator<<(std::ostream &out, Point p)
{
    out << p.x << "\t";
    out << p.y << "\t";
    out << p.z << "\n";
    return out;
}
/******************************************************************************/
/* Methods for Ray Class */
/******************************************************************************/
// Default constructor: unit-ish direction (1,1,1) from the origin,
// black color, and t (hit parameter) set to "no hit yet" (FLT_MAX).
__host__ __device__
Ray::Ray()
{
// Default direction
this->x = 1;
this->y = 1;
this->z = 1;
// Located at the origin
this->posx = 0;
this->posy = 0;
this->posz = 0;
// Default color black
this->setColor(0, 0, 0);
this->t = FLT_MAX;
}
// Component constructor: origin (X,Y,Z) and direction (dX,dY,dZ).
__host__ __device__
Ray::Ray(float X, float Y, float Z, float dX, float dY, float dZ)
{
this->x = dX;
this->y = dY;
this->z = dZ;
this->posx = X;
this->posy = Y;
this->posz = Z;
// Default color black
this->setColor(0, 0, 0);
this->t = FLT_MAX;
}
// Point constructor: direction dp, origin p.
__host__ __device__
Ray::Ray(Point dp, Point p)
{
this->x = dp.X();
this->y = dp.Y();
this->z = dp.Z();
this->posx = p.X();
this->posy = p.Y();
this->posz = p.Z();
// Default color black
this->setColor(0, 0, 0);
this->t = FLT_MAX;
}
// Sets the ray's RGB color components.
__host__ __device__
void Ray::setColor(int r, int g, int b)
{
this->R = r;
this->G = g;
this->B = b;
}
// Sets the ray's direction from a Point treated as a vector.
__host__ __device__
void Ray::setDir(Point p)
{
this->x = p.X();
this->y = p.Y();
this->z = p.Z();
}
// Sets the ray's origin.
__host__ __device__
void Ray::setStart(Point p)
{
this->posx = p.X();
this->posy = p.Y();
this->posz = p.Z();
}
// Evaluates the ray at parameter `time`: origin + time * direction.
__host__ __device__
Point Ray::propagate(float time)
{
return Point(this->x * time + this->posx,
this->y * time + this->posy,
this->z * time + this->posz);
}
///////////////////////////////////////////////////////////////////////////////
// Point Light operations
///////////////////////////////////////////////////////////////////////////////
// Default constructor: light at (5,5,5), teal-ish color, small attenuation.
__host__ __device__
pointLight::pointLight()
{
this->setPos(5, 5, 5);
this->setColor(0, 140, 125);
this->setAtt_k(0.0005);
}
// Component constructor: position, RGB color and attenuation coefficient.
__host__ __device__
pointLight::pointLight(float X, float Y, float Z,
int r, int g, int b, float att_k)
{
this->setPos(X, Y, Z);
this->setColor(r, g, b);
this->setAtt_k(att_k);
}
// Point constructor: same as above but position given as a Point.
__host__ __device__
pointLight::pointLight(Point p, int r, int g, int b, float att_k)
{
this->setPos(p);
this->setColor(r, g, b);
this->setAtt_k(att_k);
}
// Sets the light's RGB color components.
__host__ __device__
void pointLight::setColor(int r, int g, int b)
{
this->R = r;
this->G = g;
this->B = b;
}
// Returns the color packed into a Point (x=R, y=G, z=B).
__host__ __device__
Point pointLight::getColor()
{
return Point(this->R, this->G, this->B);
}
// Sets the attenuation coefficient.
__host__ __device__
void pointLight::setAtt_k(float att_k)
{
this->attenuation_k = att_k;
}
// Returns the attenuation coefficient.
__host__ __device__
float pointLight::getAtt_k()
{
return this->attenuation_k;
}
// Sets the light position from a Point (delegates to the float overload).
__host__ __device__
void pointLight::setPos(Point p)
{
this->setPos(p.X(), p.Y(), p.Z());
}
// Sets the light position from components.
__host__ __device__
void pointLight::setPos(float X, float Y, float Z)
{
this->x = X;
this->y = Y;
this->z = Z;
}
// Returns the light position as a Point.
__host__ __device__
Point pointLight::getPos()
{
return Point(this->x, this->y, this->z);
}
|
23,021 | #include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
// Functor that accumulates each visited index into the int pointed to by
// `sum` and prints the running total.
// NOTE(review): `sum` points at a host stack variable (see main).  If
// thrust::for_each dispatches this functor to the device backend,
// dereferencing that host pointer in operator() is invalid, and even on
// a parallel host backend the unsynchronized `*sum += i` is a data race.
// The code is only well-defined for the sequential host calls in main().
struct sum_Functor {
int *sum;
sum_Functor(int *s){sum = s;}
__host__ __device__
void operator()(int i)
{
*sum+=i;
printf("In functor: i %d sum %d\n",i,*sum);
}
};
// Demonstrates how a stateful functor behaves differently when called
// sequentially on the host vs. passed to thrust::for_each.
int main(){
thrust::counting_iterator<int> first(0);
thrust::counting_iterator<int> last = first+10;
int sum = 0;
sum_Functor sf(&sum);
printf("After constructor: value is %d\n", *(sf.sum));
// Sequential host calls: deterministically adds 0..4, so sum becomes 10.
for(int i=0;i<5;i++){
sf(i);
}
printf("Initiating for_each call - current value %d\n", (*(sf.sum)));
// NOTE(review): on a device backend the functor is copied to the GPU and
// operator() dereferences the HOST pointer sf.sum there -- undefined
// behavior; the final printed value is not guaranteed to include the
// 0..9 contributions from this call.
thrust::for_each(first,last,sf);
cudaDeviceSynchronize();
printf("After for_each: value is %d\n",*(sf.sum));
}
|
23,022 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <curand.h>
#include <curand_kernel.h>
#include "kernels.cuh"
// Maps an id to a vote polarity: even ids -> +1, odd ids -> -1.
__device__ __forceinline__
int get_polarity(int id) {
    return (id & 1) ? -1 : 1;
}
// Returns true when the automaton sits in the "include" half of its state
// range, i.e. strictly above max_state / 2; false means "exclude".
__device__ __forceinline__
bool automata_action(unsigned int automata_state, unsigned int max_state) {
    return automata_state > (max_state / 2);
}
// Evaluates every clause of one class against sample `sample_id` and
// writes a boolean per clause into clauses_output.
// Launch layout: blocks stride over clauses (blockIdx.x + k*gridDim.x),
// threads within a block stride over that clause's automatas.
// In prediction mode (prediction == true) an all-exclude clause is
// forced to output false.
__global__
void validate_clauses(unsigned int* model, bool* clauses_output, unsigned int* x_data, unsigned int sample_id, unsigned int clauses_amount, unsigned int features_amount, unsigned int automatas_amount, unsigned int class_id, unsigned int max_state, bool prediction)
{
// Declare some shared variables
// shared[0] = The output of the clause
// shared[1] = Boolean flag if all is in exclude mode
__shared__ bool shared[2];
// Calculate the clause id to work on
const int thread_id = threadIdx.x;
// Initialize some "private variables"
unsigned int sample_value;
unsigned int automata_value;
bool action;
int automata_polarity;
for (unsigned int clause_id = blockIdx.x; clause_id < clauses_amount; clause_id += gridDim.x) {
// Set the clause output to be true
if(thread_id == 0) {
shared[0] = true;
shared[1] = true;
}
// Wait until all threads are ready
__syncthreads();
// Loop over each of the automata and "stride" through
for(unsigned int automata_id = thread_id; automata_id < automatas_amount; automata_id += blockDim.x) {
// Check if any of the other threads have evaluated the clause to false. This way we could skip checking.
if(shared[0] == false) {
break;
}
// Get the automatas value
automata_value = model[(class_id * clauses_amount * automatas_amount) + (clause_id * automatas_amount) + automata_id];
// Get the action of the automata
action = automata_action(automata_value, max_state);
// Check if the automata is in an include state, if so, investigate further...
if(action == true) {
// Calculate the polarity of the automata
automata_polarity = get_polarity(automata_id);
// Get the sample's value (automatas come in +/- pairs, so feature = automata_id / 2)
sample_value = x_data[(sample_id * features_amount) + (automata_id / 2)];
// Flip the flag that says that all automatas are in exclude mode
// (several threads may write false concurrently; they all write the
// same value, so the outcome is consistent)
shared[1] = false;
// Since the automata is in an include state, lets check if the DOES NOT match the desired value
if(((automata_polarity == 1) && (sample_value != 1)) || ((automata_polarity == -1) && (sample_value != 0))){
// A condition has been met that would falsify the entire clause. Therefore, evaluate the entire clause to false
shared[0] = false;
break;
}
}
}
// Wait until all threads to evaluate until finished
__syncthreads();
// Check if we are thread id 0
if(thread_id == 0)
{
// Check if the clause was, when finished evaluating, evaluated to false
if(shared[0] == false || (prediction == true && shared[1] == true)) {
clauses_output[clause_id] = false;
}
// Assuming it was not false, then it is true
else {
clauses_output[clause_id] = true;
}
}
}
}
// Sums the signed clause votes (+1 for even clause ids, -1 for odd ones)
// into scores[scores_index], clamped to [-threshold, +threshold] when
// threshold != 0.  Single-block kernel: launch with one block and
// blockDim.x * sizeof(int) bytes of dynamic shared memory.
// NOTE: the tree reduction assumes blockDim.x is a power of two.
__global__
void reduce_votes(int* scores, unsigned int scores_index, bool* clauses_output, unsigned int clauses_amount, unsigned int threshold) {
    // Per-thread partials live in dynamic shared memory (one int per thread).
    extern __shared__ int results[];
    // Each thread accumulates the votes of its strided slice of clauses.
    int thread_result = 0;
    for (unsigned int clause_id = threadIdx.x; clause_id < clauses_amount; clause_id += blockDim.x) {
        thread_result += (get_polarity(clause_id) * clauses_output[clause_id]);
    }
    // Move the thread's result into shared memory
    results[threadIdx.x] = thread_result;
    // Wait until all the threads have completed the summation of all clause outputs
    __syncthreads();
    // Standard shared-memory tree reduction.
    for (unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2) {
        if (threadIdx.x < offset) {
            results[threadIdx.x] += results[threadIdx.x + offset];
        }
        __syncthreads();
    }
    // Thread 0 clamps and stores the final score.
    if (threadIdx.x == 0)
    {
        if (threshold != 0) {
            // BUGFIX: clamp in SIGNED arithmetic.  The original compared the
            // signed sum directly against the unsigned `threshold`, which
            // promoted the sum to unsigned; any negative score then compared
            // as a huge positive value and was clamped to +threshold instead
            // of -threshold.
            const int signed_threshold = static_cast<int>(threshold);
            if (results[0] > signed_threshold) {
                results[0] = signed_threshold;
            }
            else if (results[0] < -signed_threshold) {
                results[0] = -signed_threshold;
            }
        }
        scores[scores_index] = results[0];
    }
}
// Decides, per clause, which feedback type to apply this round:
//   0 = no feedback, 1 = type 1 feedback, 2 = type 2 feedback.
// The decision is stochastic, driven by the class score scores[0] and the
// threshold T: feedback probability is (T - score)/2T for the correct
// class and (T + score)/2T otherwise.
// NOTE(review): threshold == 0 would divide by zero here -- callers are
// assumed to pass a positive threshold.
__global__
void calculate_feedback(unsigned int* clauses_feedback, int* scores, unsigned int threshold, float s, unsigned int class_id, bool correct_class, unsigned int clauses_amount, curandState* random_states) {
    // Calculate the position of the thread
    unsigned int global_thread_id = (blockIdx.x * blockDim.x) + threadIdx.x;
    // Work on a private copy of this thread's RNG state; persisted on exit.
    curandState rnd_state = random_states[global_thread_id];
    float clause_polarity;
    int class_score = scores[0];
    // Grid-stride loop over all clauses.
    // BUGFIX: the stride must be the total number of launched threads
    // (gridDim.x * blockDim.x).  The previous `+= gridDim.x` stride made
    // different threads visit (and race on) the same clause whenever
    // blockDim.x > 1.
    for (unsigned int clause_id = global_thread_id; clause_id < clauses_amount; clause_id += (gridDim.x * blockDim.x)) {
        // Determine the polarity of the clause
        clause_polarity = static_cast<float>(get_polarity(clause_id));
        // Check if we are on the correct class
        if (correct_class == true) {
            // Check if we are to skip feedback for this clause
            if (curand_uniform(&rnd_state) > (((1.0f * threshold) - class_score) / (2.0f * threshold))) {
                // No feedback will be given to this clause
                clauses_feedback[clause_id] = 0;
            }
            else {
                // signbit(+1) == 0, signbit(-1) == 1, so:
                // clauses for  -> type 1 feedback
                // clauses against -> type 2 feedback
                clauses_feedback[clause_id] = 1 + static_cast<int>(signbit(clause_polarity));
            }
        }
        else {
            // Check if we are to skip feedback for this clause
            if (curand_uniform(&rnd_state) > (((1.0f * threshold) + class_score) / (2.0f * threshold))) {
                // No feedback will be given to this clause
                clauses_feedback[clause_id] = 0;
            }
            else {
                // Inverted mapping:
                // clauses for  -> type 2 feedback
                // clauses against -> type 1 feedback
                clauses_feedback[clause_id] = 2 - static_cast<int>(signbit(clause_polarity));
            }
        }
    }
    // Copy the random state back to global memory
    random_states[global_thread_id] = rnd_state;
}
// Applies type 1 / type 2 Tsetlin feedback (as chosen per clause in
// clauses_feedback) to the automata states of one class.
// Launch layout: blocks stride over clauses, threads within a block
// stride over that clause's automatas.
// NOTE(review): the parameters correct_class and threshold are not used
// in this kernel's body -- presumably kept for signature parity with
// improved_feedback; confirm before removing.
__global__
void give_feedback_to_clauses(unsigned int* model, unsigned int* clauses_feedback, unsigned int* x_data, bool* clauses_output, unsigned int class_id, unsigned int sample_id, const bool correct_class, unsigned int clauses_amount, unsigned int features_amount, unsigned int automatas_amount, unsigned int max_state, unsigned int threshold, float s, curandState* random_states) {
// Calculate and declare some "private variables"
// Get the clause id, based on the block id in the grid
unsigned int global_thread_id = (blockIdx.x * blockDim.x) + threadIdx.x;
// Used to calculate the absolute index of an automata
unsigned int automata_model_index;
unsigned int automata_temp;
// Used to tempromary store whether an automata is in include or exclude state
bool action;
// Used to tempromary store the polarity of an automata
int automata_polarity;
// Used to tempromary store the feature id of which feature an automata is associated with
unsigned int sample_value;
// Get the random state from the random values matrix (used to generate "random" numbers)
curandState rnd_state = random_states[global_thread_id];
// In case there are more clauses than blocks, we need to loop them
for(unsigned int clause_id = blockIdx.x; clause_id < clauses_amount; clause_id += gridDim.x) {
// Check if we are to do type 1 feedback
if(clauses_feedback[clause_id] == 1){
// If the clause output was evaluated to false
if(clauses_output[clause_id] == 0) {
// Loop and potentially punish all automatas
for(unsigned int automata_index = threadIdx.x; automata_index < automatas_amount; automata_index += blockDim.x) {
// Calculate the position of the current automata
automata_model_index = (class_id * clauses_amount * automatas_amount) + (clause_id * automatas_amount) + automata_index;
// Get the value for the automata
automata_temp = model[automata_model_index];
// Punish (decrement) with probability 1/s, never below state 1
if((automata_temp > 1) && (curand_uniform(&rnd_state) <= (1.0 / s))) {
model[automata_model_index] = automata_temp - 1;
}
}
}
else {
// Loop over each of the automatas
for(unsigned int automata_index = threadIdx.x; automata_index < automatas_amount; automata_index += blockDim.x){
// Calculate the position of the current automata
automata_model_index = (class_id * clauses_amount * automatas_amount) + (clause_id * automatas_amount) + automata_index;
// Get the value of the sample for the current automata
sample_value = x_data[(sample_id * features_amount) + static_cast<unsigned int>(automata_index / 2)];
// Calculate the polarity of the automata
automata_polarity = get_polarity(automata_index);
// Get the value for the automata
automata_temp = model[automata_model_index];
// Check if the sample was False
if(sample_value == 0) {
// Check if the automata is an against automata
if(automata_polarity == -1){
// Increment state
if((curand_uniform(&rnd_state) <= ((s - 1.0) / s)) && (automata_temp < max_state)) {
model[automata_model_index] = automata_temp + 1;
}
}
// Assumes that the automata is a for automata (since it is not an against automata)
else {
// Decrement state
if((curand_uniform(&rnd_state) <= (1.0 / s)) && automata_temp > 1) {
model[automata_model_index] = automata_temp - 1;
}
}
}
// Assumes that the sample is 1 (since it was not 0)
else {
// Check if the automata is a for automata
if(automata_polarity == 1) {
// Increment the state (reward the matching include literal)
if((curand_uniform(&rnd_state) <= ((s - 1.0) / s)) && (automata_temp < max_state)) {
model[automata_model_index] = automata_temp + 1;
}
}
// Assumes that the automata is an against automata (since it is not an for automata)
else {
// Decrement state
if((curand_uniform(&rnd_state) <= (1.0 / s)) && automata_temp > 1) {
model[automata_model_index] = automata_temp - 1;
}
}
}
}
}
}
// Check if we are to do type 2 feedback
else if(clauses_feedback[clause_id] == 2) {
// Check if the clause was evaluated to true in the evaluation phase.
if(clauses_output[clause_id] == 1) {
// Loop over all the automatas
for(unsigned int automata_id = threadIdx.x; automata_id < automatas_amount; automata_id += blockDim.x) {
// Calculate the automata model index
automata_model_index = (class_id * clauses_amount * automatas_amount) + (clause_id * automatas_amount) + automata_id;
// Get the automata value
automata_temp = model[automata_model_index];
// Get the sample's value
sample_value = x_data[(sample_id * features_amount) + (automata_id / 2)];
// Calculate the polarity of the automata
automata_polarity = get_polarity(automata_id);
// Get the include/exclude action for the automata
action = automata_action(automata_temp, max_state);
// Push excluded literals that contradict the sample towards include:
// for-automata on a 0-feature ...
if((automata_polarity == 1) && (sample_value == 0) && (action == false) && (automata_temp < max_state)){
model[automata_model_index] = automata_temp + 1;
}
// ... and against-automata on a 1-feature
else if((automata_polarity == -1) && (sample_value == 1) && (action == false) && (automata_temp < max_state)){
model[automata_model_index] = automata_temp + 1;
}
}
}
}
}
// Some cleanup and persistence before exiting
// Copy back the random state
random_states[global_thread_id] = rnd_state;
}
// Flat variant of give_feedback_to_clauses: each thread walks a strided
// range of (clause, automata) pairs and applies type 1 / type 2 feedback
// branchlessly via arithmetic on boolean expressions.
__global__
void improved_feedback(unsigned int* model, unsigned int* clauses_feedback, unsigned int* x_data, bool* clauses_output, unsigned int class_id, unsigned int sample_id, const bool correct_class, unsigned int clauses_amount, unsigned int features_amount, unsigned int automatas_amount, unsigned int max_state, unsigned int threshold, float s, curandState* random_states) {
    // Data indexing
    unsigned int global_thread_id = (blockIdx.x * blockDim.x) + threadIdx.x;
    unsigned int clause_id;
    unsigned int automata_id;
    unsigned int feature_id;
    // Model indexing
    unsigned int model_index;
    // Used to temporarily store the polarity of an automata
    int automata_polarity;
    unsigned int clause_feedback_type;
    bool clause_output;
    unsigned int automata_value;
    unsigned int sample_value;
    // Get the random state from the random values matrix (used to generate "random" numbers)
    curandState rnd_state = random_states[global_thread_id];
    // Grid-stride loop over every (clause, automata) pair of this class.
    // BUGFIX: the stride must be the total thread count
    // (gridDim.x * blockDim.x), not gridDim.x; the old stride made
    // different threads process (and race on) the same model entry
    // whenever blockDim.x > 1.
    for (unsigned int model_relative_index = global_thread_id; model_relative_index < (clauses_amount * automatas_amount); model_relative_index += (gridDim.x * blockDim.x)) {
        // Calculate the position in the model
        model_index = (class_id * clauses_amount * automatas_amount) + model_relative_index;
        clause_id = model_relative_index / automatas_amount;
        automata_id = model_relative_index % automatas_amount;
        feature_id = automata_id / 2;
        // Get the feedback type and this clause's evaluation result
        clause_feedback_type = clauses_feedback[clause_id];
        clause_output = clauses_output[clause_id];
        // Get the automata value and sample value
        automata_polarity = get_polarity(automata_id);
        automata_value = model[model_index];
        sample_value = x_data[(sample_id * features_amount) + feature_id];
        // Check if we are to do type 1 feedback
        if (clause_feedback_type == 1) {
            // If the clause output was evaluated to false
            if (clause_output == false) {
                // Punish with probability 1/s, never below state 1
                model[model_index] = automata_value - static_cast<int>((curand_uniform(&rnd_state) <= (1.0f / s)) && (automata_value > 1));
            }
            else {
                // Check if the sample was False
                if (sample_value == 0) {
                    // Against automata: increment with probability (s-1)/s
                    if (automata_polarity == -1) {
                        model[model_index] = automata_value + static_cast<int>((curand_uniform(&rnd_state) <= ((s - 1.0f) / s)) && (automata_value < max_state));
                    }
                    // For automata: decrement with probability 1/s
                    else {
                        model[model_index] = automata_value - static_cast<int>((curand_uniform(&rnd_state) <= (1.0f / s)) && automata_value > 1);
                    }
                }
                // Assumes that the sample is 1 (since it was not 0)
                else {
                    // For automata: increment with probability (s-1)/s
                    if (automata_polarity == 1) {
                        model[model_index] = automata_value + static_cast<int>((curand_uniform(&rnd_state) <= ((s - 1.0f) / s)) && (automata_value < max_state));
                    }
                    // Against automata: decrement with probability 1/s
                    else {
                        model[model_index] = automata_value - static_cast<int>((curand_uniform(&rnd_state) <= (1.0f / s)) && automata_value > 1);
                    }
                }
            }
        }
        // Check if we are to do type 2 feedback
        else if (clause_feedback_type == 2) {
            // Check if the clause was evaluated to true in the evaluation phase.
            if (clause_output == true) {
                // BUGFIX: the original expression relied on `&&` binding
                // tighter than `||`, so a for-automata on a 0-feature was
                // incremented even when already in include state or at
                // max_state (diverging from give_feedback_to_clauses, which
                // checks action == false and < max_state for BOTH polarities).
                // The parentheses below apply those guards to both branches.
                model[model_index] = automata_value + static_cast<int>(
                    (((automata_polarity == 1) && (sample_value == 0)) || ((automata_polarity == -1) && (sample_value == 1)))
                    && (automata_action(automata_value, max_state) == false)
                    && (automata_value < max_state));
            }
        }
    }
    // Some cleanup and persistence before exiting
    // Copy back the random state
    random_states[global_thread_id] = rnd_state;
}
// Initializes one curandState per slot in `states`, seeded so that each
// slot draws an independent subsequence (distinct sequence id per index).
__global__
void initialize_random_states(curandState* states, int seed, unsigned int amount_of_states) {
    // Calculate the global thread id
    unsigned int global_thread_id = ((blockIdx.x * blockDim.x) + threadIdx.x);
    // Per-thread offset (to make it a "bit more random")
    int offset = seed + global_thread_id;
    // Grid-stride loop over the states.
    // BUGFIX: the stride must be the total number of launched threads
    // (gridDim.x * blockDim.x); the old `+= gridDim.x` stride made several
    // threads initialize the same state index whenever blockDim.x > 1.
    for (unsigned int index = global_thread_id; index < amount_of_states; index += (gridDim.x * blockDim.x)) {
        // Initialize the random state
        curand_init(seed, index, offset, &states[index]);
    }
}
|
23,023 | #include<cuda_runtime.h>
// Kernel definition
// NOTE(review): all three parameters are scalar floats passed BY VALUE,
// so `C = A + B` only writes the kernel-local copy of C and the result
// is discarded -- this kernel has no observable effect as written.  The
// matrix-add example it resembles takes array parameters; the signature
// here is presumably a transcription error (i and j are also unused).
__global__ void MatAdd(float A, float B, float C)
{
int i = threadIdx.x;
int j = threadIdx.y;
C= A + B;
}
|
23,024 | #include <iostream>
#include <fstream>
#include <cmath>
#include <cstdlib>
#include <string>
#include <iomanip>
#define T_P_B 1024
///////////////////////////// Global variables /////////////////////////////////
std::string dimension; // grid dimension
float k; // k-step
int timesteps; // num of timesteps
int width, height, depth; // grid size
float startTemp, fixedTemp; // node start temp
int heat_x, heat_y, heat_z, // fixed heater vars
heat_w, heat_h, heat_d;
float *d_old, *d_new, *d_heaters, // grids for values
*g_old, *g_new, *heaters;
///////////////////////////// CUDA Functions ///////////////////////////////////
// One explicit Euler timestep of the heat equation on a flattened
// width x height x depth grid (idx = x + y*width + z*width*height).
// Nodes with a nonzero entry in `fixed` are heaters and keep their
// prescribed temperature; boundary neighbors clamp to the cell's own
// value (insulated boundary).  depth <= 1 selects the 2D 4-point
// stencil, otherwise the 3D 6-point stencil is used.
__global__ void heat_sim(float *oldg, float * newg, float *fixed,
int width, int height, int depth, float k)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    float left, right, up, down, above, below;
    if (idx < (width*height*depth))
    {
        // BUGFIX: the cell value was previously loaded BEFORE the bounds
        // check, so the excess threads of the last block performed an
        // out-of-bounds global read.  Load only once idx is known valid.
        float old = oldg[idx];
        // Heater nodes are pinned to their fixed temperature.
        if (fixed[idx] != 0) newg[idx] = fixed[idx];
        else
        {
            // x-, x+
            ((idx%width) == 0) ? (left = old) : (left = oldg[idx-1]);
            ((idx%width) == (width-1)) ? (right = old) : (right = oldg[idx+1]);
            // y-, y+
            (idx%(width*height) < width) ? (up = old) : (up = oldg[idx - width]);
            (idx%(width*height) >= ((height-1)*width))
            ? (down = old) : (down = oldg[idx + width]);
            // z-, z+
            if (depth <= 1)
            {
                // 2D case: 4-point stencil.  Float literals (4.0f) avoid
                // silently promoting the stencil arithmetic to double.
                above = 0.0f;
                below = 0.0f;
                newg[idx] = oldg[idx] + k*(up+down+left+right-(4.0f*oldg[idx]));
            }
            else
            {
                // 3D case: 6-point stencil.
                if (idx < (width*height)) above = old;
                else above = oldg[idx - (width*height)];
                if (idx >= ((depth-1)*(width*height))) below = old;
                else below = oldg[idx + (width*height)];
                newg[idx] = oldg[idx] + k*(up+down+left
                +right+above+below-(6.0f*oldg[idx]));
            }
        }
    }
}
// Copies newg into oldg element-wise so the next timestep reads the
// freshly computed grid (one element per thread, bounds-guarded).
__global__ void grid_cpy(float *oldg, float *newg, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) return;
    oldg[idx] = newg[idx];
}
///////////////////////////// End CUDA Functions ///////////////////////////////
// Parses the config file given as argv[1], builds the initial grid and
// heater map, runs `timesteps` iterations of the heat kernel on the GPU,
// and writes the final grid to ../heatOutput.csv.
int main(int argc, char * argv[])
{
///////////////////////////// Config file parser ///////////////////////////////
// Expected config order: dimension ("2D"/"3D"), k, timesteps, grid size,
// start temperature, then any number of heater lines.  Lines starting
// with '#', empty lines, and bare '\r' lines are skipped.
// NOTE(review): if the file cannot be opened, `heaters` stays
// unallocated and the loop below that reads heaters[i] dereferences an
// uninitialized pointer -- confirm the program is only run with a valid
// config path.
std::ifstream conf(argv[1]);
if (conf.is_open())
{
std::string line;
while (getline(conf, line)){
if ((line[0] == '#') || line.empty() || line[0] == '\r')
continue;
// get dimension
while ((line[0] == '#') || line.empty() || line[0] == '\r')
getline(conf,line);
dimension = line.substr(0,2);
// get k value
getline(conf, line);
while ((line[0] == '#') || line.empty() || line[0] == '\r')
getline(conf,line);
k = std::stof(line);
// get timesteps
getline(conf, line);
while ((line[0] == '#') || line.empty() || line[0] == '\r')
getline(conf,line);
timesteps = std::stoi(line);
// get grid size (comma-separated: "w,h" in 2D, "w,h,d" in 3D)
getline(conf, line);
while ((line[0] == '#') || line.empty() || line[0] == '\r')
getline(conf,line);
int comma = line.find(',');
width = std::stoi(line.substr(0, comma));
line = line.substr(comma+1);
if (dimension == "2D"){
height = std::stoi(line);
depth = 1;
}
else if (dimension == "3D"){
comma = line.find(',');
height = std::stoi(line.substr(0, comma));
depth = std::stoi(line.substr(comma+1));
}
// get block start temp
getline(conf, line);
while ((line[0] == '#') || line.empty() || line[0] == '\r')
getline(conf,line);
startTemp = std::stof(line);
// create heaters: one value per grid node, 0 = not a heater
heaters = new float[width*height*depth];
std::fill(heaters, heaters+(width*height*depth), 0);
// remaining lines each describe one heater box:
// 2D: "x,y,w,h,temp"   3D: "x,y,z,w,h,d,temp"
while(getline(conf, line)){
if (line[0] == '#' || line.empty() || line[0] == '\r')
continue;
int comma = line.find(',');
heat_x = std::stoi(line.substr(0, comma));
line = line.substr(comma+1);
comma = line.find(',');
heat_y = std::stoi(line.substr(0, comma));
line = line.substr(comma+1);
comma = line.find(',');
if (dimension == "2D"){
heat_w = std::stoi(line.substr(0, comma));
line = line.substr(comma+1);
comma = line.find(',');
heat_h = std::stoi(line.substr(0, comma));
line = line.substr(comma+1);
heat_d = 1;
heat_z = 0;
fixedTemp = std::stof(line);
}
else if (dimension == "3D"){
heat_z = std::stoi(line.substr(0, comma));
line = line.substr(comma+1);
comma = line.find(',');
heat_w = std::stoi(line.substr(0, comma));
line = line.substr(comma+1);
comma = line.find(',');
heat_h = std::stoi(line.substr(0, comma));
line = line.substr(comma+1);
comma = line.find(',');
heat_d = std::stoi(line.substr(0, comma));
line = line.substr(comma+1);
fixedTemp = std::stof(line);
}
// Stamp the heater box into the flattened grid.  The loop-local
// `int k` intentionally shadows the global float k (the timestep).
for (int i = heat_x+width*heat_y;
i < heat_x+heat_w+width*heat_y; i++)
for (int j = 0; j < heat_h; j++)
for (int k = heat_z; k < heat_z+heat_d; k++)
heaters[i+(j*width)+(k*width*height)] = fixedTemp;
}
}
}
else std::cerr << "Couldn't open config file.";
////////////////////////// End config file parser //////////////////////////////
int dim = width*height*depth;
// set up host grids: every node starts at startTemp, heater nodes at
// their fixed temperature
g_old = new float[dim];
g_new = new float[dim];
std::fill(g_new, g_new+dim, 0);
std::fill(g_old, g_old+dim, 0);
for (int i = 0; i < dim; i++)
{
g_old[i] = startTemp;
if (heaters[i] != 0) g_old[i] = heaters[i];
}
// allocate blockSize - must be at least one block (ceil-div of nodes by T_P_B)
int blockSize = ceil(float(dim)/float(T_P_B));
// allocate device memory in 1D array
cudaMalloc((void**)&d_new, dim*sizeof(float));
cudaMalloc((void**)&d_old, dim*sizeof(float));
cudaMalloc((void**)&d_heaters, dim*sizeof(float));
// copy filled arrays from host to device
cudaMemcpy(d_old, g_old, dim*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_new, g_new, dim*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_heaters, heaters, dim*sizeof(float), cudaMemcpyHostToDevice);
// run kernels: one heat step then copy new -> old, each timestep
for (int t = 0; t < timesteps; t++)
{
heat_sim<<<blockSize, T_P_B>>> (d_old, d_new, d_heaters,
width, height, depth, k);
cudaDeviceSynchronize();
grid_cpy<<< blockSize, T_P_B>>> (d_old, d_new, dim);
cudaDeviceSynchronize();
}
// copy data back from device to host
cudaMemcpy(g_new, d_new, dim*sizeof(float), cudaMemcpyDeviceToHost);
// print out to csv: one row per grid line, blank line between z-slices
std::ofstream csv("../heatOutput.csv", std::ios::out);
if (csv.is_open()){
for (int i = 0; i < dim; i++)
{
if (i%width == width-1) csv << g_new[i] << std::endl;
else csv << g_new[i] << ", ";
if (i%(width*height) == (width*height)-1) csv << std::endl;
}
}
else
std::cout << "Unable to open file, try again." << std::endl;
csv.close();
// deallocate all memory
delete[] g_old;
delete[] g_new;
delete[] heaters;
cudaFree(d_old);
cudaFree(d_new);
cudaFree(d_heaters);
}
|
23,025 | #include "includes.h"
// Transposes the m x n row-major matrix a into the n x m matrix b,
// one element per thread on a 2D launch (x -> rows of a, y -> columns).
__global__ void gpuTranspose(float *a, float *b, int m, int n) {
    uint row = blockDim.x * blockIdx.x + threadIdx.x;
    uint col = blockDim.y * blockIdx.y + threadIdx.y;
    if (row >= m || col >= n) {
        return;
    }
    b[col * m + row] = a[row * n + col];
}
23,026 | #include "kernels.cuh"
// TESTER: Tarek
// Advances each simulation clock by its drawn random variable.
// randomVariables: device array of per-simulation time increments
// times: device array of per-simulation times, updated in place
// s: the number of simulations
__global__ void updateTimesKernel(double* randomVariables, double* times, size_t s) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= s) {
        return;
    }
    times[gid] += randomVariables[gid];
}
// TESTER: Tarek
// Multiply the elements of 1D array x1 by the last column of 2D array x2 in-place
// double[] x1: 1D Array which is multiplied and stores the output
// double[] x2: 2D Array (row-major, n columns) which multiplies x1 in place
// size_t n: row length, used to address each row's final entry
// size_t size: overall size of x1 and the number of rows in x2
__global__ void offsetMultiplicationKernel(double* x1, double* x2, size_t n, size_t size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
// Ignore excess threads for a multi-block invocation
if (gid < size) {
// n + gid * n - 1 == (gid + 1) * n - 1: last element of row `gid`
x1[gid] *= x2[n + gid * n - 1];
}
}
// TESTER: Vidur
// fired_reactions: list (length s) of the IDs of the reaction fired for each simulation
// sim_configs: matrix giving configuration for each simulation (one row per simulation)
// state_changes: State change vector for each reaction (n entries per reaction)
// Launch layout: blockIdx.y selects the simulation; blockIdx.x/threadIdx.x
// cover the n species of that simulation.  (The s parameter is unused in
// the body.)
__global__ void updateSimsKernel(int s, int n, int* fired_reactions, int* sim_configs, int* state_changes, bool* stability_flags) {
// Add reactions vectors given by checkBins() to simulation configs
int gid = blockIdx.y * n + blockIdx.x * blockDim.x + threadIdx.x;
int rid = blockIdx.x * blockDim.x + threadIdx.x;
// Only progress if within bounds and simulation has not reached stability
if (rid < n && stability_flags[blockIdx.y] == false) {
sim_configs[gid] = sim_configs[gid] + state_changes[(fired_reactions[blockIdx.y]) * n + rid];
}
}
// Recomputes the propensity of every reaction of every simulation.
// Launch layout: blockIdx.y selects the simulation; blockIdx.x/threadIdx.x
// cover the m reactions.  For each reaction the propensity is the rate
// constant times the falling factorial of each reactant's molecule count
// (amt * (amt-1) * ... for that reactant's coefficient).  (The s and
// max_reactants-pair encoding: reactants stores, per reaction,
// max_reactants pairs of [molecule index, coefficient].)
__global__ void updatePropsKernel(int s, int n, int m, int max_reactants, int* sim_configs, int* reactants, double* reaction_rates, double* propensities) {
// Each block updates one propensity value
int gid = blockIdx.y * m + blockIdx.x * blockDim.x + threadIdx.x;
int rid = blockIdx.x * blockDim.x + threadIdx.x;
if (rid < m) {
int reaction_num = rid; // gid % m;
int reactants_starting_idx = reaction_num * max_reactants * 2;
double propensity = reaction_rates[reaction_num];
for (int i = 0; i < max_reactants; i++) {
int molecule_idx = i * 2 + reactants_starting_idx;
int reactant_coef_idx = i * 2 + reactants_starting_idx + 1;
int molecule_amt = sim_configs[blockIdx.y * n + reactants[molecule_idx]]; // [sim*n + reactants[molecule_idx]];
for (int j = 0; j < reactants[reactant_coef_idx]; j++) {
propensity *= (molecule_amt - j);
}
}
propensities[gid] = propensity;
}
}
// TESTER: Zhecheng
// helper function for exponential RVs
// Converts uniform(0,1) random variables to exponential ones by inverse
// transform: T = -log(1 - U) / rate, where the rate is the simulation's total
// propensity (last entry of its inclusive prefix scan of m propensities).
//   propscan: s rows of m scanned propensities; s: simulations; m: reactions.
__global__ void calculateExponentialRVsKernel(double* randomVariables, double* propscan, size_t s, size_t m) {
    int gid = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < s) {
        // propscan[m + gid * m - 1] == propscan[(gid + 1) * m - 1]: total of row gid.
        randomVariables[gid] = -1 / propscan[m + gid * m - 1] * log(1 - randomVariables[gid]); // replace each uniform variable in first half of RV array with exponential counterpart
    }
}
// TESTER: Tarek
// Each row of blocks corresponds to a single simulation (gridDim.y = s)
// Each row has enough blocks to create enough threads to check every bin location at once (gridDim.x * blockDim.x >= m)
// Selects, for each simulation, the reaction whose propensity-scan interval
// [scan[rid-1], scan[rid]) contains that simulation's uniform RV, and writes
// the winning reaction index into bins[simulation].
__global__ void checkBinsKernel(double* propensity_scan, double* uniformRVs, int* bins, size_t s, size_t m) {
    int tid = threadIdx.x;
    int rid = blockIdx.x * blockDim.x + tid;  // reaction (bin) index within the simulation
    int gid = blockIdx.y * m + rid;           // flat index into propensity_scan
    // only threads with row index less than the number of bins will check so that excess threads in last block are excluded
    if (rid < m) {
        // Move uniform RV for simulation and the bins being checked to shared memory
        double urv = uniformRVs[blockIdx.y]; // make sure uniformRVs is pointer offset
        // TODO: Would be made more efficient by padding with 0 at the beginning of the scan, as this would eliminate the if statement
        // TODO: Each value is loaded twice, should be changed to avoid this
        double left_edge, right_edge;
        if (rid == 0) {
            // First bin starts at 0 (scan has no leading zero element).
            left_edge = 0;
            right_edge = propensity_scan[gid];
        }
        else {
            left_edge = propensity_scan[gid - 1];
            right_edge = propensity_scan[gid];
        }
        // Last thread in row increases its right edge to include 1 in the boundary
        if (rid == m - 1) {
            right_edge += 1;
        }
        // Only one warp per row will diverge on this instruction
        // Exactly one rid per simulation satisfies this, so the unguarded write is race-free.
        if (left_edge <= urv && urv < right_edge) {
            bins[blockIdx.y] = rid;
        }
    }
}
// Marks a simulation as stable (no reaction can fire) when its total propensity
// is zero. propscan holds, per simulation, an inclusive prefix scan of the m
// propensities, so element (gid + 1) * m - 1 is that simulation's total.
//   s: number of simulations, m: reactions per simulation.
// One thread per simulation; correct for multi-block launches.
__global__ void propCheckKernel(double* propscan, bool* stability_flags, int s, int m) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // FIX: the row sum was previously indexed with threadIdx.x alone, which is
    // wrong for any launch with more than one block; it was also read before
    // the bounds check, allowing an out-of-range load for excess threads.
    if (gid < s) {
        double prop_row_sum = propscan[(gid + 1) * m - 1];  // last (total) entry of row gid
        // only set stability to true if the simulation propensity sum is zero
        if (prop_row_sum == 0) {
            stability_flags[gid] = true;
        }
    }
}
23,027 | #include "Graph.cuh"
#include "CudaHelper.cuh"
#include "VectorHelper.cuh"
namespace atspSolver
{
    // Construct a graph of `numberOfNodes` nodes with a GPU-generated random
    // adjacency matrix.
    Graph::Graph(int numberOfNodes) : numberOfNodes_(numberOfNodes), adjacencyMatrix_(new double[numberOfNodes*numberOfNodes])
    {
        Graph::generateGraph();
    }
    // Construct a graph from an existing row-major adjacency matrix (deep copy).
    Graph::Graph(const double *adjacencyMatrix, int numberOfNodes) : numberOfNodes_(numberOfNodes), adjacencyMatrix_(new double[numberOfNodes*numberOfNodes])
    {
        memcpy(this->adjacencyMatrix_, adjacencyMatrix, sizeof(double) * numberOfNodes * numberOfNodes);
    }
    Graph::~Graph()
    {
        delete[] adjacencyMatrix_;
    }
    // Copy graph.adjacencyMatrix_ into this->adjacencyMatrix_ by round-tripping
    // through device memory (d_copyVectorElements kernel). Assumes both graphs
    // have the same numberOfNodes_.
    void Graph::copyElementsFromGraph(const Graph& graph)
    {
        double *devAdjacencyMatrixSrc, *devAdjacencyMatrixDst;
        unsigned int totalBytes = sizeof(double) * numberOfNodes_ * numberOfNodes_;
        CUDA_ERR_CHECK(cudaMalloc((void**)&devAdjacencyMatrixSrc, totalBytes));
        CUDA_ERR_CHECK(cudaMalloc((void**)&devAdjacencyMatrixDst, totalBytes));
        CUDA_ERR_CHECK(cudaMemcpy((void *)devAdjacencyMatrixSrc, (const void *)graph.adjacencyMatrix_, totalBytes, cudaMemcpyHostToDevice));
        d_copyVectorElements << <blocksPerGrid, threadsPerBlock >> > (devAdjacencyMatrixDst, devAdjacencyMatrixSrc, numberOfNodes_ * numberOfNodes_);
        CUDA_ERR_CHECK(cudaGetLastError());  // surface launch-configuration errors
        CUDA_ERR_CHECK(cudaMemcpy(adjacencyMatrix_, devAdjacencyMatrixDst, totalBytes, cudaMemcpyDeviceToHost));
        CUDA_ERR_CHECK(cudaFree(devAdjacencyMatrixSrc));
        CUDA_ERR_CHECK(cudaFree(devAdjacencyMatrixDst));
    }
    Graph::Graph(const Graph& graph)
    {
        adjacencyMatrix_ = new double[graph.numberOfNodes_ * graph.numberOfNodes_];
        numberOfNodes_ = graph.numberOfNodes_;
        copyElementsFromGraph(graph);
    }
    Graph& Graph::operator=(const Graph& graph)
    {
        // Self-assignment guard: skip the pointless allocate/copy/free cycle.
        if (this == &graph)
            return *this;
        double *origAdjacencyMatrix = adjacencyMatrix_;
        adjacencyMatrix_ = new double[graph.numberOfNodes_ * graph.numberOfNodes_];
        numberOfNodes_ = graph.numberOfNodes_;
        copyElementsFromGraph(graph);
        delete[] origAdjacencyMatrix;  // free the old matrix only after the copy
        return *this;
    }
    // Element access by (row, col), row-major.
    double Graph::operator()(int row, int col) const
    {
        int index = col + row * numberOfNodes_;
        return this->adjacencyMatrix_[index];
    }
    // Element access by flat row-major index.
    double Graph::operator[](int index) const
    {
        return this->adjacencyMatrix_[index];
    }
    // Element-wise sum of two adjacency matrices, computed on the GPU.
    // Throws when the node counts differ.
    Graph Graph::operator+(const Graph& graph) const
    {
        if (numberOfNodes_ != graph.numberOfNodes_)
        {
            throw;
        }
        double *devAdjacencyMatrix1, *devAdjacencyMatrix2, *devAdjacencyMatrixDst;
        unsigned int totalBytes = sizeof(double) * numberOfNodes_ * numberOfNodes_;
        CUDA_ERR_CHECK(cudaMalloc((void**)&devAdjacencyMatrix1, totalBytes));
        CUDA_ERR_CHECK(cudaMalloc((void**)&devAdjacencyMatrix2, totalBytes));
        CUDA_ERR_CHECK(cudaMalloc((void**)&devAdjacencyMatrixDst, totalBytes));
        CUDA_ERR_CHECK(cudaMemcpy((void *)devAdjacencyMatrix1, (const void *)graph.adjacencyMatrix_, totalBytes, cudaMemcpyHostToDevice));
        CUDA_ERR_CHECK(cudaMemcpy((void *)devAdjacencyMatrix2, (const void *)adjacencyMatrix_, totalBytes, cudaMemcpyHostToDevice));
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
        // gpu work
        d_sumVectorElements << <blocksPerGrid, threadsPerBlock >> > (devAdjacencyMatrix1, devAdjacencyMatrix2, devAdjacencyMatrixDst, numberOfNodes_ * numberOfNodes_);
        CUDA_ERR_CHECK(cudaGetLastError());
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        float elapsedTime;
        cudaEventElapsedTime(&elapsedTime, start, stop);
        cudaEventDestroy(start);  // FIX: events were previously leaked
        cudaEventDestroy(stop);
        std::cout << std::endl;
        std::cout << "---> Duration in Graph addition: " << elapsedTime << " ms." << " <---" << std::endl;
        std::cout << std::endl;
        double *result = new double[numberOfNodes_ * numberOfNodes_];
        CUDA_ERR_CHECK(cudaMemcpy((void *)result, (const void *)devAdjacencyMatrixDst, totalBytes, cudaMemcpyDeviceToHost));
        CUDA_ERR_CHECK(cudaFree(devAdjacencyMatrix1));
        CUDA_ERR_CHECK(cudaFree(devAdjacencyMatrix2));
        CUDA_ERR_CHECK(cudaFree(devAdjacencyMatrixDst));
        // The Graph constructor deep-copies `result`, so release it here
        // (it was previously leaked on every addition).
        Graph sum((const double *)result, numberOfNodes_);
        delete[] result;
        return sum;
    }
    // Fill adjacencyMatrix_ with random values generated on the GPU.
    void Graph::generateGraph()
    {
        double *devAdjacencyMatrix;
        unsigned int totalBytes = sizeof(double) * numberOfNodes_ * numberOfNodes_;
        CUDA_ERR_CHECK(cudaMalloc((void**)&devAdjacencyMatrix, totalBytes));
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
        // gpu work
        d_fillVectorRandomly << <blocksPerGrid, threadsPerBlock >> > (devAdjacencyMatrix, numberOfNodes_ * numberOfNodes_);
        CUDA_ERR_CHECK(cudaGetLastError());
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        float elapsedTime;
        cudaEventElapsedTime(&elapsedTime, start, stop);
        cudaEventDestroy(start);  // FIX: events were previously leaked
        cudaEventDestroy(stop);
        std::cout << std::endl;
        std::cout << "---> Duration in Graph generation with random numbers: " << elapsedTime << " ms." << " <---" << std::endl;
        CUDA_ERR_CHECK(cudaMemcpy(adjacencyMatrix_, devAdjacencyMatrix, totalBytes, cudaMemcpyDeviceToHost));
        //displayMatrix((const double *)adjacencyMatrix, numberOfNodes, numberOfNodes);
        CUDA_ERR_CHECK(cudaFree(devAdjacencyMatrix));
    }
    int Graph::getNumberOfNodes() const
    {
        return numberOfNodes_;
    }
    void Graph::display() const
    {
        displayMatrix((const double *)adjacencyMatrix_, numberOfNodes_, numberOfNodes_);
    }
}
23,028 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/random.h>
#include <thrust/inner_product.h>
#include <thrust/binary_search.h>
#include <thrust/adjacent_difference.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <iostream>
#include <iomanip>
#include <iterator>
// simple routine to print contents of a vector
// Prints `name` (right-aligned, width 20) followed by every element of `v`
// separated by spaces, on one line to stdout. Works for any container with
// begin()/end() and a value_type (host or device vectors).
template <typename Vector>
void print_vector(const std::string& name, const Vector& v)
{
    typedef typename Vector::value_type T;
    std::cout << " " << std::setw(20) << name << " ";
    thrust::copy(v.begin(), v.end(), std::ostream_iterator<T>(std::cout, " "));
    std::cout << std::endl;
}
// dense histogram using binary search
// Builds a dense histogram of `input` into `histogram`: bin v counts how many
// elements equal v, for v in [0, max(input)]. Assumes non-negative integer
// values and a non-empty input (data.back() is read).
// Also prints intermediate stages for demonstration purposes.
template <typename Vector1,
          typename Vector2>
void dense_histogram(const Vector1& input,
                     Vector2& histogram)
{
    typedef typename Vector1::value_type ValueType; // input value type
    typedef typename Vector2::value_type IndexType; // histogram index type
    // copy input data (could be skipped if input is allowed to be modified)
    thrust::device_vector<ValueType> data(input);
    // print the initial data
    print_vector("initial data", data);
    // sort data to bring equal elements together
    thrust::sort(data.begin(), data.end());
    // print the sorted data
    print_vector("sorted data", data);
    // number of histogram bins is equal to the maximum value plus one
    IndexType num_bins = data.back() + 1;
    // resize histogram storage
    histogram.resize(num_bins);
    // find the end of each bin of values
    // upper_bound on the sorted data yields, per bin value, the cumulative count
    thrust::counting_iterator<IndexType> search_begin(0);
    thrust::upper_bound(data.begin(), data.end(),
                        search_begin, search_begin + num_bins,
                        histogram.begin());
    // print the cumulative histogram
    print_vector("cumulative histogram", histogram);
    // compute the histogram by taking differences of the cumulative histogram
    thrust::adjacent_difference(histogram.begin(), histogram.end(),
                                histogram.begin());
    // print the histogram
    print_vector("histogram", histogram);
}
// sparse histogram using reduce_by_key
// Builds a sparse histogram of `input`: histogram_values receives the distinct
// values present and histogram_counts their multiplicities, in sorted order.
// Assumes a non-empty input (the inner_product spans data.size() - 1 pairs).
// Also prints intermediate stages for demonstration purposes.
template <typename Vector1,
          typename Vector2,
          typename Vector3>
void sparse_histogram(const Vector1& input,
                      Vector2& histogram_values,
                      Vector3& histogram_counts)
{
    typedef typename Vector1::value_type ValueType; // input value type
    typedef typename Vector3::value_type IndexType; // histogram index type
    // copy input data (could be skipped if input is allowed to be modified)
    thrust::device_vector<ValueType> data(input);
    // print the initial data
    print_vector("initial data", data);
    // sort data to bring equal elements together
    thrust::sort(data.begin(), data.end());
    // print the sorted data
    print_vector("sorted data", data);
    // number of histogram bins is equal to number of unique values (assumes data.size() > 0)
    // counts 1 + (number of adjacent unequal pairs) in the sorted data
    IndexType num_bins = thrust::inner_product(data.begin(), data.end() - 1,
                                               data.begin() + 1,
                                               IndexType(1),
                                               thrust::plus<IndexType>(),
                                               thrust::not_equal_to<ValueType>());
    // resize histogram storage
    histogram_values.resize(num_bins);
    histogram_counts.resize(num_bins);
    // compact find the end of each bin of values
    // reduce_by_key sums a constant 1 per element within each run of equal values
    thrust::reduce_by_key(data.begin(), data.end(),
                          thrust::constant_iterator<IndexType>(1),
                          histogram_values.begin(),
                          histogram_counts.begin());
    // print the sparse histogram
    print_vector("histogram values", histogram_values);
    print_vector("histogram counts", histogram_counts);
}
// Demo driver: generates 40 pseudo-random values (each the average of 4 draws
// from U{0..9}, concentrating mass near the middle), then builds and prints
// both a dense and a sparse histogram of them.
int main(void)
{
    thrust::default_random_engine rng;
    thrust::uniform_int_distribution<int> dist(0, 9);
    const int N = 40;  // number of data points
    const int S = 4;   // draws averaged per data point
    // generate random data on the host
    thrust::host_vector<int> input(N);
    for(int i = 0; i < N; i++)
    {
        int sum = 0;
        for (int j = 0; j < S; j++)
            sum += dist(rng);
        input[i] = sum / S;  // integer average in [0, 9]
    }
    // demonstrate dense histogram method
    {
        std::cout << "Dense Histogram" << std::endl;
        thrust::device_vector<int> histogram;
        dense_histogram(input, histogram);
    }
    // demonstrate sparse histogram method
    {
        std::cout << "Sparse Histogram" << std::endl;
        thrust::device_vector<int> histogram_values;
        thrust::device_vector<int> histogram_counts;
        sparse_histogram(input, histogram_values, histogram_counts);
    }
    // Note:
    // A dense histogram can be converted to a sparse histogram
    // using stream compaction (i.e. thrust::copy_if).
    // A sparse histogram can be expanded into a dense histogram
    // by initializing the dense histogram to zero (with thrust::fill)
    // and then scattering the histogram counts (with thrust::scatter).
    return 0;
}
|
23,029 | #include "includes.h"
// Updates the "memory size" statistic for the weights and bias of each neuron
// in a fully-connected layer:
//   mem = (1 - avgGrad^2 / avgGradVar) * mem + 1
// One thread per neuron j of this layer (the grid may be 2D in blocks); each
// thread walks all prevLayerSize incoming weights, which are laid out with a
// stride of thisLayerSize. Neurons whose dropout-mask entry is nonzero are
// skipped — NOTE(review): assumes nonzero mask means "dropped"; confirm.
__global__ void FullyConnectedUpdateMemoryKernel( float *avgWeightGradPtr, float *avgBiasGradPtr, float *avgWeightGradVarPtr, float *avgBiasGradVarPtr, float *weightMemorySizePtr, float *biasMemorySizePtr, float *dropoutMaskPtr, int prevLayerSize, int thisLayerSize )
{
    // j: index of this layer's neuron handled by this thread
    const int j = blockDim.x * blockIdx.y * gridDim.x   // rows preceding current row in grid
                + blockDim.x * blockIdx.x               // blocks preceding current block
                + threadIdx.x;
    if (j >= thisLayerSize || dropoutMaskPtr[j])
        return;  // out of range, or neuron masked out this step
    // Walk the incoming weights of neuron j: entries j, j + thisLayerSize, ...
    int index = j;
    for (int i = 0; i < prevLayerSize; i++)
    {
        const float g = avgWeightGradPtr[index];
        weightMemorySizePtr[index] = (1.0f - g * g / avgWeightGradVarPtr[index]) * weightMemorySizePtr[index] + 1.0f;
        index += thisLayerSize;
    }
    // Same update for the neuron's bias statistic.
    const float gb = avgBiasGradPtr[j];
    biasMemorySizePtr[j] = (1.0f - gb * gb / avgBiasGradVarPtr[j]) * biasMemorySizePtr[j] + 1.0f;
}
23,030 | // Yuxuan, 27 June
// Parallel (CUDA) version of Hines algorthm.
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <cstring>
#include <cuda.h>
// Solves one Hines (quasi-tridiagonal, tree-structured) system per *thread*.
// u/l/d/rhs hold gridDim.x * blockDim.x independent systems of size N stored
// back-to-back; p is the parent-index array shared by all systems. The
// solution is left in rhs.
__global__ void HinesAlgo (
    double *u, double *l, double *d,
    double *rhs, int *p, int N
) {
    int i;
    double factor;
    // FIX: one system per thread. The previous offset used blockIdx.x alone,
    // so all blockDim.x threads of a block raced on the same system with
    // unguarded read-modify-writes, and only gridDim.x of the runNum systems
    // prepared by the host were touched at all.
    int offset = (blockIdx.x * blockDim.x + threadIdx.x) * N;
    // Backward elimination sweep from the leaves toward node 0.
    for (i=N-1;i>=1;--i) {
        factor = u[i+offset] / d[i+offset];
        d[p[i]+offset] -= factor * l[i+offset];
        rhs[p[i]+offset] -= factor * rhs[i+offset];
    }
    rhs[0+offset] /= d[0+offset];
    // Forward substitution sweep from node 0 back out to the leaves.
    for (i=1;i<=N-1;++i) {
        rhs[i+offset] -= l[i+offset] * rhs[p[i]+offset];
        rhs[i+offset] /= d[i+offset];
    }
}
// the main function receives 3 parameters: input path, output path and the number of repeated runs
// the number of repeated runs is required to be a multiple of 32
int main (int argc, char * argv[]) {
    FILE *fp;
    clock_t time;
    int runNum;
    cudaDeviceProp devProp;
    int blockNum, blockSize;
    // Host data
    int *id; double *u; double *l;
    double *d; double *rhs; int *p;
    // Device data
    int *id1; double *u1; double *l1;
    double *d1; double *rhs1; int *p1;
    int N;
    // Require all three command-line arguments before touching them.
    if (argc < 4) {
        printf("Usage: %s <input> <output> <numRuns>\n", argv[0]);
        return -1;
    }
    // read data
    fp = fopen(argv[1], "r");
    if (fp == NULL) {
        printf("Cannot open input file %s\n", argv[1]);
        return -1;
    }
    fscanf(fp, "%d", &N);
    id = new int [N]; u = new double [N]; l = new double [N];
    d = new double [N]; rhs = new double [N]; p = new int [N];
    for (int i=0;i<N;++i) {
        fscanf(
            fp, "%d %lf %lf %lf %lf %d",
            &id[i], &u[i], &l[i], &rhs[i], &d[i], &p[i]);
    }
    fclose(fp);
    runNum = atoi(argv[3]);
    // FIX: devProp was previously used without ever being filled in, so
    // multiProcessorCount below was stack garbage. Query device 0 explicitly.
    cudaGetDeviceProperties(&devProp, 0);
    // choose grid dim and block dim by number of SMs and number of runs
    blockNum = -1;
    for (blockSize=256;blockSize>=32;blockSize>>=1) {
        if (runNum%blockSize==0) {
            blockNum = runNum / blockSize;
            if (blockNum>=devProp.multiProcessorCount) {
                break;
            }
        }
    }
    if (blockNum==-1) {
        printf("Number of runs is not a multiple of 32.");
        return -1;
    }
    // allocate space for device data
    cudaMalloc(reinterpret_cast<void **>(&id1), N*sizeof(int));
    cudaMalloc(reinterpret_cast<void **>(&u1), runNum*N*sizeof(double));
    cudaMalloc(reinterpret_cast<void **>(&l1), runNum*N*sizeof(double));
    cudaMalloc(reinterpret_cast<void **>(&d1), runNum*N*sizeof(double));
    cudaMalloc(reinterpret_cast<void **>(&rhs1), runNum*N*sizeof(double));
    cudaMalloc(reinterpret_cast<void **>(&p1), N*sizeof(int));
    time = clock(); // include time for device memory copy so that comparison to serial code is fair
    // copy host data to device data (every run starts from the same system)
    cudaMemcpy(id1, id, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(p1, p, N*sizeof(int), cudaMemcpyHostToDevice);
    for (int i=0;i<runNum;++i) {
        cudaMemcpy(u1+i*N, u, N*sizeof(double), cudaMemcpyHostToDevice);
        cudaMemcpy(l1+i*N, l, N*sizeof(double), cudaMemcpyHostToDevice);
        cudaMemcpy(d1+i*N, d, N*sizeof(double), cudaMemcpyHostToDevice);
        cudaMemcpy(rhs1+i*N, rhs, N*sizeof(double), cudaMemcpyHostToDevice);
    }
    HinesAlgo <<<blockNum, blockSize>>> (u1, l1, d1, rhs1, p1, N);
    cudaDeviceSynchronize();
    // copy result back to host data; all runs solve identical inputs, so run
    // 0's solution is representative
    cudaMemcpy(u, u1, N*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(l, l1, N*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(d, d1, N*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(rhs, rhs1, N*sizeof(double), cudaMemcpyDeviceToHost);
    time = clock() - time;
    printf("Parallel time cost of %d runs: %.2f seconds.\n", runNum, static_cast<double>(time)/CLOCKS_PER_SEC);
    // write result
    fp = fopen(argv[2], "w+");
    for (int i=0;i<N;++i) {
        fprintf(
            fp, "%d %lf %lf %lf %lf\n",
            id[i], u[i], l[i], rhs[i], d[i]);
    }
    fclose(fp);
    // FIX: device allocations were previously never freed.
    cudaFree(id1); cudaFree(u1); cudaFree(l1);
    cudaFree(d1); cudaFree(rhs1); cudaFree(p1);
    delete[] id; delete[] u; delete[] l;
    delete[] d; delete[] rhs; delete[] p;
    return 0;
}
23,031 | #include <iostream>
#include <algorithm>
__managed__ unsigned int messagenum = 0;
using namespace std;
// Kernel: searches every candidate message i in [0, n) for cube roots of the
// ciphertext mod n (i.e. i^3 == ciphertext (mod n)). Matches are appended to
// messagelist (unified memory) through the managed counter `messagenum`;
// ciphermessagescount is the capacity of messagelist.
__global__
void breakingrsa(unsigned long long ciphertext,unsigned long long int n,unsigned long long *messagelist, int ciphermessagescount){
    unsigned long long int thread_pos = blockIdx.x * blockDim.x +threadIdx.x;
    int total_threads = blockDim.x * gridDim.x;
    // Grid-stride loop over all candidate messages.
    for (unsigned long long i = thread_pos; i < n; i+=total_threads) {
        unsigned long long val =1 ;
        for(int j =0;j<3;j++) {
            val = ((val * i) % n);  // val = i^3 mod n
        }
        if(val==ciphertext){
            // FIX: reserve a slot atomically *first*, then bounds-check the
            // reserved index. The previous check-then-add pattern raced:
            // several threads could pass `messagenum < ciphermessagescount`
            // simultaneously and all increment, writing past messagelist.
            unsigned int idx = atomicAdd(&messagenum, 1u);
            if (idx < (unsigned int)ciphermessagescount) {
                messagelist[idx] = i;
            } else {
                // Roll back so the host-visible count never exceeds capacity.
                atomicSub(&messagenum, 1u);
            }
        }
    }
}
// This is the main function takes in aruments the ciphertext c and the modulus n.
// Launches the search kernel over all residues mod n, then prints the sorted
// cube roots found (or a message when there are none).
int main(int argc, char **argv) {
    if (argc < 3) {
        // When the input arguments are wrong.
        std::cerr << "USAGE: " << argv[0] << " ciphertext" << " " <<argv[1]<< " modulus" << argv[2] << std::endl;
        return 1;
    }
    unsigned long long modulus = stoull(argv[2]);
    unsigned long long cipher = stoull(argv[1]);
    unsigned long long int *messagelist;
    int ciphermessagescount = 100;  // capacity of the result list
    // Unified memory so both the kernel and the host code below can access it.
    cudaMallocManaged(&messagelist,ciphermessagescount*sizeof(unsigned long long));
    int threadsperblock = 256;
    // Ceiling division so every residue in [0, modulus) gets a thread.
    int totalblocks = (modulus+threadsperblock-1) / threadsperblock;
    breakingrsa<<< totalblocks,threadsperblock >>>(cipher,modulus,messagelist,100);
    // Must synchronize before the host reads messagenum / messagelist.
    cudaDeviceSynchronize();
    if (messagenum == 0){
        cout << "No cube roots of " << cipher << " (mod " << argv[2] << ")";
    }else{
        sort(messagelist,messagelist+messagenum);
        for(int i = 0; i <messagenum;i++){
            cout << messagelist[i] <<"^3 = " << cipher << " (mod " << argv[2] << ")" << endl;
        }
    }
    cudaFree(messagelist);
    return 0;
}
|
23,032 | #include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
#define WIDTH 100;
// Computes C = A * B for square n x n row-major matrices, one thread per
// output element.
__global__
void Matrix_multiplication(int *A, int *B, int *C, int n){
    int col=threadIdx.x+blockIdx.x*blockDim.x;
    int row=threadIdx.y+blockIdx.y*blockDim.y;
    if((row<n)&&(col<n)){
        int value=0;
        for(int k=0; k<n; k++){
            // FIX: element (k, col) of B is B[k*n + col]; the previous
            // B[col*k + n] indexed a wrong (and sometimes out-of-range) entry.
            value+=A[row*n+k]*B[k*n+col];
        }
        // FIX: the store is now inside the bounds guard; it previously ran for
        // out-of-range threads too, writing outside C.
        C[row*n+col]=value;
    }
}
// Multiplies a 10x10 matrix A (row i filled with i+1) by the all-ones matrix B
// on the GPU and prints the product.
int main(){
    int *A, *B, *C;        // pinned host matrices, 10 x 10 row-major
    int *d_A, *d_B, *d_C;  // device copies
    const int n = 10;      // matrix dimension (n*n = 100 elements)
    dim3 dimGrid(10, 10, 1);
    dim3 dimBlock(10, 10, 1);
    cudaMallocHost((void**)&A, sizeof(int)*100);
    cudaMallocHost((void**)&B, sizeof(int)*100);
    cudaMallocHost((void**)&C, sizeof(int)*100);
    for(int i=0; i<10; i++){
        for(int j=0; j<10; j++){
            A[i*10+j]=i+1;
            B[i*10+j]=1;
        }
    }
    if(cudaMalloc((void **)&d_A, 100*sizeof(int))!=cudaSuccess ){
        printf("cudaMalloc d_A:Error while dynamically allocating memory\n");
        exit(0);
    }
    if(cudaMalloc((void **)&d_B, 100*sizeof(int))!=cudaSuccess){
        printf("cudaMalloc d_B:Error while dynamically allocating memory\n");
        exit(0);
    }
    if(cudaMalloc((void **)&d_C, 100*sizeof(int))!=cudaSuccess){
        printf("cudaMalloc d_C:Error while dynamically allocating memory\n");
        exit(0);
    }
    if((cudaMemcpy(d_A, A, 100*sizeof(int), cudaMemcpyHostToDevice))!=cudaSuccess){
        printf("cudaMemcpy: Error while copying the matrix A\n");
        exit(0);
    }
    if(cudaMemcpy(d_B, B, 100*sizeof(int), cudaMemcpyHostToDevice)!=cudaSuccess){
        printf("cudaMemcpy: Error while copying the matrix B\n");
        exit(0);
    }
    // FIX: the matrix dimension is 10, not 100 (100 is the element count);
    // passing 100 made the kernel read far outside the 10x10 buffers.
    Matrix_multiplication<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, n);
    // FIX: destination/source were swapped — a device-to-host copy must write
    // into the host buffer C from the device buffer d_C.
    if(cudaMemcpy(C, d_C, 100*sizeof(int), cudaMemcpyDeviceToHost)!=cudaSuccess){
        printf("cudaMemcpy: Error while copying the matrix C\n");
    }
    // FIX: d_B was freed twice and d_C never freed.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    for(int i=0; i<10; i++){
        for(int j=0; j<10; j++){
            printf("%d ",C[i*10+j]);
        }
        printf("\n");
    }
    // Release the pinned host buffers (previously leaked).
    cudaFreeHost(A);
    cudaFreeHost(B);
    cudaFreeHost(C);
    return 0;
}
|
23,033 | #include "includes.h"
// Copies src (rowssrc x colssrc, row-major) into the interior of dst, which has
// a border of `up`/`down`/`left`/`right` extra rows/columns around it. Only the
// interior pixels of dst are written; border pixels are left untouched.
// n = rowssrc * colssrc, the number of source elements.
__global__ void cu_copyMakeBorder(const float *src, float* dst, const int rowssrc, const int colssrc, const int up, const int down, const int left, const int right, const int n){
    const int step = blockDim.x * gridDim.x;
    const int dstcols = colssrc + left + right;  // row stride of dst
    // Grid-stride loop over every source pixel.
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += step) {
        const int r = i / colssrc;  // source row
        const int c = i % colssrc;  // source column
        dst[(up + r) * dstcols + (left + c)] = src[i];
    }
}
// Thomas-algorithm finish for `gaussElim` interleaved tridiagonal subsystems.
// Threads 0..gaussElim-1 each solve one subsystem whose equations are strided
// by gaussElim through the shared arrays a (sub-diag), b (diag), c (super-diag)
// and d (rhs). The solution is written back into d.
template <class T, int gaussElim>
__device__ void serial(T *a, T *b, T *c, T *d, int numEqs, int thid){
    // Normalize the first equation of this thread's subsystem.
    c[thid] = c[thid] / b[thid];
    d[thid] = d[thid] / b[thid];
    T tmp1, tmp2;
    // Forward elimination sweep with stride gaussElim.
    for (int i = gaussElim+thid; i < numEqs; i+=gaussElim) //i=stride+thid; i + = stride; i < numEqs*numSerialize (systemSize)
    {
        //iterations = numSystems/gaussElim - 1
        //5 shared memory loads, 2 shared memory store
        tmp2 = a[i];
        tmp1 = (b[i] - tmp2 * c[i-gaussElim]);  // pivot after eliminating the sub-diagonal
        c[i] /= tmp1;//(b[i] - a[i] * c[i-stride]);
        d[i] = (d[i] - d[i-gaussElim] * tmp2) /tmp1;//(b[i] - a[i] * c[i-stride]); //i - stride
    }
    //x[numEqs-gaussElim+thid] = d[numEqs-gaussElim+thid]; //c[(numEqs-1)*numThreads+thid]
    // Back substitution; tmp1 carries the previously solved unknown to avoid a reload.
    tmp1 = d[numEqs-gaussElim+thid];
    for (int i =numEqs-2*gaussElim+thid; i >= 0; i-=gaussElim) //c[(numEqs-2)*numThreads+thid] i-=stride
    {
        //iterations = numSystems/gaussElim - 1
        //3 loads, 1 store
        tmp1 = d[i] - c[i] * tmp1; //i + stride
        d[i] = tmp1;
    }
    //total loads: 7*(numSystems/gaussElim-1) + 4
    //total stores 3*(numSystems/gaussElim-1) + 2
}
/*template <class T, int gaussElim>
__device__ void serial(T *a, T *b, T *c, T *d, int numEqs, int thid){
//c[(numEqs-1)*stride + thid] = 0;
c[thid] = c[thid] / b[thid]; // c[thid]
d[thid] = d[thid] / b[thid];
T tmp1;
for (int i = gaussElim+thid; i < numEqs; i+=gaussElim) //i=stride+thid; i + = stride; i < numEqs*numSerialize (systemSize)
{
tmp1 = (b[i] - a[i] * c[i-gaussElim]);
c[i] = c[i] / tmp1;//(b[i] - a[i] * c[i-stride]);
d[i] = (d[i] - d[i-gaussElim] * a[i]) /tmp1;//(b[i] - a[i] * c[i-stride]); //i - stride
}
//x[numEqs-gaussElim+thid] = d[numEqs-gaussElim+thid]; //c[(numEqs-1)*numThreads+thid]
for (int i =numEqs-2*gaussElim+thid; i >= 0; i-=gaussElim) //c[(numEqs-2)*numThreads+thid] i-=stride
{
d[i] = d[i] - c[i] * d[i+gaussElim]; //i + stride
}
}*/
// Hybrid tridiagonal solver: `iteration` steps of parallel cyclic reduction
// (PCR) in shared memory, then the remaining gaussElim interleaved subsystems
// are finished serially by serial(). One system per block; assumes
// blockDim.x == systemSize (each thread loads/stores exactly one equation).
// Per in-code notes, iteration is expected to equal log2(gaussElim).
// Dynamic shared memory must hold 4 * (systemSize + 1) elements of T.
// The solution is written back into d_d; d_a/d_b/d_c are left unchanged.
template <class T, int loopsPerThread, int gaussElim, int iteration>
__global__ void pcrKernelBranchFree(T *d_a, T *d_b, T *d_c, T *d_d, int numEquations, int systemSize)
{
    int thid = threadIdx.x;
    int blid = blockIdx.x;
    int delta = 1;  // current PCR reach; doubles every iteration
    extern __shared__ char shared[];
    // Partition dynamic shared memory into the four coefficient arrays
    // (the +1 padding separates them).
    T* a = (T*)shared;
    T* b = (T*)&a[systemSize+1];
    T* c = (T*)&b[systemSize+1];
    T* d = (T*)&c[systemSize+1];
    T aNew, bNew, cNew, dNew;
    // Stage this block's system into shared memory, one equation per thread.
    a[thid] = d_a[thid + blid * systemSize];
    b[thid] = d_b[thid + blid * systemSize];
    c[thid] = d_c[thid + blid * systemSize];
    d[thid] = d_d[thid + blid * systemSize];
    __syncthreads();
    for (int j = 0; j <iteration; j++)
    {
        //12 loads and four saves per iteration
        //iteration = log2(gaussElim)
        // Clamp neighbor indices at the boundaries (branch-free via min/max).
        int iRight = min(thid+delta, systemSize-1);
        int iLeft = max(thid-delta, 0);
        // Eliminate the neighbors `delta` away on each side.
        T tmp1 = a[thid]/b[iLeft];
        aNew = -a[iLeft]*tmp1;
        bNew = b[thid] - c[iLeft]*tmp1;
        dNew = d[thid] - d[iLeft]*tmp1;
        tmp1 = c[thid]/b[iRight];
        bNew -= a[iRight]*tmp1;
        cNew = -c[iRight]*tmp1;
        dNew -= d[iRight]*tmp1;
        // Barrier before and after the writeback: all reads of the old values
        // must finish before any thread overwrites them.
        __syncthreads();
        a[thid] = aNew;
        b[thid] = bNew;
        c[thid] = cNew;
        d[thid] = dNew;
        __syncthreads();
        delta = delta* 2;
    }
    // Finish the gaussElim remaining interleaved subsystems serially.
    if(thid<gaussElim)
    {
        serial<T,gaussElim>(a,b,c,d,systemSize, thid);
    }
    __syncthreads();
    //Total Shared Mem loads per thread: 5 + 12*log2(gaussElim) + gaussElim/numThreads*(8*numThreads/gaussElim-1 + 5)
    //Total Shared Mem loads per thread: 5 + 12*log2(gaussElim) + (8+4*gaussElim/numThreads) = 13 + 12*log2(gaussElim) + 4*gaussElim/numThreads
    //Total Shared Mem stores per thread: 4*log2(gaussElim) + (3+3*gaussElim/numThreads) = 3 + 4*log2(gaussElim) + 3*gaussElim/numThreads
    //#pragma unroll
    d_d[thid + blid * systemSize] = d[thid];
}
// Multi-block variant of the PCR + serial hybrid: BPS blocks cooperate on one
// system, so PCR neighbors that fall outside this block's shared-memory tile
// are fetched from global memory instead. `extraStride` is the initial PCR
// reach (this kernel continues a reduction started elsewhere).
// NOTE(review): the final writeback loops loopsPerThread times but the loads
// stage only one element per thread — confirm loopsPerThread == 1 is intended
// here, or that callers pre-stage the extra elements.
// Dynamic shared memory must hold 5 * (systemSize + 2) elements of T.
template <class T, int loopsPerThread, int gaussElim, int iteration>
__global__ void pcrKernelBranchFree(T *d_a, T *d_b, T *d_c, T *d_d, int numEquations, int systemSize, int extraStride, int BPS)
{
    int thid = threadIdx.x;
    int blid = blockIdx.x;
    int bdim = blockDim.x;
    int delta = extraStride;  // current PCR reach
    int bside = blid%BPS;     // this block's position within its system's block group
    int l = 0;
    //int iteration = (int)log2(T(gaussElim));
    //__syncthreads();
    extern __shared__ char shared[];
    // Partition dynamic shared memory (with +2 padding between arrays).
    T* a = (T*)shared;
    T* b = (T*)&a[systemSize+2];
    T* c = (T*)&b[systemSize+2];
    T* d = (T*)&c[systemSize+2];
    T* x = (T*)&d[systemSize+2];
    // Stage this block's tile of the system into shared memory.
    a[thid] = d_a[(thid + blid * systemSize)];
    b[thid] = d_b[(thid + blid * systemSize)];
    c[thid] = d_c[(thid+ blid * systemSize)];
    d[thid] = d_d[(thid+ blid * systemSize)];
    T aNew, bNew, cNew, dNew;
    __syncthreads();
    //parallel cyclic reduction
    for (int j = 0; j <iteration; j++)
    {
        //#pragma unroll
        int i = thid;
        // Clamp neighbor indices only at the true system boundary (the first
        // and last block of the group); interior tile edges read global memory.
        int iRight = i+delta;
        if (iRight >= systemSize && bside==BPS-1)
            iRight = systemSize - 1;
        int iLeft = i-delta;
        if (iLeft < 0 && bside == 0) iLeft = 0;
        if(iRight >=systemSize)
        {
            // Right neighbor lives in the next block's tile: fetch from global.
            bNew = b[i] - c[iLeft]*a[i]/b[iLeft] - d_a[blid*systemSize + iRight]*c[i]/d_b[blid*systemSize+ iRight];
            dNew = d[i] - d[iLeft]*a[i]/b[iLeft] - d_d[blid*systemSize + iRight]*c[i]/d_b[blid*systemSize + iRight];
            aNew = -a[iLeft] * a[i]/b[iLeft];
            cNew = -d_c[blid*systemSize+iRight]*c[i]/d_b[blid*systemSize+iRight];
        }
        //iLeft = iLeft%systemSize;
        else if(iLeft < 0)
        {
            // Left neighbor lives in the previous block's tile: fetch from global.
            bNew = b[i] - d_c[blid*systemSize+iLeft] * a[i]/d_b[blid*systemSize+iLeft] - a[iRight] * c[i]/b[iRight];
            dNew = d[i] - d_d[blid*systemSize+iLeft] * a[i]/d_b[blid*systemSize+iLeft] - d[iRight] * c[i]/b[iRight];
            aNew = -d_a[blid*systemSize+iLeft] * a[i]/d_b[blid*systemSize+iLeft];
            cNew = -c[iRight] * c[i]/b[iRight];
        }
        else
        {
            // Both neighbors are resident in shared memory.
            T tmp1 = a[i] / b[iLeft];
            T tmp2 = c[i] / b[iRight];
            bNew = b[i] - c[iLeft] * tmp1 - a[iRight] * tmp2;
            dNew = d[i] - d[iLeft] * tmp1 - d[iRight] * tmp2;
            aNew = -a[iLeft] * tmp1;
            cNew = -c[iRight] * tmp2;
        }
        delta *= 2;
        __syncthreads();
        b[thid] = bNew;
        d[thid] = dNew;
        a[thid] = aNew;
        c[thid] = cNew;
    }
    // Finish the gaussElim remaining interleaved subsystems serially.
    if(thid<gaussElim)
    {
        serial<T,gaussElim>(a,b,c,d,systemSize, thid);
    }
    __syncthreads();
    //#pragma unroll
    // Write the solution tile back to global memory.
    for(l=0; l < loopsPerThread; l++)
    {
        d_d[thid + l*bdim + blid * systemSize] = d[thid+l*bdim];
    }
}
// One global-memory PCR step over systems of `systemSize` equations, with
// `numBlocksPerSystem` blocks cooperating on each system. Reads from the
// d_a/d_b/d_c/d_d arrays and writes the reduced coefficients to the *out
// arrays (out-of-place, so no intra-step races). `stride` is the current PCR
// reach; smallSystem is the per-block tile width used to advance `pos`/`i`.
// NOTE(review): the loop advances blockLeftMin by blockDim.x*gridDim.x while
// blockRightMax stays fixed — confirm this is intended for multi-lap launches;
// it looks suspicious but is preserved here unchanged.
template <class T, int smallSystem>
__global__ void globalPCROneStep(T* d_aout, T* d_bout, T* d_cout, T*d_dout,
T *d_a, T *d_b, T *d_c, T *d_d, int systemSize,
int stride, int numBlocksPerSystem)
{
    int thid = threadIdx.x;
    int blid = blockIdx.x;
    //int bSide = blid & (numBlocksPerSystem-1);
    //int delta = stride;
    int bSide = blid%numBlocksPerSystem;  // this block's position within its system
    //__syncthreads();
    //parallel cyclic reduction
    int blockLeftMin = int(blid/numBlocksPerSystem)*systemSize;  // first equation of this system
    int blockRightMax = blockLeftMin + systemSize;               // one past the last equation
    int pos = bSide*smallSystem + thid;  // equation index within the system
    int i = thid + blockLeftMin + bSide*blockDim.x;  // flat index into the global arrays
    while(pos<systemSize)
    {
        // Clamp neighbor indices at this system's boundaries.
        int iRight = i+stride;
        if (iRight >= blockRightMax) iRight = blockRightMax-1;
        int iLeft = i-stride;
        //iLeft = iLeft%systemSize;
        if (iLeft < blockLeftMin) iLeft = blockLeftMin;
        // Standard PCR elimination of the neighbors `stride` away.
        T tmp1 = d_a[i] / d_b[iLeft];
        T tmp2 = d_c[i] / d_b[iRight];
        //could get small reuse (3x?) if i still share between threads
        d_bout[i] = d_b[i] - d_c[iLeft] * tmp1 - d_a[iRight] * tmp2;
        d_dout[i] = d_d[i] - d_d[iLeft] * tmp1 - d_d[iRight] * tmp2;
        d_aout[i] = -d_a[iLeft] * tmp1;
        d_cout[i] = -d_c[iRight] * tmp2;
        blockLeftMin += blockDim.x*gridDim.x;
        pos += smallSystem*numBlocksPerSystem;
        i += smallSystem*numBlocksPerSystem;
    }
    //need global synch here?
}
// Runs successive global-memory PCR steps for one system per block, doubling
// the reach `delta` each pass until the remaining subsystem count reaches
// smallSystem. Ping-pongs between the (d_a..d_d) and (d_aout..d_dout) arrays
// via `count` so each step reads one set and writes the other.
// NOTE(review): which array set holds the final result depends on the parity
// of the number of passes — callers must account for that.
template <class T, int smallSystem>
__global__ void globalPCROneBlockSystem(T* d_aout, T* d_bout, T* d_cout, T*d_dout,
T *d_a, T *d_b, T *d_c, T *d_d, int systemSize,
int stride)
{
    int thid = threadIdx.x;
    int blid = blockIdx.x;
    int tid;
    int delta = 1;  // current PCR reach; doubles every pass
    #if USESM
    /* extern __shared__ char shared[];
    T* a = (T*)shared;
    T* b = (T*)&a[systemSize+2];
    T* c = (T*)&b[systemSize+2];
    T* d = (T*)&c[systemSize+2];*/
    #endif
    int blockLeftMin = systemSize*blockIdx.x;        // first equation of this block's system
    int blockRightMax = blockLeftMin + systemSize;   // one past the last equation
    int count = 0;  // ping-pong selector: 0 = read d_*, write d_*out; 1 = the reverse
    while(systemSize/delta > smallSystem)
    {
        // Sweep the whole system in blockDim.x-sized strips.
        tid = blockLeftMin+thid;
        while(tid < blockRightMax)
        {
            int i = tid;
            // Clamp neighbor indices at the system boundaries.
            int iRight = i+delta;
            if (iRight >= blockRightMax) iRight = blockRightMax-1;
            int iLeft = i-delta;
            //iLeft = iLeft%systemSize;
            if (iLeft < blockLeftMin) iLeft = blockLeftMin;
            if(count == 0)
            {
                //experimenting to see if I can get some speedup using shared memory
                //for a little bit of data-reuse
                //this might be a machine dependent parameter that might not be useful for
                //fermi cards, but useful for non-caching global memory
                #if USESM
                /* a[thid] = d_a[i];
                b[thid] = d_b[i];
                c[thid] = d_c[i];
                d[thid] = d_d[i];
                T tmp1 = a[thid]/d_b[iLeft];
                T tmp2 = c[thid]/d_b[iRight];
                d_bout[i] = b[thid]-d_c[iLeft]*tmp1-d_a[iRight]*tmp2; //
                d_dout[i] = d[thid]-d_d[iLeft]*tmp1-d_d[iRight]*tmp2; //
                d_aout[i] = -d_a[iLeft] * tmp1; //
                d_cout[i] = -d_c[iRight] * tmp2; // */
                #else
                // Read d_*, write d_*out.
                T tmp1 = d_a[i] / d_b[iLeft]; //a[thid]/d_b[iLeft]; //
                T tmp2 = d_c[i] / d_b[iRight]; //c[thid]/d_b[iRight]; //
                //could get small reuse (3x?) if i still share between threads
                d_bout[i] = d_b[i] - d_c[iLeft] * tmp1 - d_a[iRight] * tmp2; //b[thid]-d_c[iLeft]*tmp1-d_a[iRight]*tmp2; //
                d_dout[i] = d_d[i] - d_d[iLeft] * tmp1 - d_d[iRight] * tmp2; //d[thid]-d_d[iLeft]*tmp1-d_d[iRight]*tmp2; //
                d_aout[i] = -d_a[iLeft]*tmp1;// -d_a[iLeft] * tmp1; //
                d_cout[i] = -d_c[iRight]*tmp2;//-d_c[iRight] * tmp2; //
                #endif
            }
            else
            {
                #if USESM
                /* a[thid] = d_aout[i];
                b[thid] = d_bout[i];
                c[thid] = d_cout[i];
                d[thid] = d_dout[i];
                T tmp1 = a[thid]/d_bout[iLeft];
                T tmp2 = c[thid]/d_bout[iRight];
                d_bout[i] = b[thid]-d_c[iLeft]*tmp1-d_aout[iRight]*tmp2; //
                d_dout[i] = d[thid]-d_d[iLeft]*tmp1-d_dout[iRight]*tmp2; //
                d_aout[i] = -d_aout[iLeft] * tmp1; //
                d_cout[i] = -d_cout[iRight] * tmp2; // */
                #else
                // Read d_*out, write d_* (the ping-pong reversed).
                T tmp1 = d_aout[i] / d_bout[iLeft]; //a[thid]/d_b[iLeft]; //
                T tmp2 = d_cout[i] / d_bout[iRight]; //c[thid]/d_b[iRight]; //
                //could get small reuse (3x?) if i still share between threads
                d_b[i] = d_bout[i] - d_cout[iLeft] * tmp1 - d_aout[iRight] * tmp2; //b[thid]-d_c[iLeft]*tmp1-d_a[iRight]*tmp2; //
                d_d[i] = d_dout[i] - d_dout[iLeft] * tmp1 - d_dout[iRight] * tmp2; //d[thid]-d_d[iLeft]*tmp1-d_d[iRight]*tmp2; //
                d_a[i] = -d_aout[iLeft]*tmp1;// -d_a[iLeft] * tmp1; //
                d_c[i] = -d_cout[iRight]*tmp2;//-d_c[iRight] * tmp2; //
                #endif
            }
            tid += blockDim.x;
            __syncthreads();
        }
        delta*=2;
        // Flip the ping-pong direction for the next pass.
        if(count == 0)
        count = 1;
        else
        count = 0;
    }
}
|
23,035 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// One pass of a rank-based radix sort: single block, one thread per element.
//   a: input array of n elements, b: output array,
//   count: which decimal digit keys this pass (0 = least significant).
// Each thread computes the stable rank of its element under the digit key
// (elements with a smaller digit, plus equal-digit elements at earlier
// indices) and writes its element to that slot of b. Ranks are unique, so no
// two threads write the same slot. O(n) work per thread.
__global__ void radix(int *a, int *b, int n, int count){
    int id = threadIdx.x;
    int i=0, data=0, j=0, pos=0;
    int temp = a[id];
    // Extract digit `count` of a[id] into `data`.
    while(j<=count){
        data=temp%10;
        temp/=10;
        j++;
    }
    // Rank a[id] against every element of the array.
    for(i=0; i<n; i++){
        j=0;
        int data2, temp=a[i];  // note: this `temp` shadows the outer one
        // Extract digit `count` of a[i] into `data2`.
        while(j<=count){
            data2=temp%10;
            temp/=10;
            j++;
        }
        // Smaller digit — or equal digit at an earlier index — sorts before a[id].
        if(data2<data || (data2==data && i<id))
            pos++;
    }
    b[pos]=a[id];
}
// Sorts a fixed 8-element array of 3-digit numbers with k least-significant-
// digit radix passes on the GPU, printing the result.
int main(){
    //Standard input
    int A[]={170,450,750,900,802,240,200,660};
    int n=8;
    int i, k=3; //3 digit
    int size = sizeof(int)*n;
    int *d_a, *d_b;
    cudaMalloc((void**)&d_a,size);
    cudaMalloc((void**)&d_b,size);
    // One kernel pass per decimal digit, least significant first; the partially
    // sorted array is copied back to the host and re-uploaded between passes.
    // (The blocking cudaMemcpy also serves as the post-kernel synchronization.)
    for(i=0; i<k; i++){
        cudaMemcpy(d_a,&A,size,cudaMemcpyHostToDevice);
        radix<<<1,n>>>(d_a,d_b,n,i);
        cudaMemcpy(&A,d_b,size,cudaMemcpyDeviceToHost);
    }
    for(i=0; i<8; i++)
        printf("\n%d",A[i]);
    cudaFree(d_a);
    cudaFree(d_b);
    getchar();  // keep the console window open
    return 0;
}
23,036 | #include <stdio.h>
#include <time.h>
// CPU reference vector addition: C[i] = A[i] + B[i] for i in [0, n).
// `n` defaults to 4096 * 16 so existing callers that relied on the previously
// hard-coded length are unaffected, while other lengths are now supported.
void cVecAdd(float *A, float *B, float *C, long long n = 4096 * 16)
{
    for(long long i=0; i < n; ++i)
    {
        C[i] = A[i] + B[i];
    }
}
// GPU element-wise vector addition: C[i] = A[i] + B[i], one thread per element.
// NOTE(review): there is no bounds guard, so the launch configuration must
// create exactly one thread per element (gridDim.x * blockDim.x == length);
// any excess thread writes out of bounds.
__global__ void VecAdd(float *A, float *B, float *C)
{
    long long i = threadIdx.x + blockIdx.x * blockDim.x;
    C[i] = A[i] + B[i];
}
// Times an N-element vector addition on the GPU (including the result copy)
// and on the CPU, printing both durations.
int main()
{
    const long long N = 4096 * 16;
    // FIX: the previous configuration used a 2D block of 256*8 = 2048 threads,
    // which exceeds the 1024-threads-per-block limit, so every launch failed;
    // it also sized the grid in y for a 1D kernel. Use a 1D launch with one
    // thread per element (N is a multiple of 256, matching VecAdd's lack of a
    // bounds guard).
    const int threadsPerBlock = 256;
    const int numberOfBlocks = (int)((N + threadsPerBlock - 1) / threadsPerBlock);
    // Heap-allocate: three 64K-float arrays (~768 KB) are too large for the stack.
    float *A = new float[N];
    float *B = new float[N];
    float *C = new float[N];
    float *D_A, *D_B, *D_C;
    clock_t start, end, cstart, cend;
    double elapsed, celapsed;
    size_t memSize = N * sizeof(float);
    cudaMalloc( (void**) &D_A, memSize);
    cudaMalloc( (void**) &D_B, memSize);
    cudaMalloc( (void**) &D_C, memSize);
    for(long long i=0; i < N; ++i)
    {
        A[i] = i;
        B[i] = i * 2.0;
        C[i] = 0;
    }
    cudaMemcpy(D_A, A, memSize, cudaMemcpyHostToDevice);
    cudaMemcpy(D_B, B, memSize, cudaMemcpyHostToDevice);
    cudaMemcpy(D_C, C, memSize, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();  // (cudaThreadSynchronize is deprecated)
    start = clock();
    VecAdd<<<numberOfBlocks, threadsPerBlock>>>(D_A, D_B, D_C);
    cudaDeviceSynchronize();
    cudaMemcpy(C, D_C, memSize, cudaMemcpyDeviceToHost);
    end = clock();
    elapsed = ((double)(end-start)) / CLOCKS_PER_SEC;
    // CPU timing (overwrites C with the same values).
    cstart = clock();
    cVecAdd(A, B, C);
    cend = clock();
    celapsed = ((double)(cend - cstart)) / CLOCKS_PER_SEC;
    printf("Time elapsed %f \n", elapsed);
    printf("Time celapsed %f \n", celapsed);
    cudaFree(D_A);
    cudaFree(D_B);
    cudaFree(D_C);
    delete[] A;
    delete[] B;
    delete[] C;
}
|
23,037 | #include<stdio.h>
#include<stdlib.h>
#include<getopt.h>
#include <assert.h>
#include <cuda.h>
#include <time.h>
static char* program_name;
// Usage
// Write the command-line usage summary to `stream`, then terminate the
// whole process with `exit_code`.  Never returns.
void print_usage (FILE* stream, int exit_code)
{
    fprintf (stream, "Usage: %s options\n", program_name);
    fprintf (stream, " -h --help Display this usage information.\n");
    fprintf (stream, " -f --file filename File containing coefficient matrix.\n");
    fprintf (stream, " -i --Ny int Number of elements in Y direction (default=512).\n");
    fprintf (stream, " -j --Nx int Number of elements in X direction (default=512).\n");
    fprintf (stream, " -n --iterations int Number of iterations (default=10000).\n");
    fprintf (stream, " -k --kernel [1,2] 1: unoptimized, 2: optimized kernel (default).\n");
    fprintf (stream, " -t --tilesize int Size of each thread block in kernel 2 (default=4).\n");
    exit (exit_code);
}
// On the host
// One Jacobi sweep on the CPU:
//   x1[i] = (b[i] - sum_{j != i} A[i][j] * x2[j]) / A[i][i]
// A is Ny x Nx, row-major; x2 is the previous iterate, x1 receives the
// new one.
void jacobiHost(float* x1, float* A, float* x2, float* b, int Ny, int Nx)
{
    for (int row = 0; row < Ny; row++)
    {
        float acc = 0.0;
        const float* Arow = A + row * Nx;   // start of this row in A
        for (int col = 0; col < Nx; col++)
        {
            if (col != row)
                acc += Arow[col] * x2[col];
        }
        x1[row] = (b[row] - acc) / Arow[row];
    }
}
// On the device
// Naive single-block Jacobi sweep: thread t updates row t.
// Adds the previously-missing bounds guard so a launch with more
// threads than rows no longer reads/writes past the arrays.
__global__ void jacobiDevc(float* x1, float* A, float* x2, float* b, int Ny, int Nx)
{
    int idx = threadIdx.x;
    if (idx < Ny)          // bounds guard (previously missing)
    {
        float sigma = 0.0;
        for (int j = 0; j < Nx; j++)
        {
            if (idx != j)
                sigma += A[idx*Nx + j] * x2[j];
        }
        x1[idx] = (b[idx] - sigma) / A[idx*Nx + idx];
    }
}
// Optimized the device
// Jacobi sweep, one thread per row across a 1D grid of 1D blocks.
// Threads mapped past Ny return immediately, so the grid size may be
// rounded up.
__global__ void jacobiOpDevc(float* x1, float* A, float* x2, float* b, int Ny, int Nx)
{
    int row = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= Ny)
        return;                         // guard clause instead of nesting

    float acc = 0.0;
    int rowBase = row * Nx;             // start of row `row` in A
    for (int col = 0; col < Nx; col++)
        if (col != row)
            acc += A[rowBase + col] * x2[col];
    x1[row] = (b[row] - acc) / A[rowBase + row];
}
// Choose GPU
// Pick the CUDA device with the most multiprocessors.
// On entry *gpuNum holds the fallback device id; on exit it holds the
// selected device.  *devcNums receives the total device count.
static void chooseGpu(int *gpuNum, int *devcNums)
{
    int best = *gpuNum;
    cudaGetDeviceCount(devcNums);
    if (*devcNums > 1)
    {
        int coresMax = 0;
        for (int dev = 0; dev < *devcNums; dev++)
        {
            cudaDeviceProp props;
            cudaGetDeviceProperties(&props, dev);
            if (coresMax < props.multiProcessorCount)
            {
                coresMax = props.multiProcessorCount;
                best = dev;
            }
        }
        *gpuNum = best;
    }
}
// Test device
// Sanity-check device `devcId`: the runtime reports compute capability
// 9999.9999 when no usable CUDA device exists.
static void devcTest(int devcId)
{
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, devcId);
    if (deviceProp.major == 9999 && deviceProp.minor == 9999)
    {
        printf("We can not find useful devices.\n");
        // cudaThreadExit() is deprecated; cudaDeviceReset() is the
        // supported replacement for tearing down the context.
        cudaDeviceReset();
    }
    else
        printf("Using GPU device number %d.\n", devcId);
}
// Driver: parse options, run `iter` Jacobi sweeps on the host and on
// the selected GPU kernel, then compare timings and results.
int main(int argc, char *argv[])
{
    clock_t start, end, start_h, end_h, start_d, end_d;  // clock() returns clock_t
    float t_full, t_host, t_dev;
    start = clock();
    float *x2, *x1, *A, *b, *x_h, *x_d;
    float *x2_d, *x1_d, *A_d, *b_d;
    int N, Ny, Nx, iter, kernel, tileSize;
    int ch;
    int i, k;
    char* fname = NULL;   // was uninitialized: used without -f was UB
    FILE* file;
    static struct option long_options[] =
    {
        {"file", required_argument, NULL, 'f'},
        {"Ny", optional_argument, NULL, 'i'},
        {"Nx", optional_argument, NULL, 'j'},
        {"iterations", optional_argument, NULL, 'n'},
        {"kernel", optional_argument, NULL, 'k'},
        {"tilesize", optional_argument, NULL, 't'},
        {"help", optional_argument, NULL, 'h'},
        {NULL, 0, NULL, 0}
    };
    program_name = argv[0];
    Ny = 512, Nx = 512, iter = 10000, kernel = 2, tileSize = 4;
    ch = 0;
    while ((ch = getopt_long(argc, argv, "f:i:j:n:k:h", long_options, NULL)) != -1) {
        switch (ch) {
            case 'f' : fname = optarg;
                break;
            case 'i' : Ny = atoi(optarg);
                break;
            case 'j' : Nx = atoi(optarg);
                break;
            case 'n' : iter = atoi(optarg);
                break;
            case 'k' : kernel = atoi(optarg);
                break;
            case 't' : tileSize = atoi(optarg);
                break;
            case 'h': print_usage(stderr, 1);
                exit(EXIT_FAILURE);
            case '?': print_usage(stderr, 1);
                exit(EXIT_FAILURE);
            default:
                abort();
        }
    }
    // Previously fname was printed and fopen()'d uninitialized when -f
    // was omitted; fail cleanly instead (print_usage exits).
    if (fname == NULL) {
        fprintf(stderr, "No coefficient-matrix file given (use -f).\n");
        print_usage(stderr, 1);
    }
    N = Ny * Nx;
    printf("\nRun Jacobi method:\n");
    printf("======================\n\n");
    printf("Coefficient matrix given in file: \n%s\n\n", fname);
    printf("Parameters:\n");
    printf("N=%d, Ny=%d, Nx=%d, ", N, Ny, Nx);
    printf("iterations=%d, kernel=%d, tilesize=%d\n", iter,kernel,tileSize);
    x1 = (float *) malloc(Ny*sizeof(float));
    A = (float *) malloc(N*sizeof(float));
    x2 = (float *) malloc(Ny*sizeof(float));
    b = (float *) malloc(Ny*sizeof(float));
    x_h = (float *) malloc(Ny*sizeof(float));
    x_d = (float *) malloc(Ny*sizeof(float));
    for (i=0; i<Ny; i++)
    {
        x2[i] = 0;
        x1[i] = 0;
    }
    file = fopen(fname, "r");
    if (file == NULL)
        exit(EXIT_FAILURE);
    // getline() requires *lineptr to be NULL or malloc()'d; the old
    // uninitialized pointer was undefined behaviour.  It is also freed
    // (and the file closed) once the matrix and rhs are read.
    char *line = NULL;
    size_t len = 0;
    i = 0;
    while ((getline(&line, &len, file)) != -1)
    {
        if (i < N)
            A[i] = atof(line);   // first N lines: matrix entries
        else
            b[i-N] = atof(line); // remaining lines: right-hand side
        i++;
    }
    free(line);
    fclose(file);
    // Host reference: alternate the roles of x1/x2 each sweep.
    start_h = clock();
    for (k=0; k<iter; k++)
    {
        if (k%2)
            jacobiHost(x2, A, x1, b, Ny, Nx);
        else
            jacobiHost(x1, A, x2, b, Ny, Nx);
    }
    end_h = clock();
    for (i=0; i<Nx; i++)
        x_h[i] = x1[i];
    for (i=0; i<Ny; i++)
    {
        x2[i] = 0;
        x1[i] = 0;
    }
    int devcId = 0, devcNums = 1;
    chooseGpu(&devcId, &devcNums);
    devcTest(devcId);
    // The chosen device was never actually selected before; without
    // this, all work ran on device 0 regardless of chooseGpu's result.
    cudaSetDevice(devcId);
    assert(cudaSuccess == cudaMalloc((void **) &x1_d, Ny*sizeof(float)));
    assert(cudaSuccess == cudaMalloc((void **) &A_d, N*sizeof(float)));
    assert(cudaSuccess == cudaMalloc((void **) &x2_d, Ny*sizeof(float)));
    assert(cudaSuccess == cudaMalloc((void **) &b_d, Ny*sizeof(float)));
    cudaMemcpy(x1_d, x1, sizeof(float)*Ny, cudaMemcpyHostToDevice);
    cudaMemcpy(A_d, A, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(x2_d, x2, sizeof(float)*Ny, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, sizeof(float)*Ny, cudaMemcpyHostToDevice);
    // Kernel 1 uses one block of Ny threads; kernel 2 uses a rounded-up
    // 1D grid of `tileSize`-thread blocks.
    // NOTE(review): kernel 1 silently fails when Ny exceeds the
    // device's max threads per block (typically 1024).
    int blockSize = Ny;
    int nBlocks = 1;
    int nTiles = Ny/tileSize + (Ny%tileSize == 0?0:1);
    int gridHeight = Nx/tileSize + (Nx%tileSize == 0?0:1);
    int gridWidth = Ny/tileSize + (Ny%tileSize == 0?0:1);
    printf("w=%d, h=%d\n",gridWidth,gridHeight);
    start_d = clock();
    if (kernel == 1)
    {
        printf("Using the first kernel.\n");
        for (k=0; k<iter; k++)
        {
            if (k%2)
                jacobiDevc <<< nBlocks, blockSize >>> (x2_d, A_d, x1_d, b_d, Ny, Nx);
            else
                jacobiDevc <<< nBlocks, blockSize >>> (x1_d, A_d, x2_d, b_d, Ny, Nx);
        }
    }
    else
    {
        printf("Using the second kernel.\n");
        for (k=0; k<iter; k++)
        {
            if (k%2)
                jacobiOpDevc <<< nTiles, tileSize >>> (x2_d, A_d, x1_d, b_d, Ny, Nx);
            else
                jacobiOpDevc <<< nTiles, tileSize >>> (x1_d, A_d, x2_d, b_d, Ny, Nx);
        }
    }
    end_d = clock();
    cudaMemcpy(x_d, x1_d, sizeof(float)*Ny, cudaMemcpyDeviceToHost);
    free(x1); free(A); free(x2); free(b);
    cudaFree(x1_d); cudaFree(A_d); cudaFree(x2_d); cudaFree(b_d);
    end = clock();
    printf("\nResult after %d iterations:\n",iter);
    float err = 0.0;
    for (i=0; i < Ny; i++)
    {
        // abs() is the integer overload here (no math.h is included),
        // which silently truncated the float difference; compute the
        // absolute value explicitly instead.
        float diff = x_h[i] - x_d[i];
        err += (diff < 0 ? -diff : diff) / Ny;
    }
    printf("x_h[%d]=%f\n",0,x_h[0]);
    printf("x_d[%d]=%f\n",0,x_d[0]);
    t_full = ((float)end - (float)start) / CLOCKS_PER_SEC;
    t_host = ((float)end_h - (float)start_h) / CLOCKS_PER_SEC;
    t_dev = ((float)end_d - (float)start_d) / CLOCKS_PER_SEC;
    printf("\nTiming:\nFull: %f\nHost: %f\nDevice: %f\n\n", t_full, t_host, t_dev);
    printf("Relative error: %f\n", err);
    printf("\nProgram terminated successfully.\n");
    free(x_h); free(x_d);   // were leaked before
    return 0;
}
|
23,038 | // #CSCS CUDA Training
//
// #Example 1 - retrieve device info
//
// #Author Ugo Varetto
//
// #Goal: compute the maximum size for a 1D grid layout. i.e. the max size for 1D arrays that allows
// to match a GPU thread with a single array element.
//
// #Rationale: CUDA on arch < 2.x requires client code to configure the domain layout as a 1D or 2D grid of
// 1,2 or 3D blocks; it is not possible to simply set the GPU layout to match the
// domain layout as is the case with OpenCL.
//
// #Solution: the max size for a 1D memory layout is computed as (max num blocks per grid) x (max num threads per block)
// i.e. min( deviceProp.maxThreadsDim[0], deviceProp.maxThreadsPerBlock ) * deviceProp.maxGridSize[0]
//
// #Code: finds number of devices and prints all the available information for each device,
// relevant information is:
// . deviceProp.maxGridSize[0] // max number of blocks in dimension zero
// . deviceProp.maxThreadsDim[0] // max number of threads per block along dimesion 0
// . deviceProp.maxThreadsPerBlock // max threads per block
// . (optional) deviceProp.totalGlobalMem //total amount of memory)
// proper code should perform some minimal error checking and iterate over
// all the available devices
//
// #Compilation: nvcc -arch=sm_13 1_device-query.cu -o device-query
//
// #Execution: ./1_device-query
//
// #Note: by default the code prints all the information available for each graphics card;
// #define MINIMAL to have the code print out only the relevant information
//
// #Note: the code is C++ also because the default compilation mode for CUDA is C++, all functions
// are named with C++ convention and the syntax is checked by default against C++ grammar rules
//
// #Note: -arch=sm_13 allows the code to run on every card with hw architecture GT200 (gtx 2xx) or better
#include <iostream>
//#include <cuda_runtime.h> // automatically added by nvcc
// Enumerate all CUDA devices and print their properties.
// Compile with -DMINIMAL to restrict output to the fields needed for
// computing the maximum 1D grid layout (see the file header comment).
int main( int argc, const char** argv)
{
    int deviceCount = 0;
    // A failing cudaGetDeviceCount usually means the driver and the
    // runtime library versions do not match.
    if( cudaGetDeviceCount( &deviceCount ) != cudaSuccess ) {
        std::cout << "cudaGetDeviceCount FAILED CUDA Driver and Runtime version may be mismatched.\n";
        std::cout << "\nFAILED\n";
        return 1;
    }
    // This function call returns 0 if there are no CUDA capable devices.
    if ( deviceCount == 0 ) {
        std::cout << "There is no device supporting CUDA\n";
        return 1;
    }
    int dev = 0;
    int driverVersion = 0, runtimeVersion = 0;
    for( dev = 0; dev != deviceCount; ++dev ) {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties( &deviceProp, dev );
        if ( dev == 0) {
            // This function call returns 9999 for both major & minor fields, if no CUDA capable devices are present
            if( deviceProp.major == 9999 && deviceProp.minor == 9999 ) std::cout << "There is no device supporting CUDA.\n";
            else if (deviceCount == 1) std::cout << "There is 1 device supporting CUDA\n";
            else std::cout << "There are " << deviceCount << " devices supporting CUDA\n";
        }
        std::cout << "\nDevice" << dev << ": " << deviceProp.name << '\n';
#ifndef MINIMAL
        // Full report: versions, memory sizes, SM count, per-block limits.
        cudaDriverGetVersion(&driverVersion);
        std::cout << "  CUDA Driver Version: " << driverVersion/1000 << '.' << driverVersion%100 << '\n';
        cudaRuntimeGetVersion(&runtimeVersion);
        std::cout << "  CUDA Runtime Version: " << runtimeVersion/1000 << '.' << runtimeVersion%100 << '\n';
        std::cout << "  CUDA Capability Major/Minor version number: " << deviceProp.major << '.' << deviceProp.minor << '\n';
        std::cout << "  Total amount of global memory: " << deviceProp.totalGlobalMem << " bytes\n";
        std::cout << "  Number of multiprocessors: " << deviceProp.multiProcessorCount << '\n';
        std::cout << "  Total amount of constant memory: " << deviceProp.totalConstMem << " bytes\n";
        std::cout << "  Total amount of shared memory per block: " << deviceProp.sharedMemPerBlock << " bytes\n";
        std::cout << "  Total number of registers available per block: " << deviceProp.regsPerBlock << '\n';
        std::cout << "  Warp size: " << deviceProp.warpSize << '\n';
#endif
        // These three fields are the ones needed for the max-1D-layout
        // computation described in the file header.
        std::cout << "  Maximum number of threads per block: " << deviceProp.maxThreadsPerBlock << '\n';
        std::cout << "  Maximum sizes of each dimension of a block: "
                  << deviceProp.maxThreadsDim[0] << " x "
                  << deviceProp.maxThreadsDim[1] << " x "
                  << deviceProp.maxThreadsDim[2] << '\n';
        std::cout << "  Maximum sizes of each dimension of a grid: "
                  << deviceProp.maxGridSize[0] << " x "
                  << deviceProp.maxGridSize[1] << " x "
                  << deviceProp.maxGridSize[2] << '\n';
#ifndef MINIMAL
        std::cout << "  Maximum memory pitch: " << deviceProp.memPitch << " bytes\n";
        // #if CUDART_VERSION >= 4000
        // std::cout << "  Memory Bus Width: " << deviceProp.memBusWidth << "-bit\n";
        // std::cout << "  Memory Clock rate: " << deviceProp.memoryClock * 1e-3f << " Mhz\n";
        // #endif
        std::cout << "  Texture alignment: " << deviceProp.textureAlignment << " bytes\n";
        std::cout << "  Clock rate: " << deviceProp.clockRate * 1e-6f << " GHz\n";
// Features below are only present in the structs of newer runtimes.
#if CUDART_VERSION >= 2000
        std::cout << "  Concurrent copy and execution: " << (deviceProp.deviceOverlap ? "Yes" : "No") << '\n';
#endif
#if CUDART_VERSION >= 4000
        std::cout << "  # of Asynchronous Copy Engines: " << deviceProp.asyncEngineCount << '\n';
#endif
#if CUDART_VERSION >= 2020
        std::cout << "  Run time limit on kernels: " << (deviceProp.kernelExecTimeoutEnabled ? "Yes\n" : "No\n");
        std::cout << "  Integrated: " << (deviceProp.integrated ? "Yes\n" : "No\n");
        std::cout << "  Support host page-locked memory mapping: " << (deviceProp.canMapHostMemory ? "Yes\n" : "No\n");
        std::cout << "  Compute mode: " << (deviceProp.computeMode == cudaComputeModeDefault ?
                                            "Default (multiple host threads can use this device simultaneously)\n" :
                                            deviceProp.computeMode == cudaComputeModeExclusive ?
                                            "Exclusive (only one host thread at a time can use this device)\n" :
                                            deviceProp.computeMode == cudaComputeModeProhibited ?
                                            "Prohibited (no host thread can use this device)\n" :
                                            "Unknown\n");
#endif
#if CUDART_VERSION >= 3000
        std::cout << "  Concurrent kernel execution: " << (deviceProp.concurrentKernels ? "Yes\n" : "No\n");
#endif
#if CUDART_VERSION >= 3010
        std::cout << "  Device has ECC support enabled: " << (deviceProp.ECCEnabled ? "Yes\n" : "No\n");
#endif
#if CUDART_VERSION >= 3020
        std::cout << "  Device is using TCC driver mode: " << (deviceProp.tccDriver ? "Yes\n" : "No\n");
#endif
#if CUDART_VERSION >= 4000
        std::cout << "  Unified addressing: " << (deviceProp.unifiedAddressing ? "Yes\n" : "No\n");
        std::cout << "  PCI bus id: " << deviceProp.pciBusID << '\n';
        std::cout << "  PCI device id: " << deviceProp.pciDeviceID << '\n';
#endif
#endif
    }
    return 0;
}
|
23,039 | #include "includes.h"
// Fill the right-hand-side vector B for one compartment chain:
//   B[k] = V[k]*Cm[k] / (dt/2) + Em[k]/Rm[k]
// Thread t handles the nComp consecutive entries starting at t*nComp.
// NOTE(review): device_fullMatrix is unused here (kept for signature
// compatibility with the call sites); the original TODO about memory
// usage is preserved below.
__global__ void kernel_updateFullMatrix( float * device_fullMatrix, float * B, float * V, float * Cm, float * Em, float * Rm, float dt, unsigned int nComp ) {
    //TODO: fix memory usage matter
    unsigned int t = threadIdx.x;
    unsigned int baseIndex = t*nComp;
    // dt/2 is loop-invariant, so compute it once.  The 2.0f float
    // literal replaces the old 2.0 double literal, which promoted the
    // division to double precision on every loop iteration.
    float halfDt = dt / 2.0f;
    unsigned int i;
    for ( i = 0; i < nComp; i++ )
    {
        unsigned int myIndex = baseIndex + i;
        B[myIndex] = V[myIndex] * Cm[myIndex] / halfDt + Em[myIndex] / Rm[myIndex];
    }
    __syncthreads();
}
23,040 | #include "includes.h"
// Element-wise polynomial kernel map: k[i] = (k[i] + c)^d.
// Grid-stride loop, so any launch configuration covers all n elements.
__global__ void _kpolymap32(int n, float *k, float c, float d) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        // powf: single-precision pow.  The double-precision pow() the
        // kernel previously called forced a float->double->float round
        // trip on every element.
        k[i] = powf(k[i] + c, d);
        i += blockDim.x * gridDim.x;
    }
}
23,041 | #include "includes.h"
// In-place affine transform: y[i] = a * y[i] + b for i < len.
// One thread per element; surplus threads simply return.
__global__ void aypb_f32 (float a, float* y, float b, int len) {
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= len)
        return;
    y[gid] = a * y[gid] + b;
}
23,042 | //Reference implementation of reduction with dot product
//#include <cuda_runtime.h> // automatically added by nvcc
#include <vector>
#include <iostream>
typedef float real_t;
const size_t BLOCK_SIZE = 16;
// Block-level dot product with a shared-memory tree reduction.
// Each thread accumulates a grid-stride partial sum, the block reduces
// its BLOCK_SIZE partials, and thread 0 adds the block result to *out
// (which the caller must zero beforehand).
__global__ void full_dot( const real_t* v1, const real_t* v2, real_t* out, int N ) {
    __shared__ real_t cache[ BLOCK_SIZE ];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    cache[ threadIdx.x ] = 0.f;
    while( i < N ) {
        cache[ threadIdx.x ] += v1[ i ] * v2[ i ];
        i += gridDim.x * blockDim.x;
    }
    // This barrier was missing: every thread's partial sum must be
    // visible before any thread starts reading its neighbours' slots
    // in the first reduction step below.
    __syncthreads();
    i = BLOCK_SIZE / 2;
    while( i > 0 ) {
        if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
        __syncthreads();
        i /= 2; //not sure bitwise operations are actually faster
    }
#ifndef NO_SYNC // serialized access to shared data;
    if( threadIdx.x == 0 ) atomicAdd( out, cache[ 0 ] );
#else // no sync, what most likely happens is:
      // 1) all threads read 0
      // 2) all threads write concurrently 16 (local block dot product)
    if( threadIdx.x == 0 ) *out += cache[ 0 ];
#endif
}
// CPU reference: plain sequential dot product of v1 and v2 over N
// elements.
real_t dot( const real_t* v1, const real_t* v2, int N ) {
    real_t acc = 0;
    for( int k = 0; k != N; ++k ) acc += v1[ k ] * v2[ k ];
    return acc;
}
// Fill v[0..N) with 1.0f using a grid-stride loop.
__global__ void init_vector( real_t* v, int N ) {
    const int stride = gridDim.x * blockDim.x;
    for( int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += stride )
        v[ idx ] = 1.0f;
}
//------------------------------------------------------------------------------
// Driver: build two vectors of ones on the GPU, dot them with the
// reduction kernel, and print both the GPU and CPU results.
int main(int argc, char** argv ) {
    const size_t ARRAY_SIZE = 1024;
    const int BLOCKS = 64;
    const int THREADS_PER_BLOCK = BLOCK_SIZE;   // must match the kernel's cache size
    const size_t SIZE = ARRAY_SIZE * sizeof( real_t );

    // device buffers
    real_t* dev_v1 = 0;
    real_t* dev_v2 = 0;
    real_t* dev_out = 0;
    cudaMalloc( &dev_v1, SIZE );
    cudaMalloc( &dev_v2, SIZE );
    cudaMalloc( &dev_out, sizeof( real_t ) );

    // host mirrors so the CPU reference sees identical data
    std::vector< real_t > host_v1( ARRAY_SIZE );
    std::vector< real_t > host_v2( ARRAY_SIZE );
    real_t host_out = 0.f;

    // Initialise both inputs on the GPU and copy them back.
    init_vector<<< 1024, 256 >>>( dev_v1, ARRAY_SIZE );
    cudaMemcpy( &host_v1[ 0 ], dev_v1, SIZE, cudaMemcpyDeviceToHost );
    init_vector<<< 1024, 256 >>>( dev_v2, ARRAY_SIZE );
    cudaMemcpy( &host_v2[ 0 ], dev_v2, SIZE, cudaMemcpyDeviceToHost );

    // The kernel accumulates into *dev_out with atomicAdd, so the
    // result slot must start at zero.
    cudaMemset( dev_out, 0, sizeof( real_t) );
    full_dot<<<BLOCKS, THREADS_PER_BLOCK>>>( dev_v1, dev_v2, dev_out, ARRAY_SIZE );
    std::cout << cudaGetErrorString( cudaGetLastError() ) << std::endl;

    cudaMemcpy( &host_out, dev_out, sizeof( real_t ), cudaMemcpyDeviceToHost );
    std::cout << "GPU: " << host_out << std::endl;
    std::cout << "CPU: " << dot( &host_v1[ 0 ], &host_v2[ 0 ], ARRAY_SIZE ) << std::endl;

    cudaFree( dev_v1 );
    cudaFree( dev_v2 );
    cudaFree( dev_out );
    return 0;
}
|
23,043 | // includes, system
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <cuda_runtime.h>
#define N 256
// Simple utility function to check for CUDA runtime errors
// Abort with a diagnostic if any prior CUDA call left a sticky error.
// `msg` labels the call site in the message.
void checkCUDAError(const char *msg) {
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( status) );
    exit(EXIT_FAILURE);
}
///////////////////////////////////////////////////////////////////////////////
// Program main
///////////////////////////////////////////////////////////////////////////////
// Round-trip test: copy a known pattern host->device->host and verify
// it survives intact.
int main( int argc, char** argv) {
    // pointers for host memory and size
    int *h_a = NULL, *h_b = NULL;
    // pointers for device memory
    int *d_a = NULL;
    h_a = (int*)malloc(sizeof(int) * N);
    h_b = (int*)malloc(sizeof(int) * N);
    // malloc results were previously unchecked; a NULL here would have
    // crashed inside the init loop instead of failing cleanly.
    if (h_a == NULL || h_b == NULL) {
        fprintf(stderr, "host malloc failed\n");
        exit(EXIT_FAILURE);
    }
    cudaMalloc(&d_a, sizeof(int)*N);
    // Use the file's own helper right after the allocation so a failed
    // cudaMalloc is not misreported later as a memcpy error.
    checkCUDAError("malloc");
    // Initialize h_a to contain integers 0 .. N - 1
    for (int i = 0; i < N; i++) {
        h_a[i] = i;
    }
    // Zero memory for h_b
    memset(h_b, 0, N * sizeof(int));
    cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(h_b, d_a, N * sizeof(int), cudaMemcpyDeviceToHost);
    // Check for any CUDA errors
    checkCUDAError("memcpy");
    for (int i = 0; i < N; i++) {
        if (h_a[i] != h_b[i]) {
            printf("Test failed h_a[%d] != h_b[%d]\n", i, i);
            exit(1);
        }
    }
    free(h_b);
    free(h_a);
    cudaFree(d_a);
    printf("Test passed!\n");
    return 0;
}
|
23,044 | #include<stdio.h>
// Per-element addition for a single block: c[i] = a[i] + b[i] for the
// first n threads; surplus threads return immediately.
__global__ void add(int* a, int* b, int* c, int n)
{
    int i = threadIdx.x;
    if (i >= n)
        return;
    c[i] = a[i] + b[i];
}
// Two-stream pipelined vector add over 20 chunks of n elements each:
// stream0 handles the first 70% of every chunk, stream1 the rest.
int main(){
    int n;
    scanf("%d",&n);
    float elapsed_time;
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    cudaStream_t stream0,stream1;
    cudaStreamCreate(&stream0);
    cudaStreamCreate(&stream1);
    // Pinned host memory so the async copies can actually overlap.
    int *h_a,*h_b,*h_c;
    cudaHostAlloc((void**)&h_a,20*n*sizeof(int),cudaHostAllocDefault);
    cudaHostAlloc((void**)&h_b,20*n*sizeof(int),cudaHostAllocDefault);
    cudaHostAlloc((void**)&h_c,20*n*sizeof(int),cudaHostAllocDefault);
    for(int i=0; i<20*n; i++){
        h_a[i]=i;
        h_b[i]=i+1;
    }
    // 70/30 split of each chunk between the two streams.
    // (The old `n*(7/10)` was integer division: 7/10 == 0, so stream0
    // was given zero elements and stream1 the whole chunk.)
    int seg1 = n*7/10;
    int seg2 = n - seg1;
    // Allocate once, outside the loop: the old per-iteration
    // cudaMalloc/cudaFree implicitly serialized the device and
    // defeated the streams.  Each stream also works on its own
    // disjoint region (stream1 at offset seg1) — previously both
    // streams copied into and computed over the SAME base addresses,
    // racing with each other and producing wrong results.
    int *d_a,*d_b,*d_c;
    cudaMalloc((void**)&d_a,n*sizeof(int));
    cudaMalloc((void**)&d_b,n*sizeof(int));
    cudaMalloc((void**)&d_c,n*sizeof(int));
    for(int i=0; i<20*n; i+=n){
        cudaMemcpyAsync(d_a,      h_a+i,      seg1*sizeof(int),cudaMemcpyHostToDevice,stream0);
        cudaMemcpyAsync(d_b,      h_b+i,      seg1*sizeof(int),cudaMemcpyHostToDevice,stream0);
        cudaMemcpyAsync(d_a+seg1, h_a+i+seg1, seg2*sizeof(int),cudaMemcpyHostToDevice,stream1);
        cudaMemcpyAsync(d_b+seg1, h_b+i+seg1, seg2*sizeof(int),cudaMemcpyHostToDevice,stream1);
        add<<<1,seg1,0,stream0>>>(d_a,      d_b,      d_c,      seg1);
        add<<<1,seg2,0,stream1>>>(d_a+seg1, d_b+seg1, d_c+seg1, seg2);
        cudaMemcpyAsync(h_c+i,      d_c,      seg1*sizeof(int),cudaMemcpyDeviceToHost,stream0);
        cudaMemcpyAsync(h_c+i+seg1, d_c+seg1, seg2*sizeof(int),cudaMemcpyDeviceToHost,stream1);
    }
    cudaStreamSynchronize(stream0);
    cudaStreamSynchronize(stream1);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time,start,stop);
    printf("Time:%3.1f\n",elapsed_time);
    for(int i=0; i<20*n; i++)
        printf("%d ",h_c[i]);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaEventDestroy(stop);
    cudaEventDestroy(start);
    cudaStreamDestroy(stream0);
    cudaStreamDestroy(stream1);
    return 0;
}
|
// Toy kernel: thread t accumulates a[4t .. 4t+4] into a[4t].
// NOTE(review): the i=0 step adds a[4t] to itself, and the i=4 step
// reads a[4t+4] — the slot thread t+1 is concurrently updating — so
// results are racy across adjacent threads.  This looks deliberate for
// a compiler/test fixture; confirm before reusing it for real work.
__global__ void simple_loop(int *a) {
int tid = threadIdx.x;
for (int i = 0; i < 5; i++) {
a[tid * 4] += a[tid * 4 + i];
}
}
|
23,046 | #include "includes.h"
// Zero out d_vec[0..dimension) with one thread per element across a 1D
// grid of 1D blocks; threads mapped past the end return immediately.
__global__ void kernel_set_vector_to_zero(double *d_vec, int dimension)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= dimension)
        return;
    d_vec[gid] = 0;
}
23,047 | #include <iostream>
#include <stdlib.h>
#include <math.h>
#include <algorithm>
using namespace std;
int base[3][4];
int base7[3][7];
int tranposeBase7[7][3];
int base8[3][8];
int base11[3][11];
int base12[3][12];
int base13[3][13];
int base14[3][14];
int board7[7][7];
int board8[8][8];
// Populate the global pattern tables.  The 3x4, 3x7, 3x8 and 3x13 base
// blocks are fixed tilings copied from constant tables; the remaining
// globals are derived from them:
//   tranposeBase7 = base7 transposed (7x3)
//   base11 = base  | base7 shifted by 12
//   base12 = base  | base8 shifted by 12
//   base14 = base7 | base7 shifted by 21
void loadData()
{
    static const int B4[3][4] = {
        { 1,  4,  7, 10},
        { 8, 11,  2,  5},
        { 3,  6,  9, 12}};
    static const int B7[3][7] = {
        { 1, 14, 17, 20,  9,  4,  7},
        {16, 19, 12,  3,  6, 21, 10},
        {13,  2, 15, 18, 11,  8,  5}};
    static const int B8[3][8] = {
        { 1, 16,  3, 22, 19, 12,  7, 10},
        { 4, 21, 18, 15,  6,  9, 24, 13},
        {17,  2,  5, 20, 23, 14, 11,  8}};
    static const int B13[3][13] = {
        { 1,  4, 13, 16, 21,  8, 23, 18, 35, 38, 27, 32, 29},
        {12, 15,  6,  3, 10, 17, 20, 37, 24, 33, 30, 39, 26},
        { 5,  2, 11, 14,  7, 22,  9, 34, 19, 36, 25, 28, 31}};

    // Copy the constant tables into the mutable globals.
    for (int r = 0; r < 3; r++)
    {
        for (int c = 0; c < 4; c++)  base[r][c]   = B4[r][c];
        for (int c = 0; c < 7; c++)  base7[r][c]  = B7[r][c];
        for (int c = 0; c < 8; c++)  base8[r][c]  = B8[r][c];
        for (int c = 0; c < 13; c++) base13[r][c] = B13[r][c];
    }

    // base7 transposed (7 rows x 3 columns).
    for (int r = 0; r < 3; r++)
        for (int c = 0; c < 7; c++)
            tranposeBase7[c][r] = base7[r][c];

    // base11: a 3x4 block followed by a 3x7 block shifted by 3*4.
    for (int r = 0; r < 3; r++)
        for (int c = 0; c < 11; c++)
            base11[r][c] = (c < 4) ? base[r][c] : base7[r][c-4] + 3*4;

    // base12: a 3x4 block followed by a 3x8 block shifted by 3*4.
    for (int r = 0; r < 3; r++)
        for (int c = 0; c < 12; c++)
            base12[r][c] = (c < 4) ? base[r][c] : base8[r][c-4] + 3*4;

    // base14: two 3x7 blocks, the second shifted by 3*7.
    for (int r = 0; r < 3; r++)
        for (int c = 0; c < 14; c++)
            base14[r][c] = (c < 7) ? base7[r][c] : base7[r][c-7] + 3*7;
}
// Number of columns of each stripe that are covered by 3x4 base
// blocks.  Boards narrower than 11 use none; otherwise n mod 4 decides
// how wide the closing (7/8/13/14-column) block must be, and the 3x4
// blocks fill the rest.
int blockOfFour(int n)
{
    if (n < 11)
        return 0;
    int tailWidth;                // columns taken by the closing block
    switch (n % 4)
    {
        case 0:  tailWidth = 8;  break;
        case 1:  tailWidth = 13; break;
        case 2:  tailWidth = 14; break;
        default: tailWidth = 7;  break;   // n % 4 == 3
    }
    return n - tailWidth;
}
// Allocate a 7 x (n-7) table for the 7-wide closing stripe.
// Rows are now value-initialized to zero: the previous version left
// every cell uninitialized (its init loop was commented out), so
// callers read indeterminate values.  Caller owns the memory and must
// delete[] each row and then the row-pointer array.
int** create2DArray(int n)
{
    int** array2D = new int*[7];          // the stripe is 7 rows high
    for (int h = 0; h < 7; h++)
    {
        array2D[h] = new int[n-7]();      // trailing () zero-initializes
    }
    return array2D;
}
// Construct and print an n x n board tiling built from the 3-wide base
// blocks loaded by loadData().  The board is filled in 3-row stripes;
// within each stripe, 3x4 base blocks cover the first blockOfFour(n)
// columns and one wider closing block (7/8/13/14 columns, chosen by
// n mod 4) covers the rest.  n mod 3 selects how leftover rows are
// handled (a 7-row or 8-row stripe along one edge).
// NOTE(review): case 2 never fills its 8-row stripe (the original
// comment below marks where that was meant to go), and the n%4==0
// branch of case 0 writes board[x+3+i][...] without the x+3+i < n
// guard the sibling branches have — confirm intended before relying on
// those configurations.
void solveBoard(int n)
{
    int board[n][n]; // initialize the board
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            board[i][j] = 0;
    int BaseOfFour = blockOfFour(n)/4; // number blocks of 3*4
    switch(n % 3)
    {
    case 0: // for all board size that is divisibe by 3
        for (int x = 0; x < 3; x++)
        {
            for(int y = 0; y < n; y++)
            {
                if(y < blockOfFour(n))
                {
                    // Column handled by a 3x4 base block; temp is the
                    // index of that block within the stripe.
                    int temp = y/4;
                    for(int i = 0; i < n; i+= 6) // parrallel here parrallel here i+6 to get stripe without flipping order
                    {
                        int stride = i/3;
                        board[x+i][y] = base[x][y%4]+ temp*12 + 3*n*stride;
                        if(x+3+i < n)
                            board[x+3+i][n-y-1] = base[x][y%4]+ temp*12 + 3*n*(stride+1);
                    }
                }
                else
                {
                    // Column handled by the closing block; which table
                    // is used depends on n mod 4.
                    for(int i = 0 ; i < n; i+= 6) // parallel here
                    {
                        int stride = i/3;
                        if(n % 4 == 0)
                        {
                            board[x+i][y] = base8[x][y-blockOfFour(n)]+BaseOfFour*12 + 3*n*stride;
                            board[x+3+i][n-y-1] = base8[x][y-blockOfFour(n)]+ BaseOfFour*12 + 3*n*(stride+1); //using base 8
                        }
                        if(n % 4 == 1)
                        {
                            board[x+i][y] = base13[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
                            if(x+3+i < n)
                                board[x+3+i][n-y-1] = base13[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1); //using base 13
                        }
                        if(n % 4 == 2)
                        {
                            board[x+i][y] = base14[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
                            if(x+3+i < n)
                                board[x+3+i][n-y-1] = base14[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1); // using base 14
                        }
                        if(n % 4 == 3)
                        {
                            board[x+i][y] = base7[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
                            if(x+3+i < n)
                                board[x+3+i][n-y-1] = base7[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1); // using base 7
                        }
                    }
                }
            }
        }
        break;
    case 1:
        // Same stripe filling as case 0, but stop 7 rows early; the
        // remaining 7 rows are covered by a transposed-base7 stripe
        // below.
        for (int x = 0; x < 3; x++)
        {
            for(int y = 0; y < n; y++)
            {
                if(y < blockOfFour(n))
                {
                    int temp = y/4; // temp get index of blockOf4
                    for(int i = 0; i < n-7; i+= 6) //
                    {
                        int stride = i/3;
                        board[x+i][y] = base[x][y%4]+ temp*12 + 3*n*stride;
                        if(x+3+i < n-7)
                            board[x+3+i][n-y-1] = base[x][y%4]+ temp*12 + 3*n*(stride+1);
                    }
                }// end of if
                else
                {
                    for(int i = 0 ; i < n-7; i+= 6) // parallel here
                    {
                        int stride = i/3;
                        if(n % 4 == 0)
                        {
                            board[x+i][y] = base8[x][y-blockOfFour(n)]+BaseOfFour*12 + 3*n*stride;
                            if(x+3+i < n-7)// Don't want to get in the 7 stride
                                board[x+3+i][n-y-1] = base8[x][y-blockOfFour(n)]+ BaseOfFour*12 + 3*n*(stride+1);
                        }
                        if(n % 4 == 1)
                        {
                            board[x+i][y] = base13[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
                            if(x+3+i < n-7) // Don't want to get in the 7 stride
                                board[x+3+i][n-y-1] = base13[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1);
                        }
                        if(n % 4 == 2)
                        {
                            board[x+i][y] = base14[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
                            if(x+3+i < n-7) // Don't want to get in the 7 stride
                                board[x+3+i][n-y-1] = base14[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1);
                        }
                        if(n % 4 == 3)
                        {
                            board[x+i][y] = base7[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
                            if(x+3+i < n-7) // Don't want to get in the 7 stride
                                board[x+3+i][n-y-1] = base7[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1);
                        }
                    }
                }// end of else
            }// end of for y
        }// end of big 4
        //Handling 7*n here
        for(int x = 0; x < 7; x++)
        {
            for(int y = 0; y < n-7 ; y++) // NOTE !!!!
            {
                if(y%6 == 0) //0,6,12....
                {
                    int temp = y/6;
                    board[n+x-7][y] = tranposeBase7[x][y%3]+ 2*temp*21;
                    board[n+x-7][y+1] = tranposeBase7[x][y%3+1]+ 2*temp*21;
                    board[n+x-7][y+2] = tranposeBase7[x][y%3+2]+ 2*temp*21;
                }
                else if( y%3 == 0) //3,9,15,21...
                {
                    int temp = y/6;
                    board[n-x-1][y] = tranposeBase7[x][y%3]+ 21 + 2*temp*21;
                    board[n-x-1][y+1] = tranposeBase7[x][y%3+1]+ 21 + 2*temp*21;
                    board[n-x-1][y+2] = tranposeBase7[x][y%3+2]+ 21 + 2*temp*21;
                }
                /*int temp = y/3; // getting index of block of 7*3
                for(int i = 0; i < n-7; i+= 3) //
                {
                    int stride = i/3;
                    board[n+x-7][y] = tranposeBase7[x][y%3] + temp*21 + 3*n*stride;
                    if(x+3+i < n-7)
                        board[x+3+i][n-y-1] = base[x][y%4]+ temp*12 + 3*n*(stride+1);
                }*/
            }
        }
        break;
    case 2:
        // Same stripe filling again, stopping 8 rows early.
        for (int x = 0; x < 3; x++)
        {
            for(int y = 0; y < n; y++)
            {
                if(y < blockOfFour(n))
                {
                    int temp = y/4;
                    for(int i = 0; i < n-8; i+= 6)
                    {
                        int stride = i/3;
                        board[x+i][y] = base[x][y%4]+ temp*12 + 3*n*stride;
                        if(x+3+i < n-8)
                            board[x+3+i][n-y-1] = base[x][y%4]+ temp*12 + 3*n*(stride+1);
                    }
                }// end of if
                else
                {
                    for(int i = 0 ; i < n-8; i+= 6)
                    {
                        int stride = i/3;
                        if(n % 4 == 0)
                        {
                            board[x+i][y] = base8[x][y-blockOfFour(n)]+BaseOfFour*12 + 3*n*stride;
                            if(x+3+i < n-8)// Don't want to get in the 8 stride
                                board[x+3+i][n-y-1] = base8[x][y-blockOfFour(n)]+ BaseOfFour*12 + 3*n*(stride+1);
                        }
                        if(n % 4 == 1)
                        {
                            board[x+i][y] = base13[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
                            if(x+3+i < n-8) // Don't want to get in the 8 stride
                                board[x+3+i][n-y-1] = base13[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1);
                        }
                        if(n % 4 == 2)
                        {
                            board[x+i][y] = base14[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
                            if(x+3+i < n-8) // Don't want to get in the 8 stride
                                board[x+3+i][n-y-1] = base14[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1);
                        }
                        if(n % 4 == 3)
                        {
                            board[x+i][y] = base7[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
                            if(x+3+i < n-8) // Don't want to get in the 8 stride
                                board[x+3+i][n-y-1] = base7[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1);
                        }
                    }
                }// end of else
            }// end of for y
        }// end of big 4
        // Handling 8*n stride here
    } // end of switch
    // Dump the finished board as tab-separated rows.
    for (int x = 0; x < n; x++) {
        for (int y = 0; y < n; y++)
            cout << board[x][y]<< "\t";
        cout << endl;
    }
}
// Entry point: build the lookup tables, read the board size from the
// user, and print the solved board.
int main()
{
    loadData();
    cout << "Enter size of board:";
    int boardSize;
    cin >> boardSize;
    solveBoard(boardSize);
    return 0;
}
|
23,048 | /*-----------------------------------------------------------
** gaussian.cu -- The program is to solve a linear system Ax = b
** by using Gaussian Elimination. The algorithm on page 101
** ("Foundations of Parallel Programming") is used.
** The sequential version is gaussian.c. This parallel
** implementation converts three independent for() loops
** into three Fans. Use the data file ge_3.dat to verify
** the correction of the output.
**
** Written by Andreas Kura, 02/15/95
** Modified by Chong-wei Xu, 04/20/95
** Modified by Chris Gregg for CUDA, 07/20/2009
**-----------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "cuda.h"
#include <string.h>
#include <math.h>
#ifdef RD_WG_SIZE_0_0
#define MAXBLOCKSIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define MAXBLOCKSIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define MAXBLOCKSIZE RD_WG_SIZE
#else
#define MAXBLOCKSIZE 512
#endif
//2D defines. Go from specific to general
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_XY RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_XY RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_XY RD_WG_SIZE
#else
#define BLOCK_SIZE_XY 4
#endif
int Size;
float *a, *b, *finalVec;
float *m;
FILE *fp;
void InitProblemOnce(char *filename);
void InitPerRun();
void ForwardSub();
void BackSub();
__global__ void Fan1(float *m, float *a, int Size, int t);
__global__ void Fan2(float *m, float *a, float *b,int Size, int j1, int t);
void InitMat(float *ary, int nrow, int ncol);
void InitAry(float *ary, int ary_size);
void PrintMat(float *ary, int nrow, int ncolumn);
void PrintAry(float *ary, int ary_size);
void PrintDeviceProperties();
void checkCUDAError(const char *msg);
unsigned int totalKernelTime = 0;
// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06
// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06
//
// Fills the size x size matrix m with a symmetric Toeplitz band:
// entry (i,j) = 10*exp(lamda*|i-j|), so values decay away from the diagonal.
void
create_matrix(float *m, int size){
  int i,j;
  float lamda = -0.01;
  /* coe holds the 2*size-1 distinct diagonal values.  The original used a
   * C99 VLA (`float coe[2*size-1]`), which silently overflows the stack for
   * large sizes; allocate on the heap instead. */
  float *coe = (float *) malloc((2*size-1) * sizeof(float));
  float coe_i =0.0;
  if (coe == NULL) {
    fprintf(stderr, "create_matrix: out of memory\n");
    exit(EXIT_FAILURE);
  }
  for (i=0; i < size; i++)
    {
      coe_i = 10*exp(lamda*i);
      j=size-1+i;      /* value for diagonal offset +i */
      coe[j]=coe_i;
      j=size-1-i;      /* mirror for diagonal offset -i */
      coe[j]=coe_i;
    }
  for (i=0; i < size; i++) {
    for (j=0; j < size; j++) {
      m[i*size+j]=coe[size-1-i+j];
    }
  }
  free(coe);
}
// Entry point: parses -s/-f/-q, builds or loads the system Ax=b, runs the
// GPU forward elimination and CPU back substitution, and reports timings.
int main(int argc, char *argv[])
{
  printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", MAXBLOCKSIZE, BLOCK_SIZE_XY, BLOCK_SIZE_XY);
  int verbose = 1;
  int i, j;
  char flag;
  if (argc < 2) {
    printf("Usage: gaussian -f filename / -s size [-q]\n\n");
    printf("-q (quiet) suppresses printing the matrix and result values.\n");
    printf("-f (filename) path of input file\n");
    printf("-s (size) size of matrix. Create matrix and rhs in this program \n");
    printf("The first line of the file contains the dimension of the matrix, n.");
    printf("The second line of the file is a newline.\n");
    printf("The next n lines contain n tab separated values for the matrix.");
    printf("The next line of the file is a newline.\n");
    printf("The next line of the file is a 1xn vector with tab separated values.\n");
    printf("The next line of the file is a newline. (optional)\n");
    printf("The final line of the file is the pre-computed solution. (optional)\n");
    printf("Example: matrix4.txt:\n");
    printf("4\n");
    printf("\n");
    printf("-0.6	-0.5	0.7	0.3\n");
    printf("-0.3	-0.9	0.3	0.7\n");
    printf("-0.4	-0.5	-0.3	-0.8\n");
    printf("0.0	-0.1	0.2	0.9\n");
    printf("\n");
    printf("-0.85	-0.68	0.24	-0.53\n");
    printf("\n");
    printf("0.7	0.0	-0.4	-0.5\n");
    exit(0);
  }
  // Parse flags; -s generates the system internally, -f reads it from a file.
  for(i=1;i<argc;i++) {
    if (argv[i][0]=='-') {// flag
      flag = argv[i][1];
      switch (flag) {
        case 's': // generate matrix and rhs internally
          i++;
          Size = atoi(argv[i]);
          printf("Create matrix internally in parse, size = %d \n", Size);
          a = (float *) malloc(Size * Size * sizeof(float));
          create_matrix(a, Size);
          b = (float *) malloc(Size * sizeof(float));
          for (j =0; j< Size; j++)
            b[j]=1.0;
          m = (float *) malloc(Size * Size * sizeof(float));
          break;
        case 'f': // read matrix, rhs (and Size) from a data file
          i++;
          printf("Read file from %s \n", argv[i]);
          InitProblemOnce(argv[i]);
          break;
        case 'q': // quiet
          verbose = 0;
          break;
      }
    }
  }
  InitPerRun();
  // begin timing (wall clock, includes device transfers inside ForwardSub)
  struct timeval time_start;
  gettimeofday(&time_start, NULL);
  // run kernels
  ForwardSub();
  // end timing
  struct timeval time_end;
  gettimeofday(&time_end, NULL);
  unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
  if (verbose) {
    printf("Matrix m is: \n");
    PrintMat(m, Size, Size);
    printf("Matrix a is: \n");
    PrintMat(a, Size, Size);
    printf("Array b is: \n");
    PrintAry(b, Size);
  }
  BackSub();
  if (verbose) {
    printf("The final solution is: \n");
    PrintAry(finalVec,Size);
  }
  printf("\nTime total (including memory transfers)\t%f sec\n", time_total * 1e-6);
  printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6);
  free(m);
  free(a);
  free(b);
  free(finalVec);   // allocated by BackSub(); was leaked in the original
  return 0;         // explicit success status (original fell off the end)
}
/*------------------------------------------------------
** PrintDeviceProperties
**-----------------------------------------------------
*/
// Enumerates all CUDA devices and prints their properties to stdout.
// Purely informational; prints the runtime's error string if a query fails.
void PrintDeviceProperties(){
cudaDeviceProp deviceProp;
int nDevCount = 0;
cudaGetDeviceCount( &nDevCount );
printf( "Total Device found: %d", nDevCount );
for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx )
{
// Zero the struct so stale fields are never printed if the query fails.
memset( &deviceProp, 0, sizeof(deviceProp));
if( cudaSuccess == cudaGetDeviceProperties(&deviceProp, nDeviceIdx))
{
printf( "\nDevice Name \t\t - %s ", deviceProp.name );
printf( "\n**************************************");
printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 );
printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 );
printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock );
printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize );
printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch );
printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock );
printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] );
printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] );
printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem );
printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor );
printf( "\nClock rate \t\t\t\t - %d KHz", deviceProp.clockRate );
printf( "\nTexture Alignment \t\t\t - %zu bytes", deviceProp.textureAlignment );
printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp. deviceOverlap?"Allowed":"Not Allowed" );
printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount );
}
else
printf( "\n%s", cudaGetErrorString(cudaGetLastError()));
}
}
/*------------------------------------------------------
** InitProblemOnce -- Initialize all of matrices and
** vectors by opening a data file specified by the user.
**
** We used dynamic array *a, *b, and *m to allocate
** the memory storages.
**------------------------------------------------------
*/
/*------------------------------------------------------
** InitProblemOnce -- Initialize matrix a, rhs b and the
** multiplier buffer m from the data file `filename`.
** Sets the global Size and leaves the global FILE* fp open.
**------------------------------------------------------
*/
void InitProblemOnce(char *filename)
{
	fp = fopen(filename, "r");
	// The original dereferenced fp unchecked; a bad path crashed in fscanf.
	if (fp == NULL) {
		fprintf(stderr, "InitProblemOnce: cannot open input file %s\n", filename);
		exit(EXIT_FAILURE);
	}
	if (fscanf(fp, "%d", &Size) != 1 || Size <= 0) {
		fprintf(stderr, "InitProblemOnce: invalid matrix dimension in %s\n", filename);
		exit(EXIT_FAILURE);
	}
	a = (float *) malloc(Size * Size * sizeof(float));
	InitMat(a, Size, Size);
	b = (float *) malloc(Size * sizeof(float));
	InitAry(b, Size);
	// m is the multiplier matrix filled by the GPU; zeroed later by InitPerRun.
	m = (float *) malloc(Size * Size * sizeof(float));
}
/*------------------------------------------------------
** InitPerRun() -- Initialize the contents of the
** multipier matrix **m
**------------------------------------------------------
*/
/*------------------------------------------------------
** InitPerRun() -- Reset every entry of the multiplier
** matrix m to zero before a run.
**------------------------------------------------------
*/
void InitPerRun()
{
	int idx;
	for (idx = 0; idx < Size * Size; idx++)
		m[idx] = 0.0;
}
/*-------------------------------------------------------
** Fan1() -- Calculate multiplier matrix
** Pay attention to the index. Index i give the range
** which starts from 0 to range-1. The real values of
** the index should be adjust and related with the value
** of t which is defined on the ForwardSub().
**-------------------------------------------------------
*/
/*-------------------------------------------------------
** Fan1() -- Compute the column of multipliers for pivot
** step t: one thread per row below the pivot row.
** m[row][t] = a[row][t] / a[t][t] for row in (t, Size).
**-------------------------------------------------------
*/
__global__ void Fan1(float *m_cuda, float *a_cuda, int Size, int t)
{
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	// Only Size-1-t rows remain below the pivot; excess threads exit.
	if (tid >= Size - 1 - t) return;
	int row = tid + t + 1;   // actual matrix row handled by this thread
	m_cuda[Size * row + t] = a_cuda[Size * row + t] / a_cuda[Size * t + t];
}
/*-------------------------------------------------------
** Fan2() -- Modify the matrix A into LUD
**-------------------------------------------------------
*/
/*-------------------------------------------------------
** Fan2() -- Eliminate below the pivot for step t:
** subtracts m[row][t] * (pivot row) from each remaining
** row of a, and updates b for the first column of threads.
** x maps to rows below the pivot, y maps to columns.
** NOTE(review): parameter j1 is unused; kept for the call signature.
**-------------------------------------------------------
*/
__global__ void Fan2(float *m_cuda, float *a_cuda, float *b_cuda,int Size, int j1, int t)
{
// Guard: only Size-1-t rows and Size-t columns remain at step t.
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return;
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
// Row xidx+1+t, column yidx+t: subtract multiplier times the pivot row.
a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)];
//a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t];
// One thread per row (yidx==0, so the m index below reduces to column t)
// also updates the right-hand side.
if(yidx == 0){
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
//printf("xidx:%d,yidx:%d\n",xidx,yidx);
b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t];
}
}
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination.
**------------------------------------------------------
*/
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination. Copies globals a/b/m to the device, runs
** Fan1+Fan2 once per pivot, copies results back, and
** records the kernel-phase time in totalKernelTime (us).
**------------------------------------------------------
*/
void ForwardSub()
{
	int t;
	float *m_cuda, *a_cuda, *b_cuda;
	// allocate memory on GPU
	cudaMalloc((void **) &m_cuda, Size * Size * sizeof(float));
	cudaMalloc((void **) &a_cuda, Size * Size * sizeof(float));
	cudaMalloc((void **) &b_cuda, Size * sizeof(float));
	// copy memory to GPU
	cudaMemcpy(m_cuda, m, Size * Size * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(a_cuda, a, Size * Size * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(b_cuda, b, Size * sizeof(float), cudaMemcpyHostToDevice);
	// 1D launch config for Fan1: one thread per row below the pivot.
	int block_size = MAXBLOCKSIZE;
	int grid_size = (Size + block_size - 1) / block_size;   // ceil(Size/block_size)
	dim3 dimBlock(block_size);
	dim3 dimGrid(grid_size);
	// 2D launch config for Fan2 (same ceil-div, previously written as an
	// obfuscated `!(Size%b?0:1)` expression).
	int blockSize2d = BLOCK_SIZE_XY;
	int gridSize2d = (Size + blockSize2d - 1) / blockSize2d;
	dim3 dimBlockXY(blockSize2d, blockSize2d);
	dim3 dimGridXY(gridSize2d, gridSize2d);
	// begin timing kernels
	struct timeval time_start;
	gettimeofday(&time_start, NULL);
	for (t = 0; t < (Size - 1); t++) {
		Fan1<<<dimGrid, dimBlock>>>(m_cuda, a_cuda, Size, t);
		// cudaThreadSynchronize() is deprecated (removed in CUDA 12);
		// cudaDeviceSynchronize() is the supported equivalent.
		cudaDeviceSynchronize();
		checkCUDAError("Fan1");   // Fan1 launches were previously unchecked
		Fan2<<<dimGridXY, dimBlockXY>>>(m_cuda, a_cuda, b_cuda, Size, Size - t, t);
		cudaDeviceSynchronize();
		checkCUDAError("Fan2");
	}
	// end timing kernels
	struct timeval time_end;
	gettimeofday(&time_end, NULL);
	totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
	// copy memory back to CPU
	cudaMemcpy(m, m_cuda, Size * Size * sizeof(float), cudaMemcpyDeviceToHost);
	cudaMemcpy(a, a_cuda, Size * Size * sizeof(float), cudaMemcpyDeviceToHost);
	cudaMemcpy(b, b_cuda, Size * sizeof(float), cudaMemcpyDeviceToHost);
	cudaFree(m_cuda);
	cudaFree(a_cuda);
	cudaFree(b_cuda);
}
/*------------------------------------------------------
** BackSub() -- Backward substitution
**------------------------------------------------------
*/
/*------------------------------------------------------
** BackSub() -- Backward substitution: solves the upper
** triangular system left by ForwardSub bottom-up and
** stores the answer in the global finalVec.
**------------------------------------------------------
*/
void BackSub()
{
	// create a new vector to hold the final answer
	finalVec = (float *) malloc(Size * sizeof(float));
	int i, j;
	for (i = 0; i < Size; i++) {
		int row = Size - i - 1;        // solve rows from the bottom up
		float acc = b[row];
		// subtract the contributions of already-solved unknowns
		for (j = 0; j < i; j++)
			acc -= a[Size * row + (Size - j - 1)] * finalVec[Size - j - 1];
		finalVec[row] = acc / a[Size * row + row];
	}
}
/*------------------------------------------------------
** InitMat() -- Read an nrow x ncol matrix (row major)
** from the open data file fp.
** Fix: index with the ncol parameter instead of the
** global Size, so the helper is correct for non-square
** shapes too (identical for the existing Size x Size calls).
**------------------------------------------------------
*/
void InitMat(float *ary, int nrow, int ncol)
{
	int i, j;
	for (i=0; i<nrow; i++) {
		for (j=0; j<ncol; j++) {
			fscanf(fp, "%f", ary + i*ncol + j);
		}
	}
}
/*------------------------------------------------------
** PrintMat() -- Print the contents of the matrix
**------------------------------------------------------
*/
/*------------------------------------------------------
** PrintMat() -- Print an nrow x ncol matrix, one row per
** line, 8.2f per entry.
** Fix: stride by the ncol parameter rather than the
** global Size (identical output for the existing square
** calls, correct for non-square ones).
**------------------------------------------------------
*/
void PrintMat(float *ary, int nrow, int ncol)
{
	int i, j;
	for (i=0; i<nrow; i++) {
		for (j=0; j<ncol; j++) {
			printf("%8.2f ", *(ary + i*ncol + j));
		}
		printf("\n");
	}
	printf("\n");
}
/*------------------------------------------------------
** InitAry() -- Initialize the array (vector) by reading
** data from the data file
**------------------------------------------------------
*/
/*------------------------------------------------------
** InitAry() -- Read ary_size whitespace-separated floats
** from the open data file fp into ary.
**------------------------------------------------------
*/
void InitAry(float *ary, int ary_size)
{
	for (int i = 0; i < ary_size; i++)
		fscanf(fp, "%f", ary + i);
}
/*------------------------------------------------------
** PrintAry() -- Print the contents of the array (vector)
**------------------------------------------------------
*/
/*------------------------------------------------------
** PrintAry() -- Print the vector on one line, two
** decimals per entry, followed by a blank line.
**------------------------------------------------------
*/
void PrintAry(float *ary, int ary_size)
{
	for (int i = 0; i < ary_size; i++)
		printf("%.2f ", ary[i]);
	printf("\n\n");
}
// Abort with a readable message if any preceding CUDA call or kernel
// launch left an error (cudaGetLastError also clears the sticky error).
void checkCUDAError(const char *msg)
{
	cudaError_t status = cudaGetLastError();
	if (status == cudaSuccess)
		return;
	fprintf(stderr, "Cuda error: %s: %s.\n", msg,
			cudaGetErrorString( status) );
	exit(EXIT_FAILURE);
}
|
23,049 | #include <stdio.h>
// C = A * B for square matrices stored row-major in 1D arrays,
// one thread per output element. Expects a 2D launch covering
// matrix_width x matrix_width threads.
__global__ void matrixs_1D_multiplication(int *matrix_a_dev, int *matrix_b_dev, int *matrix_c_dev, int matrix_width)
{
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if(row < matrix_width && col < matrix_width)
	{
		// Accumulate in a register and store once. The original did
		// `matrix_c_dev[...] += ...`, which reads the output buffer that the
		// host never initialised (no cudaMemset), producing garbage results.
		int acc = 0;
		for(int k = 0; k < matrix_width; k++)
		{
			acc += matrix_a_dev[row * matrix_width + k] * matrix_b_dev[k * matrix_width + col];
		}
		matrix_c_dev[row * matrix_width + col] = acc;
	}
}
// Computes a 3x3 integer matrix product on the GPU and prints the result.
int main()
{
	int matrix_width = 3;
	int *matrix_a_host;
	int *matrix_b_host;
	int *matrix_c_host;
	matrix_a_host = (int *)malloc(matrix_width*matrix_width*sizeof(int));
	matrix_b_host = (int *)malloc(matrix_width*matrix_width*sizeof(int));
	matrix_c_host = (int *)malloc(matrix_width*matrix_width*sizeof(int));
	// a[r][c] = r + c ; b[r][c] = r*c + c
	for(int row = 0; row < matrix_width; row++)
	{
		for(int col = 0; col < matrix_width; col++)
		{
			matrix_a_host[row * matrix_width + col] = row + col;
			matrix_b_host[row * matrix_width + col] = row * col + col;
		}
	}
	// ------------------GPU--------------------------
	int *matrix_a_dev;
	int *matrix_b_dev;
	int *matrix_c_dev;
	cudaMalloc((void**) &matrix_a_dev, matrix_width*matrix_width*sizeof(int));
	cudaMalloc((void**) &matrix_b_dev, matrix_width*matrix_width*sizeof(int));
	cudaMalloc((void**) &matrix_c_dev, matrix_width*matrix_width*sizeof(int));
	cudaMemcpy(matrix_a_dev, matrix_a_host, matrix_width*matrix_width*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(matrix_b_dev, matrix_b_host, matrix_width*matrix_width*sizeof(int), cudaMemcpyHostToDevice);
	// Zero the output buffer: the kernel accumulates with +=, so without this
	// it adds into uninitialised device memory.
	cudaMemset(matrix_c_dev, 0, matrix_width*matrix_width*sizeof(int));
	dim3 dimGrid(1, 1, 1);
	dim3 dimBlock(3, 3, 1);
	matrixs_1D_multiplication<<<dimGrid, dimBlock>>>(matrix_a_dev, matrix_b_dev, matrix_c_dev, matrix_width);
	cudaMemcpy(matrix_c_host, matrix_c_dev, matrix_width*matrix_width*sizeof(int), cudaMemcpyDeviceToHost);
	printf("\n-------------Matrix c-----------------\n");
	for(int i = 0; i < matrix_width * matrix_width; i++)
	{
		if((i + 1) % matrix_width)
			printf("%d ", *(matrix_c_host + i));
		else
			printf("%d \n", *(matrix_c_host + i));
	}
	free(matrix_a_host);
	free(matrix_b_host);
	free(matrix_c_host);
	cudaFree(matrix_a_dev);
	cudaFree(matrix_b_dev);
	cudaFree(matrix_c_dev);
	return 0;   // was `return 1`, which reports failure to the shell
}
|
23,050 | // Dummy file to trigger CUDA compile in this project |
23,051 | // Parallel all-sources Dijkstra/Bellman-Ford relaxation with an early exit when no distance was updated
// All-sources shortest paths: each block iterates Bellman-Ford-style
// relaxations for one source at a time (source index strided by block count),
// using a shared flag to exit early once no distance improves.
// Graph is CSR: V[u]..V[u+1] indexes into edge arrays E (target) and W (weight).
// dist/predist/vis are n*n arrays, one row of length n per source.
__global__ void dijkstra(int* V, int* E, int* W, int* n, int* vis, int* dist, int* predist){
const int u0 = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
const int offset = blockDim.x * blockDim.y * blockDim.z; // the number of threads in a block
const int blockNum = (const int) gridDim.x * gridDim.y; // the number of block
int u = -1;
int sn = -1;
int s = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
// Block-wide "did any distance improve this round?" flag.
__shared__ int quickBreak[1];
while(s < (*n)){ // source must valid
sn = (s * (*n)); // row offset of this source's dist/vis/predist slice
for(int i = 0; i < (*n); i++){
// All threads store the same 0; NOTE(review): benign same-value write race.
quickBreak[0] = 0;
u = u0;
// Relax outgoing edges of every not-yet-settled vertex (block-strided).
while(u < *n){
if(vis[u + sn] == 0){
vis[u + sn] = 1;
for(int j = V[u]; j < V[u + 1]; j++){ // the end of j, j is the index of E and W, E[j] is the end of this edge, W[j] is the weight of this edge.
atomicMin(&predist[E[j] + sn], dist[u + sn] + W[j]); // s is source.
}
}
u += offset;
}
__syncthreads();
u = u0;
// Commit improvements; re-open improved vertices and raise the flag.
while(u < (*n)){
if(predist[u + sn] < dist[u + sn]){
dist[u + sn] = predist[u + sn];
vis[u + sn] = 0;
quickBreak[0] = 1;
}
u += offset;
}
__syncthreads();
// Flag is uniform across the block here, so the break is non-divergent.
if(quickBreak[0] == 0){
break;
}
__syncthreads();
}
s += blockNum; // next vertex
}
}
// base is the start index of E
// base is the start index of E
// One relaxation pass over a slice of the edge arrays: only edges whose
// indices fall in [base, base+part) are resident, so E/W are indexed with
// the slice offset subtracted. vis[u] counts how many slices still need to
// process u; flag[0] is set when any distance improved.
// NOTE(review): appears to be single-source (dist/predist indexed by vertex
// only) in contrast to dijkstra() above — confirm with the host driver.
__global__ void divide(int* V, int* E, int* W, int* n, int* flag, int* base, int* part, int* vis, int* dist, int* predist){
const int u0 = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
const int offset = blockDim.x * blockDim.y * blockDim.z; // the number of threads in a block
int u = -1;
int l = -1;
int r = -1;
int localBase = base[0];
int localPart = part[0];
u = u0; // this turn, u is not the number of vertex, it's just a offset to tell out of range(part), and u+l is the true number of vertex.
while(u < (*n)){ // the vertex represented by the current thread is in video memory.
if(V[u + 1] <= localBase){ // self right
u += offset;
continue; // the range of vertex's edge is illegal.
}
else if(V[u] >= localBase + localPart){ // self left
u += offset;
continue; // the range of vertex's edge is illegal.
}
// dist is updated
if(vis[u]){
// different part is not ordered, so can not use vis to tell.
//vis[u] -= 1; //
atomicSub(&vis[u], 1); // set the ability of running sub one.
// Shrink the range: intersect u's edge range with the resident slice.
l = localBase>V[u]?localBase:V[u];
r = (localBase + localPart)<V[u + 1]?(localBase + localPart):V[u + 1];
for(int j = l; j < r; j++){ // get the end of u, j is the index of E and W, E[j] is the end of this edge, W[j] is the weight of this edge.
atomicMin(&predist[E[j - localBase]], dist[u] + W[j - localBase]); // the index is not true, need to add offset.
}
}
u += offset;
}
__syncthreads();
u = u0;
// Commit improvements; recompute how many slices touch u's edge range.
while(u < (*n)){
if(predist[u] < dist[u]){
dist[u] = predist[u];
vis[u] = (V[u + 1] + localPart - 1) / localPart - V[u] / localPart; // recalc the ability of running.
flag[0] = 1;
}
u += offset;
}
}
|
23,052 | #include <cuda.h>
#include <cuda_runtime.h>
#include<iostream>
// One thread per model: writes the position as the 4th float4 of each
// model's buffer entry (stride 4 float4s — presumably the translation
// column of a 4x4 matrix; confirm against the renderer's layout).
__global__ void kernel_update_models(float4* d_positions, float4* d_modelBuffer, int numel) {
	size_t col = threadIdx.x + blockIdx.x * blockDim.x;
	if (col >= numel) { return; }
	d_modelBuffer[col*4+3] = make_float4(
		d_positions[col].x,
		d_positions[col].y,
		d_positions[col].z,
		1
	);
	// Removed the trailing __syncthreads(): it followed a divergent early
	// return (undefined behaviour when some threads exit) and serves no
	// purpose here — the kernel uses no shared memory.
}
// Host wrapper: launches kernel_update_models over numel elements and
// blocks until it completes.
void launch_kernel_models(float4* d_positions, float4* d_modelBuffer, int numel) {
	dim3 blockSize(1024, 1, 1);
	// Ceil-div: the original `numel / 1024 + 1` launched a spurious extra
	// (fully idle) block whenever numel was a multiple of the block size.
	dim3 gridSize((numel + blockSize.x - 1) / blockSize.x);
	kernel_update_models <<< gridSize, blockSize >>> (d_positions, d_modelBuffer, numel);
	cudaDeviceSynchronize();
}
23,053 | #include "includes.h"
// Accumulates Gaussian density and color contributions of all atoms onto a
// regular grid slab. Each thread computes DUNROLLX output points spaced
// DBLOCKSZX apart along x (manual unrolling for ILP and coalesced stores).
// xyzr packs position in .xyz and a precomputed exponent scale in .w;
// z is the slab's starting z-plane offset; invisovalue scales the color
// contributions. Requires compile-time macros DUNROLLX and DBLOCKSZX.
__global__ static void gaussdensity_direct_tex(int natoms, const float4 *xyzr, const float4 *colors, float gridspacing, unsigned int z, float *densitygrid, float3 *voltexmap, float invisovalue) {
unsigned int xindex = (blockIdx.x * blockDim.x) * DUNROLLX + threadIdx.x;
unsigned int yindex = (blockIdx.y * blockDim.y) + threadIdx.y;
unsigned int zindex = (blockIdx.z * blockDim.z) + threadIdx.z;
// Flat output address within the slab (x fastest, then y, then z).
unsigned int outaddr =
((gridDim.x * blockDim.x) * DUNROLLX) * (gridDim.y * blockDim.y) * zindex +
((gridDim.x * blockDim.x) * DUNROLLX) * yindex + xindex;
zindex += z;
float coorx = gridspacing * xindex;
float coory = gridspacing * yindex;
float coorz = gridspacing * zindex;
// Per-lane accumulators for the DUNROLLX unrolled x-points.
float densityvalx1=0.0f;
float densityvalx2=0.0f;
float3 densitycolx1;
densitycolx1=make_float3(0.0f, 0.0f, 0.0f);
float3 densitycolx2=densitycolx1;
#if DUNROLLX >= 4
float densityvalx3=0.0f;
float densityvalx4=0.0f;
float3 densitycolx3=densitycolx1;
float3 densitycolx4=densitycolx1;
#endif
#if DUNROLLX >= 8
float densityvalx5=0.0f;
float densityvalx6=0.0f;
float densityvalx7=0.0f;
float densityvalx8=0.0f;
float3 densitycolx5=densitycolx1;
float3 densitycolx6=densitycolx1;
float3 densitycolx7=densitycolx1;
float3 densitycolx8=densitycolx1;
#endif
// Spacing between the unrolled x-points handled by the same thread.
float gridspacing_coalesce = gridspacing * DBLOCKSZX;
int atomid;
for (atomid=0; atomid<natoms; atomid++) {
float4 atom = xyzr[atomid];
float4 color = colors[atomid];
float dy = coory - atom.y;
float dz = coorz - atom.z;
float dyz2 = dy*dy + dz*dz;   // y/z distance shared by all x-points
float dx1 = coorx - atom.x;
float r21 = (dx1*dx1 + dyz2) * atom.w;
float tmp1 = exp2f(-r21);
densityvalx1 += tmp1;
tmp1 *= invisovalue;
densitycolx1.x += tmp1 * color.x;
densitycolx1.y += tmp1 * color.y;
densitycolx1.z += tmp1 * color.z;
float dx2 = dx1 + gridspacing_coalesce;
float r22 = (dx2*dx2 + dyz2) * atom.w;
float tmp2 = exp2f(-r22);
densityvalx2 += tmp2;
tmp2 *= invisovalue;
densitycolx2.x += tmp2 * color.x;
densitycolx2.y += tmp2 * color.y;
densitycolx2.z += tmp2 * color.z;
#if DUNROLLX >= 4
float dx3 = dx2 + gridspacing_coalesce;
float r23 = (dx3*dx3 + dyz2) * atom.w;
float tmp3 = exp2f(-r23);
densityvalx3 += tmp3;
tmp3 *= invisovalue;
densitycolx3.x += tmp3 * color.x;
densitycolx3.y += tmp3 * color.y;
densitycolx3.z += tmp3 * color.z;
float dx4 = dx3 + gridspacing_coalesce;
float r24 = (dx4*dx4 + dyz2) * atom.w;
float tmp4 = exp2f(-r24);
densityvalx4 += tmp4;
tmp4 *= invisovalue;
densitycolx4.x += tmp4 * color.x;
densitycolx4.y += tmp4 * color.y;
densitycolx4.z += tmp4 * color.z;
#endif
#if DUNROLLX >= 8
float dx5 = dx4 + gridspacing_coalesce;
float r25 = (dx5*dx5 + dyz2) * atom.w;
float tmp5 = exp2f(-r25);
densityvalx5 += tmp5;
tmp5 *= invisovalue;
densitycolx5.x += tmp5 * color.x;
densitycolx5.y += tmp5 * color.y;
densitycolx5.z += tmp5 * color.z;
float dx6 = dx5 + gridspacing_coalesce;
float r26 = (dx6*dx6 + dyz2) * atom.w;
float tmp6 = exp2f(-r26);
densityvalx6 += tmp6;
tmp6 *= invisovalue;
densitycolx6.x += tmp6 * color.x;
densitycolx6.y += tmp6 * color.y;
densitycolx6.z += tmp6 * color.z;
float dx7 = dx6 + gridspacing_coalesce;
float r27 = (dx7*dx7 + dyz2) * atom.w;
float tmp7 = exp2f(-r27);
densityvalx7 += tmp7;
tmp7 *= invisovalue;
densitycolx7.x += tmp7 * color.x;
densitycolx7.y += tmp7 * color.y;
densitycolx7.z += tmp7 * color.z;
float dx8 = dx7 + gridspacing_coalesce;
float r28 = (dx8*dx8 + dyz2) * atom.w;
float tmp8 = exp2f(-r28);
densityvalx8 += tmp8;
tmp8 *= invisovalue;
densitycolx8.x += tmp8 * color.x;
densitycolx8.y += tmp8 * color.y;
densitycolx8.z += tmp8 * color.z;
#endif
}
// Accumulate the per-thread partials into the global grid; the unrolled
// points are DBLOCKSZX apart so warp stores stay coalesced.
densitygrid[outaddr ] += densityvalx1;
voltexmap[outaddr ].x += densitycolx1.x;
voltexmap[outaddr ].y += densitycolx1.y;
voltexmap[outaddr ].z += densitycolx1.z;
densitygrid[outaddr+1*DBLOCKSZX] += densityvalx2;
voltexmap[outaddr+1*DBLOCKSZX].x += densitycolx2.x;
voltexmap[outaddr+1*DBLOCKSZX].y += densitycolx2.y;
voltexmap[outaddr+1*DBLOCKSZX].z += densitycolx2.z;
#if DUNROLLX >= 4
densitygrid[outaddr+2*DBLOCKSZX] += densityvalx3;
voltexmap[outaddr+2*DBLOCKSZX].x += densitycolx3.x;
voltexmap[outaddr+2*DBLOCKSZX].y += densitycolx3.y;
voltexmap[outaddr+2*DBLOCKSZX].z += densitycolx3.z;
densitygrid[outaddr+3*DBLOCKSZX] += densityvalx4;
voltexmap[outaddr+3*DBLOCKSZX].x += densitycolx4.x;
voltexmap[outaddr+3*DBLOCKSZX].y += densitycolx4.y;
voltexmap[outaddr+3*DBLOCKSZX].z += densitycolx4.z;
#endif
#if DUNROLLX >= 8
densitygrid[outaddr+4*DBLOCKSZX] += densityvalx5;
voltexmap[outaddr+4*DBLOCKSZX].x += densitycolx5.x;
voltexmap[outaddr+4*DBLOCKSZX].y += densitycolx5.y;
voltexmap[outaddr+4*DBLOCKSZX].z += densitycolx5.z;
densitygrid[outaddr+5*DBLOCKSZX] += densityvalx6;
voltexmap[outaddr+5*DBLOCKSZX].x += densitycolx6.x;
voltexmap[outaddr+5*DBLOCKSZX].y += densitycolx6.y;
voltexmap[outaddr+5*DBLOCKSZX].z += densitycolx6.z;
densitygrid[outaddr+6*DBLOCKSZX] += densityvalx7;
voltexmap[outaddr+6*DBLOCKSZX].x += densitycolx7.x;
voltexmap[outaddr+6*DBLOCKSZX].y += densitycolx7.y;
voltexmap[outaddr+6*DBLOCKSZX].z += densitycolx7.z;
densitygrid[outaddr+7*DBLOCKSZX] += densityvalx8;
voltexmap[outaddr+7*DBLOCKSZX].x += densitycolx8.x;
voltexmap[outaddr+7*DBLOCKSZX].y += densitycolx8.y;
voltexmap[outaddr+7*DBLOCKSZX].z += densitycolx8.z;
#endif
}
23,054 | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <curand_kernel.h>
#define ITER_PER_THREAD 256
#define NUMBER_OF_THREAD 256
// Monte-Carlo pi: each thread seeds its own cuRAND stream, throws
// ITER_PER_THREAD darts into the unit square, and writes its count of
// hits inside the quarter circle to a[thread]. Darts past *niter overall
// are generated but not counted, so the tail block stays exact.
__global__ void pi_cal(long *niter, long *a,curandState *state){
	long idx = (blockDim.x * blockIdx.x) + threadIdx.x;
	curand_init(110919, idx, 0, &state[idx]);
	long hits = 0;
	for (long i = 0; i < ITER_PER_THREAD; i++) {
		float px = curand_uniform(&state[idx]);
		float py = curand_uniform(&state[idx]);
		float r2 = (px * px) + (py * py);
		if ((idx * ITER_PER_THREAD + i) < (*niter) && r2 <= 1.0f)
			hits += 1;
	}
	a[idx] = hits;
}
// Estimates pi by Monte-Carlo on the GPU: one counter per thread, summed
// on the host. Usage: <exefile> <number_of_iteration>.
int main(int argc,char **argv){
	if(argc!=2){
		printf("Usage: <exefile> <number_of_iteration>\n");
		return 0;
	}
	long niter = atol(argv[1]);
	// Enough blocks so every requested dart is covered.
	int number_of_blocks = ceil(((double)(niter)/(ITER_PER_THREAD*NUMBER_OF_THREAD)));
	double pi =0.0;
	long m_size = number_of_blocks * NUMBER_OF_THREAD;
	// calloc zero-initialises, replacing malloc+memset (memset also relied
	// on <string.h>, which this file never included).
	long *final_memory = (long *) calloc(m_size, sizeof(long));
	long *d_a;
	long *d_iter;
	curandState *d_states;
	//allocate memory for current state to generate random number on device
	cudaMalloc((void **)&d_a,m_size*sizeof(long));
	cudaMalloc((void **)&d_states,m_size*sizeof(curandState));
	cudaMalloc((void **)&d_iter, sizeof(long));
	//copy the value of niter to device iter
	cudaMemcpy(d_iter,&niter,sizeof(long),cudaMemcpyHostToDevice);
	//invoke gpu kernal program for pi calculation
	pi_cal<<<number_of_blocks,NUMBER_OF_THREAD>>>(d_iter,d_a,d_states);
	//copy the result from d_a to hosts final_memory array
	cudaMemcpy(final_memory,d_a,m_size*sizeof(long),cudaMemcpyDeviceToHost);
	//Now sum all the values in host final_memory to pi
	for(long i=0;i<m_size;i++){
		pi+=final_memory[i];
	}
	//divide the final pi value by size
	pi = (double)(4.0 * pi) / (double)niter;
	// %ld for the long count: the original passed a long through "%d",
	// which is undefined behaviour on LP64 platforms.
	printf("# of trials= %ld , estimate of pi is %.16lf \n",niter,pi);
	//free the allocated memory
	free(final_memory);
	cudaFree(d_a);
	cudaFree(d_states);
	cudaFree(d_iter);
	return 0;
}
23,055 | #include <cstdlib>
#include <cassert>
#include <iostream>
// __global__ indicates it will called from the host and run on the device
// __device__ is for device/device and __host__ for host/host
// __global__ indicates it will called from the host and run on the device
// __device__ is for device/device and __host__ for host/host
//
// C = A * B for square N x N row-major matrices, one thread per output
// element of a 2D launch.
__global__ void matrixMul (float*a, float* b, float* c, int N)
{
	// get the global thread ID
	int row = blockIdx.x * blockDim.x + threadIdx.x;
	int col = blockIdx.y * blockDim.y + threadIdx.y;
	// guard against threads that fall outside the matrix
	if (row < N && col < N)
	{
		// Accumulate in float: the original used an `int` accumulator,
		// which truncates fractional products and overflows for large
		// inputs even though a, b and c are all float.
		float temp = 0.0f;
		for (int i = 0; i < N; i++)
		{
			temp += a[row*N + i] * b[i*N + col];
		}
		c[row*N + col] = temp;
	}
}
// 128x128 matrix multiply in unified memory, verified against a CPU
// reference computed with the same integer-valued inputs.
int main ()
{
	int N = 1 << 7;
	size_t bytes = N * N * sizeof(float);
	float *a, *b, *c;
	// using CUDA unified memory - we do not need to do memcpy to/from the GPU
	// this can be accessed from the host and device
	cudaMallocManaged(&a, bytes);
	cudaMallocManaged(&b, bytes);
	cudaMallocManaged(&c, bytes);
	for (int i = 0; i < N * N; i++)
	{
		a[i] = rand() % 100;
		b[i] = rand() % 100;
	}
	// number of thread blocks and threads per block
	const int THREADS_1D = 16;
	const int BLOCKS_1D = (N + THREADS_1D - 1)/THREADS_1D;
	// setup kernel launch parameters
	dim3 THREADS (THREADS_1D, THREADS_1D);
	dim3 BLOCKS (BLOCKS_1D, BLOCKS_1D);
	// Standard triple-chevron launch. The original built an argument array
	// for cudaLaunchKernel, passed `&args` (a pointer-to-array) and never
	// checked the returned status — a failed launch would make the asserts
	// below compare against uninitialised memory.
	matrixMul<<<BLOCKS, THREADS>>>(a, b, c, N);
	cudaDeviceSynchronize();
	// CPU reference check; products of small ints sum well within float's
	// exactly-representable range, so == comparison is exact here.
	for (int i = 0; i < N; i++)
	{
		for (int j = 0; j < N; j++)
		{
			int temp = 0;
			for (int k = 0; k < N; k++)
			{
				temp += a[i*N + k] * b[k*N + j];
			}
			assert(c[i*N + j] == temp);
		}
	}
	std::cout << "Program completed!" << std::endl;
	// Release the unified-memory allocations (leaked in the original).
	cudaFree(a);
	cudaFree(b);
	cudaFree(c);
	return 0;
}
23,056 | #include "includes.h"
// Grid-stride loop over N doubles: applies `niters` rounds of a fixed
// add/divide chain to each input and stores the result. Compute-heavy
// by design (double-precision throughput exercise).
__global__ void lots_of_double_compute(double *inputs, int N, size_t niters, double *outputs)
{
	const size_t stride = gridDim.x * blockDim.x;
	for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
	{
		double v = inputs[i];
		for (size_t it = 0; it < niters; it++)
		{
			v = (v + 5.0) - 101.0;
			v = (v / 3.0) + 102.0;
			v = (v + 1.07) - 103.0;
			v = (v / 1.037) + 104.0;
			v = (v + 3.00) - 105.0;
			v = (v / 0.22) + 106.0;
		}
		outputs[i] = v;
	}
}
23,057 | // get cuda max hardware concurrency
// by stdio2016 2023-03-18
#include<cuda.h>
#include<stdio.h>
__device__ int current_concurrency = 0;
// Busy-waits roughly `time` device clock ticks (clock64 counts per-SM
// cycles). Used to hold kernels resident so concurrency can be observed.
__device__ void waitClockGpu(int time) {
  const long long start = clock64();
  do {
  } while (clock64() - start < time);
}
// Each block-resident thread bumps a global "currently running" counter,
// waits, samples the peak into *max_concurrency, waits again, and leaves.
// Fix: the source was HTML-entity-mangled — `&curren` had been collapsed
// into `¤`, so `&current_concurrency` read `¤t_concurrency`
// and the kernel did not compile.
__global__ void concurrency_test(int *max_concurrency) {
  atomicAdd(&current_concurrency, 1);
  waitClockGpu(1000000);
  atomicMax(max_concurrency, current_concurrency);
  waitClockGpu(1000000);
  atomicAdd(&current_concurrency, -1);
}
// Asks for a stream count, floods each stream with concurrency_test
// launches, then reports the peak number of simultaneously running
// threads observed on the device.
int main(int a, char*b[]){
int *max_concurrency;
cudaMalloc(&max_concurrency, sizeof(int));
// NOTE(review): max_concurrency is never zero-initialised (no cudaMemset);
// atomicMax against uninitialised memory may report garbage — confirm.
int num_streams = 0;
printf("number of streams: ");
scanf("%d", &num_streams);
cudaStream_t *ts = new cudaStream_t[num_streams];
for (int i = 0; i < num_streams; i++) {
cudaStreamCreate(&ts[i]);
}
// Queue 100 launches per stream so work from different streams overlaps.
for (int i = 0; i < num_streams; i++) {
for (int j = 0; j < 100; j++) {
concurrency_test<<<100, 256, 0, ts[i]>>>(max_concurrency);
}
//cudaStreamQuery(ts[i]);
printf("stream %d sent\n", i);
}
// Wait for every stream to drain before reading the result.
for (int i = 0; i < num_streams; i++) {
cudaStreamSynchronize(ts[i]);
printf("stream %d synchronized\n", i);
}
int num = 0;
cudaMemcpy(&num, max_concurrency, sizeof(int), cudaMemcpyDeviceToHost);
printf("max concurrency: %d\n", num);
for (int i = 0; i < num_streams; i++) {
cudaStreamDestroy(ts[i]);
}
delete[] ts;
cudaFree(max_concurrency);
}
|
23,058 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/count.h>
int main() {
	/**
	 * \brief armando2D v2.0
	 *
	 * An SPH code for non stationary fluid dynamics.
	 * This is the reviewed and improved C version of Armando v1.0
	 * developed at CERN in 2008
	 *
	 * \date May 2, 2012
	 * \author Luca Massidda
	 */
	// Thrust smoke test: fills a device array with 0..9, counts how many of
	// the first five equal 3, and prints the sequence.
	int i, pout;
	int hhash[10];
	int *dhash;
	cudaMalloc((void**) &dhash, (10 * sizeof(int)));
	// Wrap the raw device pointer so Thrust algorithms can use it.
	thrust::device_ptr<int> thash(dhash);
	thrust::sequence(thash, thash +10, 0);
	pout = thrust::count(thash, thash + 5, 3);
	printf("%d\n\n", pout);
	thrust::copy(thash, thash +10, hhash);
	for (i = 0; i < 10; i++) {
		printf("%d %d \n", i, hhash[i]);
	}
	cudaFree(dhash);   // was leaked in the original
	return 0;
}
|
23,059 | #include <iostream>
#include <math.h>
// function to add the elements of two arrays
// function to add the elements of two arrays: y[i] += x[i].
// Fix: added the `index < n` guard — the original wrote out of bounds
// whenever n was not an exact multiple of the grid size (it only worked
// because main() happened to choose N divisible by the block size).
__global__ void add(int n, float *x, float *y) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < n)
    y[index] = x[index] + y[index];
}
// Adds two 1M-element arrays on the GPU via unified memory, times the
// launch+sync with the host clock, and verifies every result is 3.0f.
int main(void) {
int N = 1 << 20; // 1M elements
float *x, *y;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the CPU
// NOTE(review): clock() measures host CPU time around an async launch +
// sync — it is a rough wall-clock proxy, not kernel time; cudaEvent timing
// would be the accurate tool.
clock_t start = clock();
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x, y);
//wait for GPU to finish before accessing host
cudaDeviceSynchronize();
clock_t stop = clock();
double elapsed = (double)(stop - start) * 1000.0 /
CLOCKS_PER_SEC;
printf("Time elapsed in ms: %f", elapsed);
printf("HOE!!!!");
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
std::cout << "\nMax error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
23,060 | #include <cstdio>
using namespace std;
// Adds this block's prefix increment (block_incrs[blockIdx.x]) to every
// element the block owns. Requires at least sizeof(float) of dynamic
// shared memory for the broadcast slot.
__global__ void adder(float* arr, float* block_incrs, int n) {
  extern __shared__ float incr[];
  if (threadIdx.x == 0)
    incr[0] = block_incrs[blockIdx.x];
  __syncthreads();   // broadcast slot must be written before anyone reads it
  int g = blockIdx.x * blockDim.x + threadIdx.x;
  if (g < n)
    arr[g] += incr[0];
}
// Per-block exclusive prefix sum (Hillis-Steele, double-buffered shared
// memory). Inputs are shifted one slot right on load so the inclusive scan
// below yields an exclusive result; each block also emits its total into
// block_sums for a second-level scan. Needs 2*blockDim.x floats of dynamic
// shared memory.
__global__ void hillis_steele(float* g_idata, float* g_odata, int n,
float* block_sums) {
// all memory writes to be serviced immediately
extern volatile __shared__ float temp[];
int tid = threadIdx.x;
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
int block_size = blockDim.x;
// pout/pin select which half of temp is being written/read this round.
int pout = 0, pin = 1;
float last_element;
// load input into shared memory
// Shift right by one (exclusive scan); slot 0 and out-of-range slots get 0.
if (tid == 0 || gtid >= n)
temp[tid] = 0;
else
temp[tid] = g_idata[gtid - 1];
// The last thread remembers the element dropped by the shift so the
// block total can be reconstructed at the end.
if (tid == block_size - 1)
if (gtid < n)
last_element = g_idata[gtid];
else
last_element = 0.0;
// entire temp should've gotten populated
__syncthreads();
// if (gtid == 5) {
// std::printf("global : %f===============\n", g_idata[gtid]);
// std::printf("temp : %f===============\n", temp[tid]);
// }
// log2(block_size) rounds; each round adds the value `offset` to the left.
for (int offset = 1; offset < block_size; offset *= 2) {
pout = 1 - pout; // swap double buffer indices
pin = 1 - pout;
if (tid >= offset)
temp[pout * block_size + tid] =
temp[pin * block_size + tid] + temp[pin * block_size + tid - offset];
else
temp[pout * block_size + tid] = temp[pin * block_size + tid];
__syncthreads(); // I need this here before I start next iteration
}
if (gtid < n) g_odata[gtid] = temp[pout * block_size + tid];
// Block total = last exclusive prefix + the element the shift dropped.
if (tid == block_size - 1)
block_sums[blockIdx.x] = last_element + temp[pout * block_size + tid];
}
// Full-array exclusive scan: per-block Hillis-Steele scan, a second-level
// scan of the block sums (assumes num_blocks <= threads_per_block), then a
// pass adding each block's increment back in. Copies `in` to the device
// and the result back to `out`.
__host__ void scan(const float* in, float* out, unsigned int n,
                   unsigned int threads_per_block) {
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  float *din, *dout, *block_sums, *block_incrs, *dummy;
  cudaMalloc((void**)&din, n * sizeof(float));
  cudaMalloc((void**)&dout, n * sizeof(float));
  cudaMallocManaged((void**)&block_sums, num_blocks * sizeof(float));
  cudaMallocManaged((void**)&block_incrs, num_blocks * sizeof(float));
  cudaMallocManaged((void**)&dummy, sizeof(float));
  // FIX: the original passed cudaMemcpyHostToHost here, so the device
  // buffer din was never filled from the host input.
  cudaMemcpy(din, in, n * sizeof(float), cudaMemcpyHostToDevice);
  // Per-block exclusive scan; each block writes its total to block_sums.
  hillis_steele<<<num_blocks, threads_per_block,
                  2 * threads_per_block * sizeof(float)>>>(din, dout, n,
                                                           block_sums);
  cudaDeviceSynchronize();
  // get the block increments (scan it once because of assumption)
  int new_num_blocks = (num_blocks + threads_per_block - 1) /
                       threads_per_block; // will always be 1
  hillis_steele<<<new_num_blocks, threads_per_block,
                  2 * threads_per_block * sizeof(float)>>>(
      block_sums, block_incrs, num_blocks, dummy);
  cudaDeviceSynchronize();
  // add each block increment to each block
  adder<<<num_blocks, threads_per_block, sizeof(float)>>>(dout, block_incrs, n);
  cudaDeviceSynchronize();
  cudaMemcpy(out, dout, n * sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(din);
  cudaFree(dout);
  cudaFree(block_sums);
  cudaFree(block_incrs);
  cudaFree(dummy);
}
|
23,061 | #include<stdio.h>
__managed__ int sum=0;
// Each thread folds one array element into the managed accumulator `sum`.
// Intended for a single-block launch with at least *n threads.
__global__ void Array_sum(int *a, int *n)
{
    int idx = threadIdx.x;
    if (idx >= *n)
        return;
    atomicAdd(&sum, a[idx]);
}
// Fills an array with random values, sums it on the GPU via atomicAdd into
// the managed variable `sum`, and prints the result.
int main()
{
    int n = 10, i;
    int a[n];
    int *cuda_a, *cuda_n;
    for(i=0; i<n; i++)
    {
        a[i] = rand()%100;
        printf("%d ", a[i]);
    }
    printf("\n");
    cudaMalloc((void**)&cuda_a, n*sizeof(int));
    cudaMalloc((void**)&cuda_n, sizeof(int));
    cudaMemcpy(cuda_a, a, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_n, &n, sizeof(int), cudaMemcpyHostToDevice);
    Array_sum <<<1, n>>>(cuda_a, cuda_n);
    // BUG FIX: kernel launches are asynchronous; without this sync the host
    // may read (or page-fault on) the managed `sum` before the GPU writes it.
    cudaDeviceSynchronize();
    printf("Sum:%d\n", sum);
    cudaFree(cuda_a);
    cudaFree(cuda_n);
    return 0;
}
23,062 | #include "includes.h"
// 5x5 box-blur ("smooth") over an 8-bit image, one pixel per thread.
// NOTE(review): the neighbor offsets use a row stride of (n_colunas+4),
// which implies `entrada` is a padded buffer with a 4-column apron while
// n_linhas/n_colunas describe the unpadded output — confirm against caller.
__global__ void smooth( unsigned char *entrada,unsigned char *saida, int n_linhas, int n_colunas ) {
    // Compute the position in the vector (block_id * block_size + thread_id)
    int posicao = blockIdx.x * blockDim.x + threadIdx.x;
    // If the position is within the bounds of the original image...
    if(posicao < (n_linhas)*(n_colunas)) {
        // sum the values of the 5x5 region around the pixel
        saida[posicao] =entrada[posicao]+
        entrada[posicao+(n_colunas+4)]+
        entrada[posicao+(2*(n_colunas+4))]+
        entrada[posicao+(3*(n_colunas+4))]+
        entrada[posicao+(4*(n_colunas+4))]+
        entrada[posicao+1]+
        entrada[posicao+(n_colunas+4)+1]+
        entrada[posicao+(2*(n_colunas+4))+1]+
        entrada[posicao+(3*(n_colunas+4))+1]+
        entrada[posicao+(4*(n_colunas+4))+1]+
        entrada[posicao+2]+
        entrada[posicao+(n_colunas+4)+2]+
        entrada[posicao+(2*(n_colunas+4))+2]+
        entrada[posicao+(3*(n_colunas+4))+2]+
        entrada[posicao+(4*(n_colunas+4))+2]+
        entrada[posicao+3]+
        entrada[posicao+(n_colunas+4)+3]+
        entrada[posicao+(2*(n_colunas+4))+3]+
        entrada[posicao+(3*(n_colunas+4))+3]+
        entrada[posicao+(4*(n_colunas+4))+3]+
        entrada[posicao+4]+
        entrada[posicao+(n_colunas+4)+4]+
        entrada[posicao+(2*(n_colunas+4))+4]+
        entrada[posicao+(3*(n_colunas+4))+4]+
        entrada[posicao+(4*(n_colunas+4))+4];
        // compute the average
        // NOTE(review): the 25-term sum is accumulated into the 8-bit output
        // before dividing, so it wraps modulo 256 — likely unintended; verify.
        saida[posicao] = saida[posicao]/25;
    }
}
23,063 | #include "includes.h"
// Column-wise running histogram ("integral") per bin: for each image column x
// and each bin value, accumulates how many pixels with value == bin have been
// seen from row 0 down to row y, writing the running count at every row.
// Layout: threadIdx.y selects the column within the block, threadIdx.x the bin.
// NOTE(review): pixels[16] implies blockDim.y <= 16 — confirm launch config.
// NOTE(review): `bin > nbins` lets bin == nbins through; an exclusive bound
// (>=) looks intended — verify against binPitch/nbins at the call site.
// NOTE(review): the early returns skip the in-loop __syncthreads(); that is a
// divergent-barrier hazard whenever width is not a multiple of blockDim.y.
__global__ void integrateBins(int width, int height, int nbins, int* devImage, int binPitch, int* devIntegrals) {
    __shared__ int pixels[16];
    const int blockX = blockDim.y * blockIdx.x;
    const int threadX = threadIdx.y;
    const int bin = threadIdx.x;
    const int x = blockX + threadX;
    if (x >= width) return;
    if (bin > nbins) return;
    int* imagePointer = devImage + x;
    int* outputPointer = devIntegrals + binPitch * x + bin;
    int accumulant = 0;
    for(int y = 0; y < height; y++) {
        // bin-0 threads stage this row's pixels for the whole block
        if (bin == 0) {
            pixels[threadX] = *imagePointer;
        }
        __syncthreads();
        if (pixels[threadX] == bin) accumulant++;
        *outputPointer = accumulant;
        imagePointer += width;            // advance one row in the image
        outputPointer += width * binPitch; // advance one row in the output
    }
}
23,064 | #include<stdio.h>
#include<stdlib.h>
#include<curand_kernel.h>
#include<curand.h>
#include<sys/time.h>
unsigned int NUM_PARTICLES = 100000;
unsigned int NUM_ITERATIONS = 10;
unsigned int BLOCK_SIZE = 192;
unsigned int GRID_SIZE = ((NUM_PARTICLES/BLOCK_SIZE) + 1);
typedef struct {
float3 posId;
}position;
typedef struct {
float3 velId;
}velocity;
typedef struct {
position pos;
velocity vel;
}Particle;
// Seed every particle with a random state: positions uniform in [0, 10],
// velocities uniform in [0, 100].
void fill_data(Particle *p)
{
    for (int i = 0; i < NUM_PARTICLES; i++)
    {
        Particle *cur = &p[i];
        cur->pos.posId.x = 10 * ((float)rand() / RAND_MAX);
        cur->pos.posId.y = 10 * ((float)rand() / RAND_MAX);
        cur->pos.posId.z = 10 * ((float)rand() / RAND_MAX);
        cur->vel.velId.x = 100 * ((float)rand() / RAND_MAX);
        cur->vel.velId.y = 100 * ((float)rand() / RAND_MAX);
        cur->vel.velId.z = 100 * ((float)rand() / RAND_MAX);
    }
}
// One Euler step: advance each particle's position by its velocity.
// One thread per particle, with a tail guard for partial blocks.
__global__ void particle_kernel_per_iteration(Particle *p, int Nparticles)
{
    int i = (blockIdx.x*blockDim.x)+threadIdx.x;
    if(i < Nparticles) {
        p[i].pos.posId.x += p[i].vel.velId.x;
        p[i].pos.posId.y += p[i].vel.velId.y;
        p[i].pos.posId.z += p[i].vel.velId.z;
    }
    // FIX: removed a trailing __syncthreads() — the kernel uses no shared
    // memory and threads do not communicate, so the barrier did nothing.
}
// Runs NUM_ITERATIONS rounds of: upload particles, advance positions on the
// GPU, download, then refresh velocities on the host. Prints total wall time.
void update_velocity_position_in_gpu(Particle *p)
{
    struct timeval start_time;
    struct timeval stop_time;
    Particle *gPar = NULL;
    cudaMalloc(&gPar, NUM_PARTICLES*sizeof(Particle));
    // BUG FIX: compute the grid size from the *current* globals. The
    // file-scope GRID_SIZE was evaluated at static-init time from the default
    // NUM_PARTICLES/BLOCK_SIZE and is stale once main() overrides them.
    unsigned int grid_size = (NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE;
    //Start time
    gettimeofday(&start_time, NULL);
    for(int i=0; i<NUM_ITERATIONS; i++)
    {
        // Copy Data to GPU Memory
        cudaMemcpy(gPar, p, NUM_PARTICLES*sizeof(Particle), cudaMemcpyHostToDevice);
        //Launch kernel
        particle_kernel_per_iteration<<<grid_size, BLOCK_SIZE>>>(gPar, NUM_PARTICLES);
        cudaDeviceSynchronize();
        //Copy Data back to Host
        cudaMemcpy(p, gPar, NUM_PARTICLES*sizeof(Particle), cudaMemcpyDeviceToHost);
        //Update Velocity in Host before copying data to GPU Memory
        for(int j=0; j<NUM_PARTICLES;j++)
        {
            p[j].vel.velId.x = 100*((float)rand()/RAND_MAX);
            p[j].vel.velId.y = 100*((float)rand()/RAND_MAX);
            p[j].vel.velId.z = 100*((float)rand()/RAND_MAX);
        }
    }
    //Stop time
    gettimeofday(&stop_time, NULL);
    printf("Total time of Execution in GPU: %ld msec\n\n",
           ((stop_time.tv_sec*1000000 + stop_time.tv_usec)-(start_time.tv_sec*1000000 + start_time.tv_usec))/1000);
    cudaFree(gPar);
}
// Parses NUM_PARTICLES and BLOCK_SIZE from the command line, then runs the
// GPU particle-update benchmark.
int main(int argc, char *argv[])
{
    if(argc != 3)
    {
        printf("No. of arguments to be passed should be 2 i.e. 1st as NUM_PARTICLES and 2nd as BLOCK_SIZE\n");
        exit(1);
    }
    NUM_PARTICLES = atoi(argv[1]);
    BLOCK_SIZE = atoi(argv[2]);
    // robustness: atoi returns 0 on garbage, which would divide by zero below
    if (NUM_PARTICLES == 0 || BLOCK_SIZE == 0)
    {
        printf("NUM_PARTICLES and BLOCK_SIZE must be positive integers\n");
        exit(1);
    }
    // BUG FIX: GRID_SIZE was computed at static-init time from the default
    // globals and never refreshed after the overrides above, so the kernel
    // could be launched with far too few (or too many) blocks.
    GRID_SIZE = (NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE;
    Particle *par = NULL;
#ifdef CUDAMALLOCHOST
    cudaMallocHost(&par, NUM_PARTICLES*sizeof(Particle));
#else
    par = (Particle*)malloc(NUM_PARTICLES*sizeof(Particle));
#endif
    fill_data(par);
    update_velocity_position_in_gpu(par);
#ifdef CUDAMALLOCHOST
    cudaFree(par);
#else
    free(par);
#endif
    return 0;
}
|
23,065 | #include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <string>
#include <cuda.h>
#define ThreadNum 256
#define BlockNum 16
// Debug helper: every launched thread prints the NUL-terminated string.
__global__ void printOut(char *string) {
    printf("%s\n", string);
}
// Returns the size of `filename` in bytes, or 0 if stat() fails.
// BUG FIX: the stat() return value was ignored, so a missing file returned
// whatever garbage happened to be in the uninitialized stat buffer.
size_t getFileSize(char *filename) {
    struct stat st;
    if (stat(filename, &st) != 0) {
        return 0;
    }
    return (size_t)st.st_size;
}
// Splits the buffer `aim` into lines IN PLACE (strtok replaces each '\n'
// with '\0') and builds a table of byte offsets to the start of each line.
// On return *offset_table owns a heap array of *entry offsets; the caller
// must free() it. The offsets index into the (now mutated) `aim` buffer.
void parsing(char *aim, long int **offset_table, int *entry) {
    int limit = 1024;  // current capacity of the offset table
    int i;
    long int *tmp_offset = (long int*) malloc(sizeof(long int) * limit);
    char *token = strtok(aim, "\n");
    for (i = 0; token != NULL; i ++) {
        // grow the table in 1024-entry chunks
        if (i == limit) {
            limit += 1024;
            tmp_offset = (long int*) realloc(tmp_offset, sizeof(long int) * limit);
        }
        tmp_offset[i] = token - aim;  // offset of this line's first byte
        token = strtok(NULL, "\n");
    }
    printf("Count %d\n", i);
    // realloc table
    tmp_offset = (long int*) realloc(tmp_offset, sizeof(long int) * i);
    // assign & return
    *offset_table = tmp_offset;
    *entry = i;
}
// Device-side strlen: length of a NUL-terminated string, excluding the NUL.
__device__ int strlen(char *s) {
    int n = 0;
    while (s[n] != '\0') {
        ++n;
    }
    return n;
}
// Device-side naive substring search: returns a pointer to the first
// occurrence of `b` inside `a`, or NULL when absent. An empty `b` matches
// at the start of `a`, mirroring host strstr.
__device__ char *strstrDevice(char *a, char *b) {
    int hay_len = strlen(a);
    int pat_len = strlen(b);
    for (int start = 0; start + pat_len <= hay_len; start++) {
        int k = 0;
        while (k < pat_len && a[start + k] == b[k]) {
            k++;
        }
        if (k == pat_len) {
            return a + start;
        }
    }
    return NULL;
}
// One thread per line: sets result[index] to 1 if line `index` of `string`
// (addressed through offset_table) contains the pattern `aim`, else 0.
// `base` offsets the thread index so a fixed-size grid can be launched
// repeatedly over a file with more lines than threads.
__global__ void matching(char *aim, char *string, long int *offset_table, int entry, int base, int *result) {
    int t_id = threadIdx.x;
    int b_id = blockIdx.x;
    int b_dim = blockDim.x;
    int index = base + b_id * b_dim + t_id;
    //int aim_len = offset_table[index + 1] - offset_table[index];
    //if (index < entry && strstrDevice(string + offset_table[index], aim_len, "apple", 5) != NULL) {
    if (index < entry && strstrDevice(string + offset_table[index], aim) != NULL) {
        result[index] = 1;
    } else {
        result[index] = 0;
    }
}
// qsort comparator: orders ints ascending.
int myCmp(const void *a, const void *b) {
    int lhs = *(const int *)a;
    int rhs = *(const int *)b;
    return lhs - rhs;
}
// mmaps the file named on the command line, splits it into lines, and flags
// (in `result`) every line containing "apple" using a GPU substring search.
int main(int argc, char *argv[]) {
    // robustness: the original dereferenced argv[1] unconditionally
    if (argc < 2) {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }
    char *filename = argv[1];
    int fd = open(filename, O_RDONLY, 0644);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    // get mmap data (+1 byte so the buffer can be NUL-terminated)
    size_t file_len = getFileSize(filename) + 1;
    char *filecontent = (char*) mmap(NULL, file_len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (filecontent == MAP_FAILED) {
        perror("mmap");
        close(fd);
        return 1;
    }
    filecontent[file_len - 1] = '\0';
    // parsing (mutates filecontent in place, returns heap offset table)
    long int *offset_table;
    int entry;
    parsing(filecontent, &offset_table, &entry);
    // copy data to device
    char *HD_filecontent;
    cudaMalloc(&HD_filecontent, file_len);
    cudaMemcpy(HD_filecontent, filecontent, file_len, cudaMemcpyHostToDevice);
    // copy offset table to device
    long int *D_offset_table;
    cudaMalloc(&D_offset_table, sizeof(long int) * entry);
    cudaMemcpy(D_offset_table, offset_table, sizeof(long int) * entry, cudaMemcpyHostToDevice);
    // matching: fixed-size grid relaunched until every line is covered
    int round_limit = ceil(entry / (float) (ThreadNum * BlockNum));
    int i;
    int *result;
    cudaMallocManaged(&result, sizeof(int) * entry);
    char *aim;
    cudaMallocManaged(&aim, sizeof(char) * 6);
    strcpy(aim, "apple");
    cudaDeviceSynchronize();
    for (i = 0; i < round_limit; i ++) {
        matching<<<BlockNum, ThreadNum>>>(aim, HD_filecontent, D_offset_table, entry, i * ThreadNum * BlockNum, result);
    }
    cudaDeviceSynchronize();
    qsort(result, entry, sizeof(int), myCmp);
    // FIX: release everything — the original leaked the mapping, the fd,
    // the offset table and all device/managed allocations.
    cudaFree(result);
    cudaFree(aim);
    cudaFree(HD_filecontent);
    cudaFree(D_offset_table);
    free(offset_table);
    munmap(filecontent, file_len);
    close(fd);
    return 0;
}
|
23,066 | #include <iostream>
// Prints a start-up banner and exits.
int main() {
    std::cout << "basic/hello initialized!" << '\n' << std::flush;
    return 0;
}
|
23,067 | #include "includes.h"
// Element-wise binarization: output[i] = 1 when vector[i] > threshold, else 0.
// One thread per element with a tail guard.
__global__ void binarize_f32 (float* vector, float threshold, float* output, int len) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= len) return;
    output[i] = (vector[i] > threshold) ? 1 : 0;
}
23,068 | /*
multiplication table using CUDA
refer : http://blog.daum.net/heoly/7 (Thank you)
*/
#include <stdio.h>
#include <malloc.h>
#include <cuda_runtime.h>
#define BLOCK_SIZE 8
#define THREAD_SIZE 9
// Device code
// Multiplication-table kernel: block `row` computes the (row+2)-times table,
// thread `col` fills one column: result[row][col] = (row+2) * (col+1).
__global__ void test(int *result)
{
    int col = threadIdx.x; //x-coordinate of thread
    int row = blockIdx.x;  //x-coordinate of block
    result[THREAD_SIZE * row + col] = (row + 2) * (col + 1);
}
// Host code
// Host code: builds a 2..9 multiplication table on the GPU and prints it.
int main()
{
    int *host_Result;   //Save result data of host
    int *device_Result; //Save result data of device
    int i=0, j=0;
    //Allocate host memory
    host_Result = (int *)malloc( BLOCK_SIZE * THREAD_SIZE * sizeof(int) );
    // robustness: bail out instead of dereferencing NULL on allocation failure
    if (host_Result == NULL)
    {
        printf("host allocation failed\n");
        return 1;
    }
    //Allocate device memory
    cudaMalloc( (void**) &device_Result, sizeof(int) * BLOCK_SIZE * THREAD_SIZE);
    //Function name <<<BLOCK_SIZE, THREAD_SIZE>>> parameters
    test <<<BLOCK_SIZE, THREAD_SIZE>>>(device_Result); //Execute Device code
    //Copy device result to host result
    cudaMemcpy( host_Result, device_Result, sizeof(int) * BLOCK_SIZE * THREAD_SIZE, cudaMemcpyDeviceToHost );
    //Print result
    for(j=0; j<BLOCK_SIZE; j++)
    {
        printf("%3d step\n", (j + 2));
        for(i=0; i<THREAD_SIZE; i++)
        {
            printf("%3d X %3d = %3d\n", j+2, i+1, host_Result[j * THREAD_SIZE + i]);
        }
        printf("\n");
    }
    free(host_Result);       //Free host memory
    cudaFree(device_Result); //Free device memory
    // BUG FIX: the original returned 1, which by convention signals failure
    // to the shell even though the program succeeded.
    return 0;
}
|
23,069 | #include <stdio.h>
// Device-callable helper that prints a fixed greeting (currently unused;
// kept as an example of calling a __device__ function from a kernel).
__device__ void helloCalledFromDevice()
{
    printf("Device fn hello from GPU\n");
}
// Each launched thread prints a greeting tagged with its thread index.
__global__ void helloFromGPU()
{
    printf("Hello from GPU thread %d\n", threadIdx.x);
    //helloCalledFromDevice();
}
// Launches 2 blocks of 5 threads, each printing a greeting, and waits for
// the (asynchronous) kernel to finish so the output is flushed.
int main()
{
    printf("Hello from CPU\n");
    helloFromGPU<<<2, 5>>>();
    cudaDeviceSynchronize();
}
|
23,070 | #include <stdio.h>
// Forward pass, stage 1: partial dot product of weight row blockIdx.z with
// the activation vector a_D, reduced across a 1024-thread block in shared
// memory. Each (blockIdx.y, blockIdx.x) tile writes one partial into
// res1_D[i*64 + tile]; forward_step2 finishes the 64-way reduction.
// Assumes blockDim.x*blockDim.y == 1024 (the unrolled reduction depends on it).
__global__ void forward_step1(float *weight_D, float *a_D, float *res1_D, unsigned int columns) {
    unsigned int tid = blockDim.x*threadIdx.y + threadIdx.x;
    unsigned int i = blockIdx.z;
    unsigned int j = (gridDim.x*blockIdx.y+blockIdx.x)*blockDim.x*blockDim.y + tid;
    __shared__ float partial_sums[1024];
    // out-of-range lanes contribute zero so the tree reduction stays valid
    if (j < columns) {
        partial_sums[tid] = a_D[j]*weight_D[i*columns+j];
    } else {
        partial_sums[tid] = 0;
    }
    __syncthreads();
    // unrolled binary tree reduction; every barrier sits outside the guard
    // so all 1024 threads reach it
    if (tid < 512) { partial_sums[tid] += partial_sums[tid+512]; } __syncthreads();
    if (tid < 256) { partial_sums[tid] += partial_sums[tid+256]; } __syncthreads();
    if (tid < 128) { partial_sums[tid] += partial_sums[tid+128]; } __syncthreads();
    if (tid < 64) { partial_sums[tid] += partial_sums[tid+ 64]; } __syncthreads();
    if (tid < 32) { partial_sums[tid] += partial_sums[tid+ 32]; } __syncthreads();
    if (tid < 16) { partial_sums[tid] += partial_sums[tid+ 16]; } __syncthreads();
    if (tid < 8) { partial_sums[tid] += partial_sums[tid+ 8]; } __syncthreads();
    if (tid < 4) { partial_sums[tid] += partial_sums[tid+ 4]; } __syncthreads();
    if (tid < 2) { partial_sums[tid] += partial_sums[tid+ 2]; } __syncthreads();
    if (tid < 1) {
        res1_D[i*64 + gridDim.x*blockIdx.y + blockIdx.x] = partial_sums[0]+partial_sums[1];
    }
}
// Forward pass, stage 2: reduces the 64 partials produced by forward_step1
// for neuron blockIdx.z, adds the bias, and applies the logistic sigmoid.
// Assumes a 64-thread block (one thread per partial).
__global__ void forward_step2(float *res1_D, float *bias_D, float *a_D) {
    unsigned int i = blockIdx.z;
    unsigned int tid = blockDim.x*threadIdx.y + threadIdx.x;
    __shared__ float partial_sums[64];
    partial_sums[tid] = res1_D[64*i+tid];
    __syncthreads();
    if (tid < 32) { partial_sums[tid] += partial_sums[tid+ 32]; } __syncthreads();
    if (tid < 16) { partial_sums[tid] += partial_sums[tid+ 16]; } __syncthreads();
    if (tid < 8) { partial_sums[tid] += partial_sums[tid+ 8]; } __syncthreads();
    if (tid < 4) { partial_sums[tid] += partial_sums[tid+ 4]; } __syncthreads();
    if (tid < 2) { partial_sums[tid] += partial_sums[tid+ 2]; } __syncthreads();
    if (tid < 1) {
        partial_sums[0] += partial_sums[1]+bias_D[i];
        // a = sigmoid(z)
        a_D[i] = 1/(1+expf(-partial_sums[0]));
    }
}
// Output-layer delta for squared-error loss with sigmoid activation:
// delta = (a - y) * sigmoid'(z), where sigmoid'(z) = a * (1 - a).
// One neuron per blockIdx.z.
__global__ void output_error(float *aL_D, float *y_D, float *deltaL_D) {
    unsigned int i = blockIdx.z;
    deltaL_D[i] = (aL_D[i]-y_D[i])*aL_D[i]*(1-aL_D[i]);
    /*
    unsigned int tid = blockDim.x*threadIdx.y + threadIdx.x;
    __shared__ float results[2]
    if (tid == 0) {
        results[0] = a[i]-y[i];
    } else if (tid == 1) {
        results[1] = 1-a[i];
    }
    __syncthreads();
    deltaL_D[i] = results[0]*results[1]*aL_D[i];
    */
}
// Backprop, stage 1: partial sums of W^T * delta for hidden unit blockIdx.z
// (column i of weight_D), reduced across a 1024-thread block. Writes one
// partial per (blockIdx.y, blockIdx.x) tile into res1_D for backward_step2.
// Assumes blockDim.x*blockDim.y == 1024.
__global__ void backward_step1(float *weight_D, float *delta_D, float *res1_D, unsigned int columns, unsigned int rows) {
    unsigned int i = blockIdx.z;
    unsigned int tid = blockDim.x*threadIdx.y + threadIdx.x;
    unsigned int j = (gridDim.x*blockIdx.y+blockIdx.x)*blockDim.x*blockDim.y + tid;
    __shared__ float partial_sums[1024];
    // note the transposed access: column i, row j of the weight matrix
    if (j<rows) {
        partial_sums[tid] = weight_D[j*columns+i]*delta_D[j];
    } else {
        partial_sums[tid] = 0;
    }
    __syncthreads();
    if (tid < 512) { partial_sums[tid] += partial_sums[tid+512]; } __syncthreads();
    if (tid < 256) { partial_sums[tid] += partial_sums[tid+256]; } __syncthreads();
    if (tid < 128) { partial_sums[tid] += partial_sums[tid+128]; } __syncthreads();
    if (tid < 64) { partial_sums[tid] += partial_sums[tid+ 64]; } __syncthreads();
    if (tid < 32) { partial_sums[tid] += partial_sums[tid+ 32]; } __syncthreads();
    if (tid < 16) { partial_sums[tid] += partial_sums[tid+ 16]; } __syncthreads();
    if (tid < 8) { partial_sums[tid] += partial_sums[tid+ 8]; } __syncthreads();
    if (tid < 4) { partial_sums[tid] += partial_sums[tid+ 4]; } __syncthreads();
    if (tid < 2) { partial_sums[tid] += partial_sums[tid+ 2]; } __syncthreads();
    if (tid < 1) {
        res1_D[i*64 + gridDim.x*blockIdx.y + blockIdx.x] = partial_sums[0]+partial_sums[1];
    }
}
// Backprop, stage 2: reduces the 64 partials of (W^T delta) for unit
// blockIdx.z and multiplies by the sigmoid derivative a*(1-a) to produce
// this layer's delta. Assumes a 64-thread block.
__global__ void backward_step2(float *res1_D, float *a_D, float *delta_D) {
    unsigned int i = blockIdx.z;
    unsigned int tid = blockDim.x*threadIdx.y + threadIdx.x;
    __shared__ float partial_sums[64];
    partial_sums[tid] = res1_D[64*i+tid];
    __syncthreads();
    if (tid < 32) { partial_sums[tid] += partial_sums[tid+ 32]; } __syncthreads();
    if (tid < 16) { partial_sums[tid] += partial_sums[tid+ 16]; } __syncthreads();
    if (tid < 8) { partial_sums[tid] += partial_sums[tid+ 8]; } __syncthreads();
    if (tid < 4) { partial_sums[tid] += partial_sums[tid+ 4]; } __syncthreads();
    if (tid < 2) { partial_sums[tid] += partial_sums[tid+ 2]; } __syncthreads();
    if (tid < 1) {
        partial_sums[0] += partial_sums[1];
        delta_D[i] = partial_sums[0]*a_D[i]*(1-a_D[i]); //dσ(t)/dt = σ(t)*(1-σ(t))
    }
}
//this is supposed to run with pthreads.
// Weight gradient as an outer product: weightG[j][i] = a_prev[i] * delta[j].
// 2D launch, one thread per weight, guarded at both edges.
__global__ void weight_gradient(float *a_D, float *delta_D, float *weightG_D, unsigned int columns/*(a_previous)*/, unsigned int rows/*delta*/) {
    unsigned int col = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (col >= columns || row >= rows) return;
    weightG_D[row*columns + col] = a_D[col]*delta_D[row];
}
//sums 1024 elements of arrays of size size
// Sums up to 1024 per-sample gradient contributions for one parameter cell.
// blockIdx.x selects a group of 1024 samples, (blockIdx.z, blockIdx.y) the
// cell; the block-wide tree reduction writes one partial per sample group.
// Assumes blockDim.x*blockDim.y == 1024.
__global__ void sum_of_1024(float *wG_or_bG, float *result_D, unsigned int size, unsigned int samples) {
    unsigned int tid = blockDim.x*threadIdx.y +threadIdx.x;
    unsigned int cell = gridDim.y*blockIdx.z + blockIdx.y;
    unsigned int sample_id = blockIdx.x*1024+tid;
    __shared__ float partial_sums[1024];
    // samples beyond the end contribute zero
    if (sample_id>=samples) {
        partial_sums[tid] = 0;
    } else {
        partial_sums[tid] = wG_or_bG[sample_id*size+cell];
    }
    __syncthreads();
    if (tid < 512) { partial_sums[tid] += partial_sums[tid+512]; } __syncthreads();
    if (tid < 256) { partial_sums[tid] += partial_sums[tid+256]; } __syncthreads();
    if (tid < 128) { partial_sums[tid] += partial_sums[tid+128]; } __syncthreads();
    if (tid < 64) { partial_sums[tid] += partial_sums[tid+ 64]; } __syncthreads();
    if (tid < 32) { partial_sums[tid] += partial_sums[tid+ 32]; } __syncthreads();
    if (tid < 16) { partial_sums[tid] += partial_sums[tid+ 16]; } __syncthreads();
    if (tid < 8) { partial_sums[tid] += partial_sums[tid+ 8]; } __syncthreads();
    if (tid < 4) { partial_sums[tid] += partial_sums[tid+ 4]; } __syncthreads();
    if (tid < 2) { partial_sums[tid] += partial_sums[tid+ 2]; } __syncthreads();
    if (tid < 1) {
        result_D[cell+blockIdx.x*size] = partial_sums[0]+partial_sums[1];
    }
}
// Gradient-descent update over a flattened 3D grid/block launch:
// w -= learning_rate * grad / samples, one parameter per thread.
__global__ void grad_desc(float *wG_or_bG, float *w_or_b, unsigned int size, unsigned int samples, float learning_rate) {
    unsigned int threads_per_block = blockDim.x*blockDim.y*blockDim.z;
    unsigned int block_id = gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y + blockIdx.x;
    unsigned int local_id = (blockDim.x*blockDim.y)*threadIdx.z + blockDim.x*threadIdx.y + threadIdx.x;
    unsigned int global_id = threads_per_block*block_id + local_id;
    if (global_id < size) {
        w_or_b[global_id] = w_or_b[global_id] - learning_rate*wG_or_bG[global_id]/samples;
    }
}
23,071 | #include <cuda.h>
#include <stdio.h>
// Queries the current CUDA device and prints its properties.
// BUG FIX: the size_t-typed fields (totalGlobalMem, sharedMemPerBlock,
// memPitch, totalConstMem, textureAlignment, surfaceAlignment) were printed
// with %u, which is undefined behavior and truncates on LP64 systems; they
// now use %zu.
int main(int argc, char** argv) {
    struct cudaDeviceProp p;
    int device;
    cudaGetDevice(&device);
    cudaGetDeviceProperties(&p, device);
    printf("> %s\n"
           "\ttotalGlobalMem: %zu B\n"
           "\tsharedMemPerBlock: %zu B\n" "\tregsPerBlock: %d\n"
           "\twarpSize: %d threads\n"
           "\tmemPitch: %zu B\n"
           "\tmaxThreadsPerBlock: %d\n"
           "\tmaxThreadsDim: (%d, %d, %d)\n"
           "\tmaxGridSize: (%d, %d, %d)\n"
           "\tclockRate: %d kHz\n"
           "\ttotalConstMem: %zu B\n"
           "\tCompute Capability: %d.%d\n"
           "\ttextureAlignment: %zu\n"
           "\tdeviceOverlap: %d\n"
           "\tmultiProcessorCount: %d\n"
           "\tkernelExecTimeoutEnabled: %d\n"
           "\tintegrated: %d\n"
           "\tcanMapHostMemory: %d\n"
           "\tcomputeMode: %d\n"
           "\tmaxTexture1D: %d\n"
           "\tmaxTexture2D: (%d, %d)\n"
           "\tmaxTexture3D: (%d, %d, %d)\n"
           "\tmaxTexture1DLayered: (%d, %d)\n"
           "\tmaxTexture2DLayered: (%d, %d, %d)\n"
           "\tsurfaceAlignment: %zu\n"
           "\tconcurrentKernels: %d\n"
           "\tECCEnabled: %d\n"
           "\tPCI Bus ID: %d:%d.%d\n"
           "\ttccDriver: %d\n"
           "\tasyncEngineCount: %d\n"
           "\tunifiedAddressing: %d\n"
           "\tmemoryClockRate: %d kHz\n"
           "\tmemoryBusWidth: %d bits\n"
           "\tl2CacheSize: %d B\n"
           "\tmaxThreadsPerMultiProcessor: %d\n",
           p.name, p.totalGlobalMem, p.sharedMemPerBlock, p.regsPerBlock,
           p.warpSize, p.memPitch, p.maxThreadsPerBlock, p.maxThreadsDim[0],
           p.maxThreadsDim[1], p.maxThreadsDim[2], p.maxGridSize[0],
           p.maxGridSize[1], p.maxGridSize[2], p.clockRate, p.totalConstMem,
           p.major, p.minor, p.textureAlignment, p.deviceOverlap,
           p.multiProcessorCount, p.kernelExecTimeoutEnabled, p.integrated,
           p.canMapHostMemory, p.computeMode, p.maxTexture1D,
           p.maxTexture2D[0], p.maxTexture2D[1], p.maxTexture3D[0],
           p.maxTexture3D[1], p.maxTexture3D[2], p.maxTexture1DLayered[0],
           p.maxTexture1DLayered[1], p.maxTexture2DLayered[0],
           p.maxTexture2DLayered[1], p.maxTexture2DLayered[2],
           p.surfaceAlignment, p.concurrentKernels, p.ECCEnabled, p.pciBusID,
           p.pciDeviceID, p.pciDomainID, p.tccDriver, p.asyncEngineCount,
           p.unifiedAddressing, p.memoryClockRate, p.memoryBusWidth,
           p.l2CacheSize, p.maxThreadsPerMultiProcessor);
}
|
// Per-byte absolute difference of two image buffers: dest = |a - b|.
// NOTE(review): the index combines x (scaled by 3, presumably 3 channels),
// y (scaled by 3*res, presumably bytes per row), and z (channel) — assumes
// `res` is the image width in pixels; confirm against the launch config.
__global__ void cuda_GetImgDiff(unsigned char *dest, unsigned char *a, unsigned char *b, int res) {
    int x = 3*threadIdx.x + 3*(blockIdx.x * blockDim.x);
    int y = (3 * res)*threadIdx.y + (3 * res)*(blockIdx.y * blockDim.y);
    int z = threadIdx.z;
    int i = (x + y + z);
    // branch keeps the subtraction non-negative for unsigned bytes
    if(a[i] >= b[i]){
        dest[i] = a[i] - b[i];
    }
    else{
        dest[i] = b[i] - a[i];
    }
}
// Block-level tree reduction that sums each block's slice of d_in and writes
// the block total to d_out[blockIdx.x]. DESTRUCTIVE: d_in is overwritten
// with partial sums. Requires blockDim.x to be a power of two (the halving
// stride otherwise drops elements).
__global__ void cuda_SumPixels(float *d_in, float *d_out) {
    int thId = threadIdx.x;
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if(thId < s)
        {
            d_in[id] += d_in[id + s];
        }
        // barrier is outside the guard, so all threads reach it each pass
        __syncthreads();
    }
    // thread 0 holds the block total after the final pass
    if(thId == 0)
    {
        d_out[blockIdx.x] = d_in[id];
    }
}
// Widens unsigned bytes to floats, one element per thread. No tail guard:
// the launch must cover exactly the buffer length.
__global__ void cuda_ByteToFloat(float *f, unsigned char *b) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    f[idx] = (float)b[idx];
}
23,073 | /* Teste la peformance de rsqrt sur un grand nombre de valeurs aléatoires (version GPU)
* À compiler avec `nvcc perf_gpu.cu -o test -O3` (requière CUDA!)
*/
#include <cmath>
#include <chrono>
#include <iostream>
#include <cuda.h>
#define N_FLOAT 100000000
#define MAX_FLOAT 1000
// In-place reciprocal square root over an n-element vector, one thread per
// element with a tail guard.
__global__ void rsqrt_vec(float* vec_source, int n)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= n) return; // avoid out-of-bounds access
    vec_source[idx] = rsqrtf(vec_source[idx]);
}
// Benchmarks rsqrtf on the GPU over N_FLOAT random values (copy in, kernel,
// copy out) and reports total and per-element time.
int main() {
    float *floats_source = NULL, *floats_dest = NULL;
    float *d_comm = NULL;
    int i;
    cudaSetDevice(0);
    srand((unsigned) time(NULL));
    // build the source array of floats
    floats_source = (float*) malloc(N_FLOAT *sizeof(float));
    if(floats_source == NULL) {
        printf("error allocating floats_source");
        return -1;
    }
    // generate N_FLOAT floats between ~0 and MAX_FLOAT.
    // BUG FIX: `1 + rand()` overflows (signed UB) when rand() == RAND_MAX;
    // do the addition in float instead — same value, no overflow.
    for(i=0; i < N_FLOAT; i++)
        floats_source[i] = (1.0f + (float) rand()) / RAND_MAX * MAX_FLOAT;
    // allocate memory for the result
    floats_dest = (float*) malloc(N_FLOAT *sizeof(float));
    if(floats_dest == NULL) {
        printf("error allocating floats_dest");
        free(floats_source);
        return -1;
    }
    // allocate device memory (robustness: check the result)
    if (cudaMalloc(&d_comm, N_FLOAT *sizeof(float)) != cudaSuccess) {
        printf("error allocating device memory");
        free(floats_source);
        free(floats_dest);
        return -1;
    }
    // time the whole round trip: H2D copy + kernel + D2H copy
    auto before = std::chrono::high_resolution_clock::now();
    cudaMemcpy(d_comm, floats_source, N_FLOAT *sizeof(float), cudaMemcpyHostToDevice);
    int blocksize = 512;
    int nblock = N_FLOAT/blocksize + (N_FLOAT % blocksize > 0 ? 1: 0);
    rsqrt_vec<<<nblock, blocksize>>>(d_comm, N_FLOAT);
    cudaMemcpy(floats_dest, d_comm, N_FLOAT *sizeof(float), cudaMemcpyDeviceToHost);
    auto t_rsqrt = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now() - before);
    std::cout << "Le tout a prit "
              << ((double) t_rsqrt.count() / 1e6)
              << "ms, donc rsqrt: "
              << ((double) t_rsqrt.count() / N_FLOAT)
              << " ns/floats" << std::endl;
    free(floats_source);
    free(floats_dest);
    cudaFree(d_comm);
}
|
23,074 | #include "includes.h"
// Block-level sum reduction where each thread first folds TWO global elements
// (its own and one a full grid-stride away) into shared memory, then the
// block reduces with an XOR-stride tree and thread 0 atomically adds the
// block total into *sum. Requires blockDim.x to be a power of two for the
// XOR pairing to cover all elements. Dynamic shared memory: blockDim.x ints.
__global__ static void kernelCalcSum_EffectiveShareAccess_DoubleGlobalAccess(const int* dataArray, int arraySize, int* sum)
{
    __shared__ extern int cache[];
    int cacheIndex = threadIdx.x;
    int arrayIndex1 = (int)(blockDim.x * blockIdx.x + threadIdx.x); // first element
    int arrayIndex2 = arrayIndex1 + gridDim.x * blockDim.x; // second element
    cache[cacheIndex] = 0;
    if (arrayIndex1 < arraySize)
    {
        cache[cacheIndex] += dataArray[arrayIndex1];
    }
    if (arrayIndex2 < arraySize)
    {
        cache[cacheIndex] += dataArray[arrayIndex2];
    }
    __syncthreads();
    int blockSize = blockDim.x;
    // XOR-stride tree: partner index flips one bit per round
    for (int offset = blockSize >> 1; offset > 0; offset >>= 1)
    {
        if (cacheIndex < offset)
        {
            cache[cacheIndex] += cache[cacheIndex ^ offset];
        }
        __syncthreads();
    }
    // one atomic per block keeps global contention low
    if (cacheIndex == 0)
    {
        atomicAdd(sum, cache[0]);
    }
}
23,075 | #include <iostream>
#include <iomanip>
#include <sstream>
#include <fstream>
#include <numeric>
#include <stdlib.h>
#include <vector>
#include <algorithm>
using namespace std;
#define REDUCE_BLOCK_SIZE 128
// Row-major float matrix owning its element buffer.
// NOTE(review): the destructor deletes `elements` but no copy constructor /
// copy assignment is defined, so copying a Matrix double-frees — pass by
// reference/pointer only (all code in this file does).
struct Matrix {
    Matrix() : elements(NULL), width(0), height(0), pitch(0) {}
    ~Matrix() { if (elements) delete[] elements; }
    unsigned int width;   // number of columns
    unsigned int height;  // number of rows
    unsigned int pitch;   // set to 0 by the ctor; not otherwise used here
    float* elements;      // row-major buffer of width*height floats
};
__global__ void matrixMulKernel(float*, float*, float*, int, int, int, int);
__global__ void sigmoidKernel(float*, int);
__global__ void matrixAbsErrorKernel(float*, float*, float*, int, int);
__global__ void absErrorKernel(float*, float*, float*, int);
__global__ void updateParamsAbsErrorKernel(float*, float*, float*, float*, int, float);
__global__ void crossEntropyKernel(float*, float*, float*, int);
__global__ void reduceKernel(float*, float*, int);
// Sets element (x = column, y = row) to `val`, throwing on out-of-range
// access. BUG FIX: the bounds check used `>`, which let x == width and
// y == height through and wrote one element past the buffer.
inline static void InitializeMatrix(Matrix *mat, int x, int y, float val) {
    if (x >= (int)mat->width || y >= (int)mat->height) {
        throw ("invalid access - Initialize Matrix");
    }
    mat->elements[y * mat->width + x] = val;
}
// Returns element (x = column, y = row), throwing on out-of-range access.
// BUG FIX: the bounds check used `>`, which let x == width and y == height
// through and read one element past the buffer.
inline static float Matrix_Element_Required(Matrix *mat, int x, int y){
    if (x >= (int)mat->width || y >= (int)mat->height) {
        throw ("invalid access - Matrix Element Required");
    }
    return mat->elements[y * mat->width + x];
}
// (Re)allocates mat as a height x width matrix and zero-fills it.
// FIX: frees any previously owned buffer (repeated allocation leaked), and
// zero-fills with std::fill_n instead of a per-element bounds-checked loop.
static void AllocateMatrix(Matrix *mat, int height, int width){
    delete[] mat->elements;  // no-op when elements is NULL
    mat->elements = new float[(size_t)height * width];
    mat->width = width;
    mat->height = height;
    std::fill_n(mat->elements, (size_t)height * width, 0.0f);
}
// Prints the matrix dimensions, then the contents — but only when the matrix
// is small (both dims < 10) or `force` is set.
static void DisplayMatrix(Matrix &mat, bool force = false){
    std::cout << "Dim: " << mat.height << ", " << mat.width << "\n";
    bool small_enough = mat.width < 10 && mat.height < 10;
    if (small_enough || force){
        for (int row = 0; row < mat.height; row++) {
            for (int col = 0; col < mat.width; col++) {
                std::cout << Matrix_Element_Required(&mat, col, row) << "\t";
            }
            std::cout << "\n";
        }
    }
    std::cout << std::endl;
}
// Loads a CSV into X (features) and y (labels). First pass counts rows and
// counts columns from the first line only; second pass skips the header row,
// then per data row: column 0 is ignored, column 1 becomes the label, and
// columns 2+ become the features. Always returns true.
// NOTE(review): a missing file only prints a warning and falls through —
// confirm whether returning false was intended.
static bool setup_data (string file_name, Matrix *X, Matrix *y) {
    ifstream s(file_name.c_str());
    if (!s.is_open()) {
        printf("The file does not exist\n");
    }
    int rows = 0;
    int cols = 0;
    string line;
    // pass 1: count rows; infer column count from the first line
    while (getline(s, line)) {
        if (rows++ == 0) {
            stringstream ss(line);
            while (ss.good()) {
                string substr;
                getline(ss, substr, ',');
                cols++;
            }
        }
    }
    std::cout << "Found " << rows << " rows with " << cols << " columns." << std::endl;
    // rewind for the data pass
    s.clear() ;
    s.seekg(0, ios::beg);
    // rows-1: header excluded; cols-2: id and label columns excluded
    AllocateMatrix (X, rows - 1,cols - 2);
    AllocateMatrix (y, rows - 1, 1);
    getline(s, line);  // skip the header row
    int ya = 0;        // destination row index
    while (getline(s, line)) {
        stringstream ss(line);
        int xa = 0;    // source column index within this line
        while (ss.good()) {
            string substr;
            getline(ss, substr, ',');
            if (xa == 1) {
                float val = atof(substr.c_str());
                InitializeMatrix(y, 0, ya, val);   // column 1 -> label
            } else if (xa > 1) {
                float val = atof(substr.c_str());
                InitializeMatrix(X, (xa - 2), ya, val);  // columns 2+ -> features
            }
            xa++;
        }
        ya++;
    }
    return true;
}
// Min-max normalizes each column in place so its values land in [0, 1].
// BUG FIX: the original scaled by `max` instead of the range `max - min`,
// which is not min-max normalization (result exceeds 1 when min < 0, and
// never reaches 1 unless min == 0). A constant column maps to all zeros.
static void Normalize_Matrix_min_max(Matrix *m){
    for (int x = 0; x < m->width; ++x) {
        // find this column's min and max
        float min = Matrix_Element_Required(m, x, 0);
        float max = Matrix_Element_Required(m, x, 0);
        for (int y = 1; y < m->height; ++y) {
            float val = Matrix_Element_Required(m, x, y);
            if (val < min) {
                min = val;
            } else if (val > max) {
                max = val;
            }
        }
        float range = max - min;
        if (range == 0.0f) range = 1.0f;  // constant column -> all zeros
        for (int y = 0; y < m->height; ++y) {
            float val = Matrix_Element_Required(m, x, y);
            InitializeMatrix(m, x, y, (val - min) / range);
        }
    }
}
// Fills the matrix with uniform random floats in [LO, HI].
static void InitializeRandom(Matrix *mat, float LO, float HI){
    for (int col = 0; col < mat->width; ++col) {
        for (int row = 0; row < mat->height; ++row) {
            float offset = static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(HI-LO)));
            InitializeMatrix(mat, col, row, LO + offset);
        }
    }
}
// SAFE_CALL backend: prints the failing statement, error string, and source
// location, then terminates the process. No-op on cudaSuccess.
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err){
    if (err != cudaSuccess) {
        std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
        exit (1);
    }
}
#define SAFE_CALL(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
// Naive dense matrix multiply r = m1 * m2, one output element per thread.
// m1 is (rh x m1w), m2 is (m1w x m2w), r is (rh x rw); all row-major.
__global__ void matrixMulKernel(float *m1, float *m2, float *r, int m1w, int m2w, int rw, int rh){
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (y >= rh || x >= rw) return;
    float dot = 0.0f;
    for (int k = 0; k < m1w; k++){
        dot += m1[y * m1w + k] * m2[k * m2w + x];
    }
    r[y * rw + x] = dot;
}
// In-place logistic sigmoid over an m-element vector.
__global__ void sigmoidKernel(float *r, int m){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= m) return;
    float z = r[i];
    r[i] = 1.0 / (1.0 + expf(-z));
}
// Element-wise squared error between prediction p and target ys, written to
// r. One thread per element of an (rh x rw) row-major matrix.
__global__ void matrixAbsErrorKernel(float *p, float *ys, float *r, int rw, int rh){
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (y >= rh || x >= rw) return;
    int idx = y * rw + x;
    float diff = p[idx] - ys[idx];
    r[idx] = diff * diff;
}
// Vector form of the squared-error kernel: r[i] = (p[i] - ys[i])^2.
__global__ void absErrorKernel(float *p, float *ys, float *r, int m){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= m) return;
    float diff = p[i] - ys[i];
    r[i] = diff * diff;
}
// SGD parameter update for one sample: th[i] -= alpha * (h - y) * x[i],
// where h = *p is the scalar prediction and y = *ys the scalar target.
__global__ void updateParamsAbsErrorKernel(float *p, float *ys, float *th, float *xs, int m, float alpha){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= m) return;
    float residual = *p - *ys;
    th[i] = th[i] - alpha * residual * xs[i];
}
// Logistic loss per element: r[i] = log(1 + exp(-ys[i] * p[i])), computed
// via log1pf for accuracy near zero.
__global__ void crossEntropyKernel(float *p, float *ys, float *r, int m){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= m) return;
    r[i] = log1pf(expf(-ys[i] * p[i]));
}
// Block-level sum reduction: each block loads 2*REDUCE_BLOCK_SIZE elements
// of `input` (zero-padding past `len`) and writes its total to
// output[blockIdx.x]. Launch with REDUCE_BLOCK_SIZE threads per block.
__global__ void reduceKernel(float * input, float * output, int len) {
    __shared__ float partialSum[2 * REDUCE_BLOCK_SIZE];
    unsigned int t = threadIdx.x, start = 2 * blockIdx.x * REDUCE_BLOCK_SIZE;
    // each thread stages two elements, a block-width apart
    if (start + t < len)
        partialSum[t] = input[start + t];
    else
        partialSum[t] = 0;
    if (start + REDUCE_BLOCK_SIZE + t < len)
        partialSum[REDUCE_BLOCK_SIZE + t] = input[start + REDUCE_BLOCK_SIZE + t];
    else
        partialSum[REDUCE_BLOCK_SIZE + t] = 0;
    // tree reduction; the barrier precedes each round's reads
    for (unsigned int stride = REDUCE_BLOCK_SIZE; stride >= 1; stride >>= 1) {
        __syncthreads();
        if (t < stride)
            partialSum[t] += partialSum[t+stride];
    }
    if (t == 0)
        output[blockIdx.x] = partialSum[0];
}
// Trains logistic regression on the GPU with per-sample SGD. Each outer
// iteration: (1) for every sample, predict + sigmoid + parameter update;
// (2) recompute predictions for the whole set, square the residuals, and
// reduce them on the GPU to log the iteration's cost into cost_function.
// Prints the per-iteration and mean cost.
// NOTE(review): Train_Parameters is unused here; the learned parameters stay
// in gpu_params and are never copied back — confirm intent with callers.
static void Logistic_Regression_CUDA(Matrix *X, Matrix *y, Matrix *Parameters, Matrix *Train_Parameters, int maxIterations, float alpha, vector<float> &cost_function){
    float *gpu_X;
    float *gpu_y;
    float *gpu_prediction;   // scalar prediction for the current sample
    float *gpu_params;
    float *gpu_abs_error;    // per-sample squared residuals
    float *gpu_err_cost;     // per-block partial sums from reduceKernel
    float *gpu_predictions;  // full-set predictions
    Matrix predictions;
    AllocateMatrix(&predictions, y->height, y->width);
    Matrix absErrors;
    AllocateMatrix(&absErrors, y->height, y->width);
    float mean_error;
    float sum=0;
    int quantity = 1;
    int m = y->height;  // number of samples
    // number of reduction outputs: one per 2*REDUCE_BLOCK_SIZE inputs
    int numOutputElements;
    numOutputElements = m / (REDUCE_BLOCK_SIZE<<1);
    if (m % (REDUCE_BLOCK_SIZE<<1)) {
        numOutputElements++;
    }
    SAFE_CALL(cudaMalloc((void**)&gpu_X, sizeof(float) * X->width * X->height));
    SAFE_CALL(cudaMalloc((void**)&gpu_y, sizeof(float) * y->width * y->height));
    SAFE_CALL(cudaMalloc((void**)&gpu_prediction, sizeof(float)));
    SAFE_CALL(cudaMalloc((void**)&gpu_predictions, sizeof(float) * y->width * y->height));
    SAFE_CALL(cudaMalloc((void**)&gpu_abs_error, sizeof(float) * y->width * y->height));
    SAFE_CALL(cudaMalloc((void**)&gpu_params, sizeof(float) * Parameters->width * Parameters->height));
    SAFE_CALL(cudaMalloc((void**)&gpu_err_cost, sizeof(float) * numOutputElements));
    SAFE_CALL(cudaMemcpy(gpu_X, X->elements, sizeof(float) * X->width * X->height, cudaMemcpyHostToDevice));
    SAFE_CALL(cudaMemcpy(gpu_y, y->elements, sizeof(float) * y->width * y->height, cudaMemcpyHostToDevice));
    SAFE_CALL(cudaMemcpy(gpu_params, Parameters->elements, sizeof(float) * Parameters->width * Parameters->height, cudaMemcpyHostToDevice));
    // invoke kernel
    static const int blockWidth = 16;
    static const int blockHeight = blockWidth;
    int numBlocksW = X->width / blockWidth;
    int numBlocksH = X->height / blockHeight;
    if (X->width % blockWidth) numBlocksW++;
    if (X->height % blockHeight) numBlocksH++;
    dim3 dimGrid(numBlocksW, numBlocksH);
    dim3 dimBlock(blockWidth, blockHeight);
    dim3 dimReduce((m - 1) / REDUCE_BLOCK_SIZE + 1);
    dim3 dimReduceBlock(REDUCE_BLOCK_SIZE);
    dim3 dimVectorGrid(((m - 1) / blockWidth * blockWidth) + 1);
    dim3 dimVectorBlock(blockWidth * blockWidth);
    float* error_accum = new float[numOutputElements];
    for (int iter = 0; iter < maxIterations; ++iter) {
        // per-sample SGD pass over all m rows
        for (int i = 0; i < m; ++i) {
            matrixMulKernel<<<dimGrid, dimBlock>>>(&gpu_X[i * X->width], gpu_params, gpu_prediction, X->width, Parameters->width, 1, 1);
            sigmoidKernel<<<dimVectorGrid, dimVectorBlock>>>(gpu_prediction, 1);
            updateParamsAbsErrorKernel<<<dimVectorGrid, dimVectorBlock>>>(gpu_prediction, &gpu_y[i], gpu_params, &gpu_X[i * X->width], Parameters->height, alpha);
        }
        // full-set cost: predict, square residuals, reduce on the GPU
        matrixMulKernel<<<dimGrid, dimBlock>>>(gpu_X, gpu_params, gpu_predictions, X->width, Parameters->width, predictions.width, predictions.height);
        sigmoidKernel<<<dimVectorGrid, dimVectorBlock>>>(gpu_predictions, m);
        absErrorKernel<<<dimVectorGrid, dimVectorBlock>>>(gpu_predictions, gpu_y, gpu_abs_error, m);
        reduceKernel<<<dimReduce, dimReduceBlock>>>(gpu_abs_error, gpu_err_cost, m);
        SAFE_CALL(cudaMemcpy(error_accum, gpu_err_cost, sizeof(float) * numOutputElements, cudaMemcpyDeviceToHost));
        float g_sum = 0;
        for (int i = 0; i < numOutputElements; ++i){
            g_sum += error_accum[i];
        }
        g_sum /= (2*m);  // standard 1/(2m) cost scaling
        cost_function.push_back(g_sum);
        sum += g_sum;
        quantity++;
        cout << g_sum << "\n";
    }
    mean_error = sum/quantity;
    printf("\n The mean error is %f\n", mean_error);
    cout << endl;
    delete[] error_accum;
    SAFE_CALL(cudaFree(gpu_X));
    SAFE_CALL(cudaFree(gpu_y));
    SAFE_CALL(cudaFree(gpu_abs_error));
    SAFE_CALL(cudaFree(gpu_prediction));
    SAFE_CALL(cudaFree(gpu_predictions));
    SAFE_CALL(cudaFree(gpu_params));
    SAFE_CALL(cudaFree(gpu_err_cost));
}
// Entry point: loads the data set, trains logistic regression on the GPU
// and reports the wall-clock training time measured with CUDA events.
int main(int argc, char *argv[]){
    // Input is currently hard-coded; the prompt is kept for interface parity.
    string input_file = "benign.csv";
    cout << "Please enter a valid file to run test for logistic regression on CUDA:\n>";
    //getline(cin, input_file);
    cout << "You entered: " << input_file << endl << endl;
    Matrix X,y;
    setup_data (input_file, &X, &y);
    cout <<"\n The X - Squiggle Matrix." << endl;
    DisplayMatrix (X,true);
    cout <<"\n The y - Matrix." << endl;
    DisplayMatrix (y,true);
    Matrix Parameters, Train_Parameters;
    AllocateMatrix(&Parameters, X.width, 1);
    AllocateMatrix(&Train_Parameters, X.width, 1);
    InitializeRandom(&Parameters, -1.0, 1.0);
    Normalize_Matrix_min_max(&X);
    vector<float> cost_function;
    // Time the whole training run with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    Logistic_Regression_CUDA(&X, &y, &Parameters, &Train_Parameters, 150, 0.03, cost_function);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("\nProcessing time: %f (ms)\n", milliseconds);
    // Events were previously leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    std::cout <<"Done!"<< std::endl;
    return 0;
}
//Created by : Yehezk34
|
23,076 |
// This is not really C++-code but pretty plain C code, but we compile it
// as C++ so we can integrate with CUDA seamlessly.
// If you plan on submitting your solution for the Parallel Sorting Contest,
// please keep the split into main file and kernel file, so we can easily
// insert other data.
#define BLOCKSIZE 1024
__device__
static void exchange(int *i, int *j)
{
    // Swap the two ints through a temporary.
    int tmp = *i;
    *i = *j;
    *j = tmp;
}
__global__
void bitonic_block(int *data, int N, int j, int k)
{
    // One thread per element; guard the tail when N is not a multiple of
    // the block size.
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= N)
        return;
    const int ixj = i ^ j;   // partner index for this compare/exchange step
    if (ixj <= i)
        return;              // only the lower index of each pair does the work
    // (i & k) selects the sort direction for this sub-sequence.
    if ((i & k) == 0) {
        if (data[i] > data[ixj]) exchange(&data[i], &data[ixj]);
    } else {
        if (data[i] < data[ixj]) exchange(&data[i], &data[ixj]);
    }
}
// No, this is not GPU code yet but just a copy of the CPU code, but this
// is where I want to see your GPU code!
// Sorts `data` (host array of N ints) ascending with a bitonic sorting
// network executed on the GPU. The bitonic network requires N to be a
// power of two.
void bitonic_gpu(int *data, int N)
{
  int j, k;
  int size = sizeof(int) * N;
  int *devicedata;
  cudaMalloc((void **)&devicedata, size);
  cudaMemcpy(devicedata, data, size, cudaMemcpyHostToDevice);
  dim3 dimBlock(BLOCKSIZE, 1);
  // Ceil-divide so every element gets a thread: the previous N/BLOCKSIZE
  // launched zero blocks for N < BLOCKSIZE and skipped the tail whenever
  // N was not a multiple of BLOCKSIZE.
  dim3 dimGrid((N + BLOCKSIZE - 1) / BLOCKSIZE, 1);
  for (k = 2; k <= N; k = 2 * k)         // outer loop: double size each step
  {
    for (j = k >> 1; j > 0; j = j >> 1)  // inner loop: half size each step
    {
      bitonic_block<<<dimGrid, dimBlock>>>(devicedata, N, j, k);
      // Every compare/exchange step must complete before the next starts.
      // (cudaThreadSynchronize is deprecated; use cudaDeviceSynchronize.)
      cudaDeviceSynchronize();
    }
  }
  cudaMemcpy(data, devicedata, size, cudaMemcpyDeviceToHost);
  cudaFree(devicedata);  // was leaked before
}
|
23,077 | #include <stdio.h>
// Element-wise vector sum d_c = d_a + d_b; the hard-coded 2000 bound
// guards against excess threads for the problem size this was written for.
__global__ void add(int* d_a, int* d_b, int* d_c){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= 2000)
        return;
    d_c[idx] = d_a[idx] + d_b[idx];
}
// Selects the second GPU (device 1). The return code of cudaSetDevice was
// previously ignored, hiding the failure on single-GPU machines.
int main(int argc, char* argv[]){
    cudaError_t err = cudaSetDevice(1);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice(1) failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
|
23,078 | #include "includes.h"
// Forward pass of a 2-D convolution layer (valid padding, stride 1).
// Grid layout: blockIdx.x = batch sample, blockIdx.y = output channel;
// threadIdx.(y,x) = output pixel. Requires blockDim >= (k, k) so the full
// k*k kernel can be staged into shared memory, and blockDim >= (w_out, w_out)
// so every output pixel has a thread. CONV_KERNEL_SIZE (from includes.h)
// must be >= k.
// NOTE(review): the input offset `n * (h_in * w_in)` ignores input channels,
// so this appears to assume a single input channel per sample — confirm
// against the caller.
__global__ void conv_layer_forward_gpu(float *x, float *w, float *y, int h_in, int w_in, int w_out, int k, int m) {
    int n, m_, h, w_, p, q;
    n = blockIdx.x; // Batch index
    m_ = blockIdx.y; // Channel index (output feature map)
    h = threadIdx.y; // Pixel (h, w_)
    w_ = threadIdx.x; // Pixel (h, w_)
    float ans = 0; // Accumulator for the output value
    int offset = n * (h_in * w_in);
    // Load w into shared memory to speed up the data access
    __shared__ float cached_w[CONV_KERNEL_SIZE][CONV_KERNEL_SIZE];
    if (h < k && w_ < k) {
        cached_w[h][w_] = w[m_ * (k * k) + h * k + w_];
    }
    // Barrier is outside the branch above, so all threads reach it.
    __syncthreads();
    // Loop over k by k kernel
    if (h < w_out && w_ < w_out) {
        for (p = 0; p < k; p++) {
            for (q = 0; q < k; q++)
                ans = ans + x[offset + (h + p) * w_in + (w_ + q)] * cached_w[p][q];
        }
        // Write out the return value (y laid out as [batch][channel][h][w])
        y[n * (m * w_out * w_out) + m_ * (w_out * w_out) + h * w_out + w_] = ans;
    }
}
23,079 | #define W 500
#define H 500
#define TX 32
#define TY 32
__global__
void distanceKernel(float *d_out, int w, int h, float2 pos)
{
    // Each thread computes the Euclidean distance from its pixel (col, row)
    // to `pos` and stores it in the row-major output array.
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= w || row >= h)
        return;
    const float dx = col - pos.x;
    const float dy = row - pos.y;
    d_out[row * w + col] = sqrtf(dx * dx + dy * dy);
}
// Computes a W x H field of distances from the origin on the GPU and copies
// it back to the host.
int main()
{
    // Host and device buffers for the distance field.
    float *h_out = (float *)calloc(W * H, sizeof(float));
    float *d_out;
    cudaMalloc(&d_out, W * H * sizeof(float));
    const float2 pos = {0.0f, 0.0f};
    // 2-D launch: one thread per pixel, grid ceil-divided by the tile size.
    const dim3 blockSize(TX, TY);
    const dim3 gridSize((W + TX - 1) / TX, (H + TY - 1) / TY);
    distanceKernel<<<gridSize, blockSize>>>(d_out, W, H, pos);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(h_out, d_out, W * H * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_out);
    free(h_out);
    return 0;
}
|
23,080 | // #include <stdlib.h>
// #include <stdio.h>
// #include <math.h>
// #include <string.h>
//
// //#include "cuPrintf.cu"
// #include "K_Common.cuh"
// #include <cutil.h>
// #include "host_defines.h"
// #include "builtin_types.h"
//
// #include "SimDEM.cuh"
// #include "CudaUtils.cuh"
//
//
// // Grid textures and constants
// #ifdef USE_TEX
// texture<uint, 1, cudaReadModeElementType> neighbors_tex;
// texture<uint, 1, cudaReadModeElementType> cell_indexes_start_tex;
// texture<uint, 1, cudaReadModeElementType> cell_indexes_end_tex;
//
// // Fluid textures and constants
// texture<float_vec, 1, cudaReadModeElementType> position_tex;
// texture<float_vec, 1, cudaReadModeElementType> velocity_tex;
// texture<float_vec, 1, cudaReadModeElementType> veleval_tex;
// texture<float_vec, 1, cudaReadModeElementType> color_tex;
// texture<float_vec, 1, cudaReadModeElementType> force_tex;
// #endif
//
// namespace SimLib { namespace Sim { namespace DEM {
//
// __device__ __constant__ DEMParams cDEMParams;
// __device__ __constant__ GridParams cGridParams;
//
// #include "K_SimDEM.cu"
//
// SimDEM::SimDEM(SimCudaAllocator* SimCudaAllocator)
// : SimBase(SimCudaAllocator)
// , mAlloced(false)
// {
// mGPUTimer = new ocu::GPUTimer();
//
// mDEMBuffers = new BufferManager<DEMBufferID>();
//
// mDEMBuffers->SetBuffer(BufferForce, new SimBufferCuda(mSimCudaAllocator, Device, sizeof(float_vec)));
// mDEMBuffers->SetBuffer(BufferForceSorted, new SimBufferCuda(mSimCudaAllocator, Device, sizeof(float_vec)));
// }
//
// SimDEM::~SimDEM()
// {
// delete mGPUTimer; mGPUTimer = NULL;
// delete mDEMBuffers; mDEMBuffers = NULL;
// }
//
//
// void SimDEM::SetParams(uint numParticles, float gridWorldSize, DEMParams &demParams)
// {
// hDEMParams = demParams;
//
// // call base class
// SimBase::SetParams(demParams.collide_dist/demParams.scale_to_simulation, gridWorldSize);
//
// Alloc(numParticles);
//
// GridParams hGridParams = mUniformGrid->GetGridParams();
//
// //Copy the grid parameters to the GPU
// CUDA_SAFE_CALL(cudaMemcpyToSymbol (cGridParams, &hGridParams, sizeof(GridParams) ) );
// CUDA_SAFE_CALL(cudaThreadSynchronize());
//
// //Copy the fluid parameters to the GPU
// CUDA_SAFE_CALL(cudaMemcpyToSymbol (cDEMParams, &hDEMParams, sizeof(DEMParams) ) );
// CUDA_SAFE_CALL(cudaThreadSynchronize());
//
// }
//
// void SimDEM::Alloc(uint numParticles)
// {
// if(!mParams)
// {
// printf("SimDEM::Alloc, no params!");
// return;
// }
//
// if (mAlloced)
// Free();
//
// // call base class
// SimBase::Alloc(numParticles);
//
// mNumParticles = numParticles;
//
// mDEMBuffers->AllocBuffers(mNumParticles);
//
// // cudaPrintfInit();
//
// BindTextures();
//
// mAlloced = true;
// }
//
//
// void SimDEM::Free()
// {
// SimBase::Free();
//
// UnbindTextures();
//
// mDEMBuffers->FreeBuffers();
//
// // cudaPrintfEnd();
//
// mAlloced = false;
// }
//
//
// void SimDEM::Clear()
// {
// SimBase::Clear();
//
// mDEMBuffers->MemsetBuffers(0);
// }
//
// DEMParams& SimDEM::GetFluidParams()
// {
// return hDEMParams;
// }
//
// float SimDEM::GetParticleSize()
// {
// return hDEMParams.particle_radius/hDEMParams.scale_to_simulation;
// }
// float SimDEM::GetParticleSpacing()
// {
// return 2*hDEMParams.particle_radius/hDEMParams.scale_to_simulation;
// }
//
// void SimDEM::Simulate(bool doTiming, bool progress, bool gridWallCollisions, bool terrainCollisions, TerrainData dTerrainData)
// {
// float time_hash,time_radixsort, time_updatelists, time_computeCollisions, time_integrateForces;
//
// time_hash = mUniformGrid->Hash(doTiming, mBaseBuffers->Get(BufferPosition)->GetPtr<float_vec>(), mNumParticles);
//
// time_radixsort = mUniformGrid->Sort(doTiming);
//
// time_updatelists = BuildDataStruct(doTiming);
//
// time_computeCollisions = ComputeCollisions(doTiming);
//
// time_integrateForces = Integrate(doTiming, progress, mSettings->GetValue("Timestep"), gridWallCollisions, terrainCollisions, dTerrainData);
//
// if(doTiming)
// {
// char tmp[2048];
// sprintf(tmp,"%4.4f\t%4.4f\t%4.4f\t%4.4f\t%4.4f\t\n", time_hash, time_radixsort, time_updatelists, time_computeCollisions, time_integrateForces);
// printf(tmp);
// }
// }
//
// void SimDEM::BindTextures()
// {
// DEMData dParticleDataSorted = GetParticleDataSorted();
//
// #ifdef USE_TEX
// CUDA_SAFE_CALL(cudaBindTexture(0, position_tex, dParticleDataSorted.position, mNumParticles*sizeof(float_vec)));
// CUDA_SAFE_CALL(cudaBindTexture(0, velocity_tex, dParticleDataSorted.velocity, mNumParticles*sizeof(float_vec)));
// CUDA_SAFE_CALL(cudaBindTexture(0, veleval_tex, dParticleDataSorted.veleval, mNumParticles*sizeof(float_vec)));
// CUDA_SAFE_CALL(cudaBindTexture(0, color_tex, dParticleDataSorted.color, mNumParticles*sizeof(float_vec)));
// CUDA_SAFE_CALL(cudaBindTexture(0, force_tex, dParticleDataSorted.force, mNumParticles*sizeof(float_vec)));
//
// #ifdef SPHSIMLIB_USE_NEIGHBORLIST
// CUDA_SAFE_CALL(cudaBindTexture(0, neighbors_tex, dNeighborList.neighbors, dNeighborList.MAX_NEIGHBORS * dNeighborList.numParticles * sizeof(uint)));
// #endif
//
// GridData dGridData = mUniformGrid->GetGridData();
// CUDA_SAFE_CALL(cudaBindTexture(0, cell_indexes_start_tex, dGridData.cell_indexes_start, mUniformGrid->GetNumCells() * sizeof(uint)));
// CUDA_SAFE_CALL(cudaBindTexture(0, cell_indexes_end_tex, dGridData.cell_indexes_end, mUniformGrid->GetNumCells() * sizeof(uint)));
// #endif
// }
//
// void SimDEM::UnbindTextures()
// {
// #ifdef USE_TEX
// CUDA_SAFE_CALL(cudaUnbindTexture(position_tex));
// CUDA_SAFE_CALL(cudaUnbindTexture(velocity_tex));
// CUDA_SAFE_CALL(cudaUnbindTexture(veleval_tex));
// CUDA_SAFE_CALL(cudaUnbindTexture(color_tex));
// CUDA_SAFE_CALL(cudaUnbindTexture(force_tex));
//
// #ifdef SPHSIMLIB_USE_NEIGHBORLIST
// CUDA_SAFE_CALL(cudaUnbindTexture(neighbors_tex));
// #endif
//
// GridData dGridData = mUniformGrid->GetGridData();
// CUDA_SAFE_CALL(cudaUnbindTexture(cell_indexes_start_tex));
// CUDA_SAFE_CALL(cudaUnbindTexture(cell_indexes_end_tex));
// #endif
// }
//
// DEMData SimDEM::GetParticleDataSorted()
// {
// DEMData dParticleDataSorted;
// dParticleDataSorted.color = mBaseBuffers->Get(BufferColorSorted)->GetPtr<float_vec>();
// dParticleDataSorted.position = mBaseBuffers->Get(BufferPositionSorted)->GetPtr<float_vec>();
// dParticleDataSorted.veleval = mBaseBuffers->Get(BufferVelevalSorted)->GetPtr<float_vec>();
// dParticleDataSorted.velocity = mBaseBuffers->Get(BufferVelocitySorted)->GetPtr<float_vec>();
// dParticleDataSorted.force = mDEMBuffers->Get(BufferForceSorted)->GetPtr<float_vec>();
// return dParticleDataSorted;
// }
//
// DEMData SimDEM::GetParticleData()
// {
// DEMData dParticleData;
// dParticleData.color = mBaseBuffers->Get(BufferColor)->GetPtr<float_vec>();
// dParticleData.position = mBaseBuffers->Get(BufferPosition)->GetPtr<float_vec>();
// dParticleData.veleval = mBaseBuffers->Get(BufferVeleval)->GetPtr<float_vec>();
// dParticleData.velocity = mBaseBuffers->Get(BufferVelocity)->GetPtr<float_vec>();
// dParticleData.force = mDEMBuffers->Get(BufferForce)->GetPtr<float_vec>();
//
// return dParticleData;
// }
//
//
// float SimDEM::BuildDataStruct(bool doTiming)
// {
// GridData dGridData = mUniformGrid->GetGridData();
// DEMData dParticleData = GetParticleData();
// DEMData dParticleDataSorted = GetParticleDataSorted();
//
// // Used 10 registers, 192+16 bytes smem, 144 bytes cmem[0], 12 bytes cmem[1]
// uint numThreads, numBlocks;
// computeGridSize(mNumParticles, 128, numBlocks, numThreads);
//
// //dynamically allocated shared memory (per block)
// uint smemSize = sizeof(uint)*(numThreads+1);
//
// if(doTiming)
// {
// mGPUTimer->start();
// }
//
// // set all cells to empty
// CUDA_SAFE_CALL(cudaMemset(dGridData.cell_indexes_start, 0xff, mUniformGrid->GetNumCells() * sizeof(uint)));
//
// K_Grid_UpdateSorted<DEMSystem, DEMData><<< numBlocks, numThreads, smemSize>>> (
// mNumParticles,
// dParticleData,
// dParticleDataSorted,
// dGridData
// );
//
// //CUT_CHECK_ERROR("Kernel execution failed");
//
// if(doTiming)
// {
// mGPUTimer->stop();
// return mGPUTimer->elapsed_ms();
// }
//
// return 0;
// }
//
//
// float SimDEM::ComputeCollisions(bool doTiming)
// {
// GridData dGridData = mUniformGrid->GetGridData();
// DEMData dParticleDataSorted = GetParticleDataSorted();
//
// // Used 25 registers, 144+16 bytes smem, 160 bytes cmem[0], 8 bytes cmem[1], 8 bytes cmem[14]
// uint threadsPerBlock = 320;
//
// uint numThreads, numBlocks;
// computeGridSize(mNumParticles, threadsPerBlock, numBlocks, numThreads);
//
// if(doTiming)
// {
// mGPUTimer->start();
// }
//
// computeCollisions<<<numBlocks, numThreads>>>(
// mNumParticles,
// dNeighborList,
// dParticleDataSorted,
// dGridData
// );
//
// //CUT_CHECK_ERROR("Kernel execution failed");
//
// if(doTiming)
// {
// mGPUTimer->stop();
// return mGPUTimer->elapsed_ms();
// }
//
// return 0;
// }
//
// float SimDEM::Integrate(bool doTiming, bool progress, float deltaTime, bool gridWallCollisions, bool terrainCollisions, TerrainData dTerrainData)
// {
// GridData dGridData = mUniformGrid->GetGridData();
// DEMData dParticleData = GetParticleData();
// DEMData dParticleDataSorted = GetParticleDataSorted();
//
// //Used 25 registers, 208+16 bytes smem, 144 bytes cmem[0], 16 bytes cmem[1]
// uint numThreads, numBlocks;
// computeGridSize(mNumParticles, 320, numBlocks, numThreads);
//
// if(doTiming)
// {
// mGPUTimer->start();
// }
//
// integrateDEM<<<numBlocks, numThreads>>>(
// mNumParticles,
// gridWallCollisions, terrainCollisions,
// deltaTime,
// progress,
// dGridData,
// dParticleData,
// dParticleDataSorted,
// dTerrainData
// );
//
// //CUT_CHECK_ERROR("Kernel execution failed");
//
// //cudaPrintfDisplay(stdout, true);
//
// if(doTiming)
// {
// mGPUTimer->stop();
// return mGPUTimer->elapsed_ms();
// }
//
// return 0;
// }
//
// }}} // namespace SimLib { namespace Sim { namespace SimpleSPH { |
23,081 | #include "includes.h"
// 1-D stencil: out[i] = sum of in[i-RADIUS .. i+RADIUS] for interior cells.
// Boundary cells (within RADIUS of either end) receive the synthetic value
// i * (2*RADIUS + 1) instead of reading out of range.
__global__ void stencil_1d(int n, double *in, double *out)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // Excess threads past the end of the array do nothing.
    if (gid >= n) return;
    // Boundary handling.
    if (gid < RADIUS || gid >= (n - RADIUS))
    {
        out[gid] = (double) gid * ( (double)RADIUS*2 + 1) ;
        return;
    }
    // Interior: accumulate the 2*RADIUS+1 neighbourhood.
    double sum = 0.0;
    for (int idx = gid - (RADIUS); idx <= gid + (RADIUS); idx++)
    {
        sum += in[idx];
    }
    out[gid] = sum;
    return;
}
23,082 | #include <iostream>
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
// Grid-stride loop computing x[i] = a*x[i] + y[i] for all i < n, valid for
// any launch configuration.
__global__ void kernel(int n, float a, float* x, float* y){
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        x[i] = a * x[i] + y[i];
    }
}
// Benchmarks the axpy-style kernel on 2^29 random elements and prints the
// elapsed kernel time in nanoseconds.
int main(void){
    const int N = 1 << 29;
    const float a = 11.0f;
    // Host buffers
    float *h_x = (float*)malloc(N*sizeof(float));
    float *h_y = (float*)malloc(N*sizeof(float));
    // Device buffers
    float *d_x;
    float *d_y;
    cudaMalloc(&d_x, N*sizeof(float));
    cudaMalloc(&d_y, N*sizeof(float));
    for(int i = 0; i < N; i++){
        h_x[i] = rand();
        h_y[i] = rand();
    }
    cudaMemcpy(d_x, h_x, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, N*sizeof(float), cudaMemcpyHostToDevice);
    auto t0 = Clock::now();
    kernel<<<128, 128>>>(N, a, d_x, d_y);
    // Kernel launches are asynchronous: without this synchronization the
    // timer only measured launch overhead, not execution time.
    cudaDeviceSynchronize();
    auto t1 = Clock::now();
    cudaMemcpy(h_x, d_x, N*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
    std::cout
        << "elapsed: " << std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count() << "ns" << std::endl;
    // Host and device memory were previously leaked.
    cudaFree(d_x);
    cudaFree(d_y);
    free(h_x);
    free(h_y);
}
|
23,083 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated compiler stress kernel: exercises a fixed mix of float
// arithmetic and math intrinsics, then prints the final value of `comp`.
// The file header says "Do not modify", so only comments are added here.
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24) {
if (comp == (+1.5676E-44f / (var_3 + var_4 + -1.3673E-13f + var_5))) {
for (int i=0; i < var_1; ++i) {
comp += +1.1729E-43f + var_6;
float tmp_1 = var_7 * +1.2514E-35f;
comp = tmp_1 * var_8 * (var_9 / var_10);
for (int i=0; i < var_2; ++i) {
comp = var_11 + cosf((-1.4713E-44f / var_12 * -0.0f * var_13 + var_14));
float tmp_2 = (-0.0f - -1.4009E-42f * var_15 * (var_16 - -1.9726E35f));
comp += tmp_2 * floorf(+1.4955E-43f * +1.0380E-5f);
}
if (comp >= (-1.3013E-7f + (var_17 * +1.7098E-44f / (var_18 + (-1.9844E-41f / var_19))))) {
comp = var_20 - (-1.0184E-25f - acosf((var_21 / (+1.0578E-37f - (var_22 - var_23 + +1.9561E35f * var_24)))));
}
}
}
// Device-side printf: results are flushed at the next host synchronization.
printf("%.17g\n", comp);
}
// Allocates a 10-element float array with every slot set to v.
// The caller owns the returned buffer and must free() it.
float* initPointer(float v) {
    const int count = 10;
    float *buffer = (float*) malloc(sizeof(float) * count);
    for (int idx = 0; idx < count; ++idx) {
        buffer[idx] = v;
    }
    return buffer;
}
// Auto-generated driver: parses 25 numeric command-line arguments and
// launches `compute` with a single thread. argc is not validated
// (generated test harness — crashes if fewer than 25 args are supplied).
int main(int argc, char** argv) {
/* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25);
  // Flush the kernel's printf output before exiting.
  cudaDeviceSynchronize();
  return 0;
}
|
23,084 | // Copyright (c) OpenMMLab. All rights reserved.
#include <cuda_runtime.h>
namespace mmdeploy {
namespace cuda {
// Tiles `pattern` (pattern_size bytes) across the dst_size bytes of `dst`
// using a grid-stride loop, so any launch size covers the whole buffer.
__global__ void FillKernel(void* dst, size_t dst_size, const void* pattern, size_t pattern_size) {
  auto out = static_cast<uchar1*>(dst);
  auto src = static_cast<const uchar1*>(pattern);
  size_t i = threadIdx.x + blockIdx.x * blockDim.x;
  for (; i < dst_size; i += blockDim.x * gridDim.x) {
    out[i] = src[i % pattern_size];
  }
}
// Fills `dst` (dst_size bytes of device memory) with repeated copies of
// `pattern` (pattern_size bytes of device memory) on `stream`.
// The fill is asynchronous with respect to the host. Returns 0.
int Fill(void* dst, size_t dst_size, const void* pattern, size_t pattern_size,
         cudaStream_t stream) {
  // A zero-byte fill (or an empty pattern) previously launched a grid of
  // zero blocks, which is an invalid launch configuration; an empty pattern
  // would also make the kernel compute `i % 0`.
  if (dst_size == 0 || pattern_size == 0) {
    return 0;
  }
  const unsigned int n_threads = 256;
  const unsigned int n_blocks = (dst_size + n_threads - 1) / n_threads;
  FillKernel<<<n_blocks, n_threads, 0, stream>>>(dst, dst_size, pattern, pattern_size);
  return 0;
}
} // namespace cuda
} // namespace mmdeploy
|
23,085 | #include <iostream>
#include <cuda_runtime_api.h>
#include <cuda.h>
// Define and implement the GPU addition function
// This version is a vector addition, with N threads
// and one block.
// Adding one a and b instance and storing in one c instance.
// One thread per element within a single block: c = a + b element-wise.
__global__ void add(int *a, int *b, int *c)
{
    const int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
// Nmber of blocks
#define N 512
// Classic single-block vector addition demo: fill two host arrays, add them
// on the GPU, print the sums, and release everything.
int main()
{
    // Host arrays and their device mirrors.
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    const int size = N * sizeof(int);
    // Device allocations.
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    // Host allocations.
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);
    // Deterministic test data.
    for (int i = 0; i < N; ++i)
    {
        a[i] = 10*i;
        b[i] = 20*i;
    }
    // Upload inputs, run one block of N threads, download the result.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    add<<<1,N>>>(d_a, d_b, d_c);
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    // Print results.
    for (int i = 0; i < N; ++i)
    {
        std::cout << "sum[" << i << "] is " << c[i] << std::endl;
    }
    // Release host then device memory.
    free(a);
    free(b);
    free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
|
23,086 | #include<stdlib.h>
#include<stdio.h>
#include<cuda.h>
// Adds (threadIdx.x + 1) to element threadIdx.x of arr. When arr was
// initialised with arr[i] == i (as in main below), the result is 2*i + 1.
__global__ void multiGo(float* arr)
{
    const int i = threadIdx.x;
    arr[i] += i + 1;
}
// Demo: fill h_A with 0..N-1, run multiGo on the GPU, and print the input,
// the sentinel-initialised output buffer, and the result.
int main()
{
    const int N = 5;
    size_t size = N * sizeof(float);  // byte count for all transfers/allocs
    float* h_A = (float*)malloc(size);
    float* h_B = (float*)malloc(size);
    // Fill h_A with the numbers 0..N-1 and echo it.
    for(int index = 0; index < N; index = index + 1)
    {
        h_A[index] = index;
        printf("%f\n",h_A[index]);
    }
    float* d_A;
    // Error checking on the device allocation (cudaMalloc returns cudaError_t).
    cudaError_t problemo = cudaMalloc((void**)&d_A, size);
    if(problemo != cudaSuccess)
    {
        printf("%s\n", cudaGetErrorString(problemo));
    }
    // Copy the contents of h_A (host) into d_A (device).
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    // Pre-fill h_B with sentinel values so a failed copy-back is visible.
    for(int id = 0; id < N; id = id + 1)
    {
        h_B[id] = -1;
        printf("%f\n",h_B[id]);
    }
    multiGo<<<1,5>>>(d_A);
    // Blocking copy: also synchronizes with the kernel.
    cudaMemcpy(h_B, d_A, size, cudaMemcpyDeviceToHost);
    // Print the result elements.
    for(int index_Two = 0; index_Two < N; index_Two = index_Two + 1)
    {
        printf("%f\n", h_B[index_Two]);
    }
    free(h_A);
    free(h_B);  // was leaked before
    cudaFree(d_A);
}
|
23,087 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Prints the key properties of CUDA device 0 to stdout.
void query_device(){
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0){
        printf("No CUDA support device found\n");
        // Previously fell through and queried device 0 anyway, which fails
        // when no device exists.
        return;
    }
    int devNo = 0;
    cudaDeviceProp iProp;
    cudaGetDeviceProperties(&iProp, devNo);
    printf("Device %d: %s\n", devNo, iProp.name);
    printf("Clock rate: %d\n", iProp.clockRate);
    printf("Number of multiprocessors: %d\n", iProp.multiProcessorCount);
    printf("Compute capability: %d.%d\n", iProp.major, iProp.minor);
    printf("Amount of global memory: %4.2f KB\n", (double) (iProp.totalGlobalMem/1024));
    printf("Amount of constant memory: %4.2f KB\n", (double) (iProp.totalConstMem/1024));
    printf("Amount of shared memory per block: %4.2f KB\n", (double) (iProp.sharedMemPerBlock/1024));
    printf("Max threads per block: %d\n", iProp.maxThreadsPerBlock);
    printf("Max block dimension: (%d, %d, %d)\n", iProp.maxThreadsDim[0], iProp.maxThreadsDim[1], iProp.maxThreadsDim[2]);
    printf("Max grid size: (%d, %d, %d)\n", iProp.maxGridSize[0], iProp.maxGridSize[1], iProp.maxGridSize[2]);
    return;
}
int main(){
    // Report the properties of the default CUDA device and exit.
    query_device();
    return 0;
}
|
23,088 | #define _CRT_SECURE_NO_WARNINGS
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <iostream>
//const bool DEBUG = true;
//
#define R1 64
//
#define R2 2
#define INF INT32_MAX / 2
//
int ceil(const int a, const int b);
//#define Q2 ceil(R1, R2)
#define Q2 (R1 / R2)
//
#define S1 ((R2 * R2 > R1) ? R1 : R2 * R2)
#define T1 (R1 / S1)
#define T2 ((S1 < R1) ? R1 : Q2 * Q2)
#define S2 (R1 / T2)
#define INPUT_PATH "CUDA_FW/graph.txt"
#define OUTPUT_PATH "output.txt"
#define OUTPUT_TRUE_PATH "output_true.txt"
//n - vertices, m - edges
int n, m;
__global__ void kernelI(int * deviceGraph, int pitch, int n, int k);
__global__ void kernelSD(int* deviceGraph, int pitch, int n, int k);
__global__ void kernelDD(int* deviceGraph, int pitch, int n, int k);
cudaError_t cudaStatus = cudaSetDevice(0);
// Reads the edge list from INPUT_PATH into a pinned n*n adjacency matrix
// (row-major, INF for missing edges, 0 on the diagonal). Sets the globals
// n (vertices) and m (edges). Caller releases the buffer with cudaFreeHost.
int* readGraph() {
    FILE* inputFile = fopen(INPUT_PATH, "r");
    if (inputFile == NULL) {
        // Previously dereferenced a NULL stream when the file was missing.
        fprintf(stderr, "Cannot open %s\n", INPUT_PATH);
        exit(1);
    }
    fscanf(inputFile, "%d %d", &n, &m);
    int* graph;
    // Pinned host memory: required for fast transfers of the big matrix.
    cudaStatus = cudaMallocHost(&graph, sizeof(int) * n * n);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "CudaMallocHost failed!");
    }
    // Initialise to "no edge" everywhere except the zero diagonal.
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            graph[i * n + j] = (i != j) ? INF : 0;
        }
    }
    for (int i = 0; i < m; ++i) {
        int u, v, d;
        fscanf(inputFile, "%d %d %d", &u, &v, &d);
        graph[u * n + v] = d;
    }
    fclose(inputFile);
    return graph;
}
// Writes the solved distance matrix to OUTPUT_PATH: first line is n, then
// n rows of n space-separated distances.
void writeResult(int32_t * hostGraph) {
    // Use the OUTPUT_PATH macro instead of a duplicated string literal.
    FILE* outputFile = fopen(OUTPUT_PATH, "w");
    if (outputFile == NULL) {
        fprintf(stderr, "Cannot open %s for writing\n", OUTPUT_PATH);
        return;
    }
    fprintf(outputFile, "%d\n", n);
    int k = 0;
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            fprintf(outputFile, "%d ", hostGraph[k]);
            ++k;
        }
        fprintf(outputFile, "\n");
    }
    fclose(outputFile);
}
// Integer ceiling division for non-negative a and positive b:
// smallest q such that q * b >= a.
int ceil(const int num, const int den) {
    const int rounded = num + den - 1;
    return rounded / den;
}
// Compares `graph` against the reference matrix stored in OUTPUT_TRUE_PATH.
// Returns true iff every entry matches; returns false (instead of crashing)
// when the reference file cannot be opened.
bool check(int* graph) {
    FILE* file = fopen(OUTPUT_TRUE_PATH, "r");
    if (file == NULL) {
        fprintf(stderr, "Cannot open %s\n", OUTPUT_TRUE_PATH);
        return false;
    }
    int n1;
    fscanf(file, "%d", &n1);
    for (int i = 0; i < n1; ++i) {
        for (int j = 0; j < n1; ++j) {
            int e;
            fscanf(file, "%d", &e);
            if (e != graph[i * n + j]) {
                fclose(file);
                return false;
            }
        }
    }
    fclose(file);
    return true;
}
// Empty warm-up kernel: launched once before timing so CUDA context
// creation cost is not attributed to the measured loop.
// (The previous unused `int i = threadIdx.x;` local is removed.)
__global__ void wakeGPU() {
}
// Blocked Floyd-Warshall driver: loads the graph, runs the three-phase
// blocked algorithm on the GPU, times it, verifies against a reference
// file, and writes the result.
int main() {
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
    }
    // Pinned host adjacency matrix; also sets globals n and m.
    int* hostGraph = readGraph();
    printf("Graph is loaded\n");
    const int Q1 = ceil(n, R1);
    if (Q1 * R1 != n) {
        printf("n should divide by R1\n");
        return 1;
    }
    int* deviceGraph = 0;
    size_t pitch;
    int pitchInt;
    cudaStatus = cudaMallocPitch((void**)& deviceGraph, &pitch, (size_t)(n * sizeof(int32_t)), (size_t)n);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "CudaMallocPitch failed!");
    }
    cudaStatus = cudaMemcpy2D(deviceGraph, pitch, hostGraph, n * sizeof(int32_t), n * sizeof(int32_t), n, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy2D failed!");
    }
    // Kernels index pitched rows in ints, so the pitch must be int-aligned.
    assert(!(pitch % sizeof(int)));
    pitchInt = pitch / sizeof(int);
    // Phase grids: 1 independent block, the dependent row/column strips,
    // and the (Q1-1)^2 doubly-dependent blocks.
    dim3 gridI(1);
    dim3 blockI(Q2, Q2);
    dim3 gridSD(Q1 - 1, 2);
    dim3 blockSD(Q2, Q2);
    dim3 gridDD(Q1 - 1, Q1 - 1);
    dim3 blockDD(Q2, Q2);
    cudaEvent_t stepFinishedEvent;
    cudaEventCreate(&stepFinishedEvent);
    // Absorb CUDA context-initialisation cost before timing.
    wakeGPU<<<1, 1>>>();
    auto start = std::chrono::high_resolution_clock::now();
    for (int k = 0; k < Q1; ++k) {
        kernelI<<<gridI, blockI>>>(deviceGraph, pitchInt, n, k);
        cudaEventRecord(stepFinishedEvent);
        cudaEventSynchronize(stepFinishedEvent);
        kernelSD<<<gridSD, blockSD>>>(deviceGraph, pitchInt, n, k);
        cudaEventRecord(stepFinishedEvent);
        cudaEventSynchronize(stepFinishedEvent);
        kernelDD<<<gridDD, blockDD>>>(deviceGraph, pitchInt, n, k);
        cudaEventRecord(stepFinishedEvent);
        cudaEventSynchronize(stepFinishedEvent);
    }
    auto end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> diff = end - start;
    std::cout << diff.count() * 1000 << "(ms)\n";
    // Download the result. The direction flag was cudaMemcpyHostToDevice,
    // which never fetched the device result, so the host buffer still held
    // the input graph.
    cudaStatus = cudaMemcpy2D(hostGraph, n * sizeof(int32_t), deviceGraph, pitch, n * sizeof(int32_t), n, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy2D failed!");
    }
    printf(check(hostGraph) ? "Result is correct\n" : "Result is not correct\n");
    writeResult(hostGraph);
    // Release resources that were previously leaked.
    cudaEventDestroy(stepFinishedEvent);
    cudaFree(deviceGraph);
    cudaFreeHost(hostGraph);
    return 0;
}
//
// Blocked Floyd-Warshall, phase 1: fully relax the k-th diagonal ("lead")
// R1 x R1 block in shared memory. Launch: one block of Q2 x Q2 threads;
// each thread owns an R2 x R2 sub-tile of the block. S1/S2/T1/T2 describe
// the strided cooperative load/store pattern (see the macros above).
__global__ void kernelI(int* __restrict__ deviceGraph, const int pitch, const int n, const int kBlock) {
    const int localI = threadIdx.y * R2;
    const int localJ = threadIdx.x * R2;
    const int base = kBlock * R1;
    //const int globalI = base + localI;
    //const int globalJ = base + localJ;
    __shared__ int localBlock[R1][R1];
    // Linear thread id drives a strided cooperative copy of the whole tile.
    const int threadID = threadIdx.y * Q2 + threadIdx.x;
    const int offsetI = threadID / R1;
    const int offsetJ = threadID % R1;
    int i = offsetI;
    // Load the lead block from global memory into shared memory.
    //#pragma unroll
    for (int i1 = 0; i1 < S1; ++i1) {
        int j = offsetJ;
        //#pragma unroll
        for (int i2 = 0; i2 < S2; ++i2) {
            localBlock[i][j] = deviceGraph[(base + i) * pitch + (base + j)];
            j += T2;
        }
        i += T1;
    }
    __syncthreads();
    // R1 relaxation rounds, one per intermediate vertex within the block;
    // a barrier after each round keeps the rounds ordered.
    //#pragma unroll
    for (int k = 0; k < R1; ++k) {
        //#pragma unroll
        for (int i = 0; i < R2; ++i) {
            //#pragma unroll
            for (int j = 0; j < R2; ++j) {
                localBlock[localI + i][localJ + j] = min(localBlock[localI + i][localJ + j],
                    localBlock[localI + i][k] + localBlock[k][localJ + j]);
            }
        }
        __syncthreads();
    }
    i = offsetI;
    // Write the relaxed block back to global memory (same strided pattern).
    //#pragma unroll
    for (int i1 = 0; i1 < S1; ++i1) {
        int j = offsetJ;
        //#pragma unroll
        for (int i2 = 0; i2 < S2; ++i2) {
            deviceGraph[(base + i) * pitch + (base + j)] = localBlock[i][j];
            j += T2;
        }
        i += T1;
    }
}
// Blocked Floyd-Warshall, phase 2: relax the singly-dependent blocks, i.e.
// every block sharing the lead block's row (blockIdx.y == 0) or column
// (blockIdx.y == 1). blockIdx.x enumerates the Q1-1 non-lead blocks of
// that strip, with the index shifted past the lead block itself.
// Launch: (Q1-1, 2) blocks of Q2 x Q2 threads; each thread owns an
// R2 x R2 sub-tile.
__global__ void kernelSD(int* __restrict__ deviceGraph, const int pitch, const int n, const int kBlock) {
    const int localI = threadIdx.y * R2;
    const int localJ = threadIdx.x * R2;
    const int baseLead = kBlock * R1;
    int baseI, baseJ;
    if (blockIdx.y == 0) {
        // Row strip: same block-row as the lead block.
        baseI = baseLead;
        baseJ = (blockIdx.x + (blockIdx.x >= kBlock)) * R1;
    }
    else {
        // Column strip: same block-column as the lead block.
        baseJ = baseLead;
        baseI = (blockIdx.x + (blockIdx.x >= kBlock)) * R1;
    }
    //const int globalI = baseI + localI;
    //const int globalJ = baseJ + localJ;
    __shared__ int localBlock[R1][R1];
    __shared__ int leadBlock[R1][R1];
    // Linear thread id drives a strided cooperative copy of each tile.
    const int threadID = threadIdx.y * Q2 + threadIdx.x;
    const int offsetI = threadID / R1;
    const int offsetJ = threadID % R1;
    int i = offsetI;
    // Load this block's tile.
    //#pragma unroll
    for (int i1 = 0; i1 < S1; ++i1) {
        int j = offsetJ;
        //#pragma unroll
        for (int i2 = 0; i2 < S2; ++i2) {
            localBlock[i][j] = deviceGraph[(baseI + i) * pitch + (baseJ + j)];
            j += T2;
        }
        i += T1;
    }
    i = offsetI;
    // Load the (already relaxed) lead block.
    //#pragma unroll
    for (int i1 = 0; i1 < S1; ++i1) {
        int j = offsetJ;
        //#pragma unroll
        for (int i2 = 0; i2 < S2; ++i2) {
            leadBlock[i][j] = deviceGraph[(baseLead + i) * pitch + (baseLead + j)];
            j += T2;
        }
        i += T1;
    }
    __syncthreads();
    // The branch is uniform per launch block (all threads share blockIdx.y),
    // so the barriers inside are reached by every thread of the block.
    if (blockIdx.y == 0) {
        // Row strip: rows come from the lead block, columns from this block.
        //#pragma unroll
        for (int k = 0; k < R1; ++k) {
            //#pragma unroll
            for (int i = 0; i < R2; ++i) {
                //#pragma unroll
                for (int j = 0; j < R2; ++j) {
                    localBlock[localI + i][localJ + j] = min(localBlock[localI + i][localJ + j],
                        leadBlock[localI + i][k] + localBlock[k][localJ + j]);
                }
            }
            __syncthreads();
        }
    }
    else {
        // Column strip: rows come from this block, columns from the lead block.
        //#pragma unroll
        for (int k = 0; k < R1; ++k) {
            //#pragma unroll
            for (int i = 0; i < R2; ++i) {
                //#pragma unroll
                for (int j = 0; j < R2; ++j) {
                    localBlock[localI + i][localJ + j] = min(localBlock[localI + i][localJ + j],
                        localBlock[localI + i][k] + leadBlock[k][localJ + j]);
                }
            }
            __syncthreads();
        }
    }
    i = offsetI;
    // Write the relaxed tile back to global memory.
    //#pragma unroll
    for (int i1 = 0; i1 < S1; ++i1) {
        int j = offsetJ;
        //#pragma unroll
        for (int i2 = 0; i2 < S2; ++i2) {
            deviceGraph[(baseI + i) * pitch + (baseJ + j)] = localBlock[i][j];
            j += T2;
        }
        i += T1;
    }
}
// Blocked Floyd-Warshall, "doubly dependent" (DD) pass: relaxes every tile
// that shares neither block-row nor block-column with pivot block kBlock,
// using the pivot-column panel and pivot-row panel produced by the SD pass.
// Each thread accumulates its R2 x R2 sub-tile in registers (array c), so no
// per-k barrier is required in the relaxation loop.
__global__ void kernelDD(int* __restrict__ deviceGraph, const int pitch, const int n, const int kBlock) {
// Top-left corner of this thread's R2 x R2 sub-tile within the tile
const int localI = threadIdx.y * R2;
const int localJ = threadIdx.x * R2;
const int baseLead = kBlock * R1;
// Skip over the pivot row/column indices when mapping blockIdx to tiles
const int baseI = (blockIdx.y + (blockIdx.y >= kBlock)) * R1;
const int baseJ = (blockIdx.x + (blockIdx.x >= kBlock)) * R1;
//const int globalI = baseI + localI;
//const int globalJ = baseJ + localJ;
__shared__ int leadRowBlock[R1][R1];
__shared__ int leadColumnBlock[R1][R1];
// Per-thread register accumulator for this thread's sub-tile
int c[R2][R2];
// Linearized thread id for cooperative, strided staging loads
const int threadID = threadIdx.y * Q2 + threadIdx.x;
const int offsetI = threadID / R1;
const int offsetJ = threadID % R1;
// Stage the tile being updated; leadRowBlock is reused here as scratch
// before it later holds the pivot-row panel.
int i = offsetI;
//#pragma unroll
for (int i1 = 0; i1 < S1; ++i1) {
int j = offsetJ;
//#pragma unroll
for (int i2 = 0; i2 < S2; ++i2) {
leadRowBlock[i][j] = deviceGraph[(baseI + i) * pitch + (baseJ + j)];
j += T2;
}
i += T1;
}
__syncthreads();
// Copy this thread's sub-tile from the scratch tile into registers
//#pragma unroll
for (int i = 0; i < R2; ++i) {
//#pragma unroll
for (int j = 0; j < R2; ++j) {
c[i][j] = leadRowBlock[localI + i][localJ + j];
}
}
// Barrier before leadRowBlock is overwritten with the pivot-row panel
__syncthreads();
// Stage the pivot-row panel (row kBlock, column of this tile)
i = offsetI;
//#pragma unroll
for (int i1 = 0; i1 < S1; ++i1) {
int j = offsetJ;
//#pragma unroll
for (int i2 = 0; i2 < S2; ++i2) {
leadRowBlock[i][j] = deviceGraph[(baseLead + i) * pitch + (baseJ + j)];
j += T2;
}
i += T1;
}
// Stage the pivot-column panel (row of this tile, column kBlock)
i = offsetI;
//#pragma unroll
for (int i1 = 0; i1 < S1; ++i1) {
int j = offsetJ;
//#pragma unroll
for (int i2 = 0; i2 < S2; ++i2) {
leadColumnBlock[i][j] = deviceGraph[(baseI + i) * pitch + (baseLead + j)];
j += T2;
}
i += T1;
}
__syncthreads();
// Relax through all R1 intermediate vertices of the pivot block; both shared
// panels are read-only here, so the whole loop runs without barriers.
//#pragma unroll
for (int k = 0; k < R1; ++k) {
//#pragma unroll
for (int i = 0; i < R2; ++i) {
//#pragma unroll
for (int j = 0; j < R2; ++j) {
c[i][j] = min(c[i][j], leadColumnBlock[localI + i][k] + leadRowBlock[k][localJ + j]);
}
}
}
__syncthreads();
/*
#pragma unroll
for (int i2 = 0, i1 = localI; i2 < R2; ++i1, ++i2) {
#pragma unroll
for (int j2 = 0, j1 = localJ; j2 < R2; ++j1, ++j2) {
int ind = (globalI + i2) * pitch + (globalJ + j2);
int c0 = deviceGraph[ind];
#pragma unroll
for (int k = 0; k < R1; ++k) {
c0 = min(c0, leadColumnBlock[i1][k] + leadRowBlock[k][j1]);
}
deviceGraph[ind] = c0;
}
}*/
// Spill the register results back through shared memory so the final global
// store can use the same coalesced strided pattern as the loads.
#pragma unroll
for (int i = 0; i < R2; ++i) {
#pragma unroll
for (int j = 0; j < R2; ++j) {
leadRowBlock[localI + i][localJ + j] = c[i][j];
}
}
__syncthreads();
i = offsetI;
#pragma unroll
for (int i1 = 0; i1 < S1; ++i1) {
int j = offsetJ;
#pragma unroll
for (int i2 = 0; i2 < S2; ++i2) {
deviceGraph[(baseI + i) * pitch + (baseJ + j)] = leadRowBlock[i][j];
j += T2;
}
i += T1;
}
}
//
/*__global__ void kernelI(int* __restrict__ deviceGraph, const int pitch, const int n, const int kBlock) {
const int localI = threadIdx.y;
const int localJ = threadIdx.x;
const int base = kBlock * R1;
const int globalI = base + localI;
const int globalJ = base + localJ;
__shared__ int localBlock[R1][R1];
localBlock[localI][localJ] = deviceGraph[globalI * pitch + globalJ];
__syncthreads();
#pragma unroll
for (int k = 0; k < R1; ++k) {
localBlock[localI][localJ] = min(localBlock[localI][localJ],
localBlock[localI][k] + localBlock[k][localJ]);
__syncthreads();
}
deviceGraph[globalI * pitch + globalJ] = localBlock[localI][localJ];
}
__global__ void kernelSD(int* __restrict__ deviceGraph, const int pitch, const int n, const int kBlock) {
const int localI = threadIdx.y;
const int localJ = threadIdx.x;
const int baseLead = kBlock * R1;
int baseI, baseJ;
if (blockIdx.y == 0) {
baseI = baseLead;
baseJ = (blockIdx.x + (blockIdx.x >= kBlock)) * R1;
}
else {
baseJ = baseLead;
baseI = (blockIdx.x + (blockIdx.x >= kBlock)) * R1;
}
const int globalI = baseI + localI;
const int globalJ = baseJ + localJ;
__shared__ int localBlock[R1][R1];
__shared__ int leadBlock[R1][R1];
localBlock[localI][localJ] = deviceGraph[globalI * pitch + globalJ];
leadBlock[localI][localJ] = deviceGraph[(baseLead + localI) * pitch + (baseLead + localJ)];
__syncthreads();
if (blockIdx.y == 0) {
#pragma unroll
for (int k = 0; k < R1; ++k) {
localBlock[localI][localJ] = min(localBlock[localI][localJ],
leadBlock[localI][k] + localBlock[k][localJ]);
__syncthreads();
}
}
else {
#pragma unroll
for (int k = 0; k < R1; ++k) {
localBlock[localI][localJ] = min(localBlock[localI][localJ],
localBlock[localI][k] + leadBlock[k][localJ]);
__syncthreads();
}
}
deviceGraph[globalI * pitch + globalJ] = localBlock[localI][localJ];
}
__global__ void kernelDD(int* __restrict__ deviceGraph, const int pitch, const int n, const int kBlock) {
const int localI = threadIdx.y;
const int localJ = threadIdx.x;
const int baseLead = kBlock * R1;
const int baseI = (blockIdx.y + (blockIdx.y >= kBlock)) * R1;
const int baseJ = (blockIdx.x + (blockIdx.x >= kBlock)) * R1;
const int globalI = baseI + localI;
const int globalJ = baseJ + localJ;
__shared__ int leadRowBlock[R1][R1];
__shared__ int leadColumnBlock[R1][R1];
int c = deviceGraph[globalI * pitch + globalJ];
leadColumnBlock[localI][localJ] = deviceGraph[(baseI + localI) * pitch + (baseLead + localJ)];
leadRowBlock[localI][localJ] = deviceGraph[(baseLead + localI) * pitch + (baseJ + localJ)];
__syncthreads();
#pragma unroll
for (int k = 0; k < R1; ++k) {
c = min(c, leadColumnBlock[localI][k] + leadRowBlock[k][localJ]);
}
__syncthreads();
deviceGraph[globalI * pitch + globalJ] = c;
}*/
//
/*__global__ void kernelI(int* __restrict__ deviceGraph, const int pitch, const int n, const int kBlock) {
const int localI = threadIdx.y * R2;
const int localJ = threadIdx.x * R2;
const int base = kBlock * R1;
const int globalI = base + localI;
const int globalJ = base + localJ;
__shared__ int localBlock[R1][R1];
for (int i = 0; i < R2; ++i) {
for (int j = 0; j < R2; ++j) {
localBlock[localI + i][localJ + j] = deviceGraph[(globalI + i) * pitch + (globalJ + j)];
}
}
__syncthreads();
//#pragma unroll
for (int k = 0; k < R1; ++k) {
//#pragma unroll
for (int i = 0; i < R2; ++i) {
//#pragma unroll
for (int j = 0; j < R2; ++j) {
localBlock[localI + i][localJ + j] = min(localBlock[localI + i][localJ + j],
localBlock[localI + i][k] + localBlock[k][localJ + j]);
}
}
__syncthreads();
}
//#pragma unroll
for (int i = 0; i < R2; ++i) {
for (int j = 0; j < R2; ++j) {
deviceGraph[(globalI + i) * pitch + (globalJ + j)] = localBlock[localI + i][localJ + j];
}
}
}
__global__ void kernelSD(int* __restrict__ deviceGraph, const int pitch, const int n, const int kBlock) {
const int localI = threadIdx.y * R2;
const int localJ = threadIdx.x * R2;
const int baseLead = kBlock * R1;
int baseI, baseJ;
if (blockIdx.y == 0) {
baseI = baseLead;
baseJ = (blockIdx.x + (blockIdx.x >= kBlock)) * R1;
}
else {
baseJ = baseLead;
baseI = (blockIdx.x + (blockIdx.x >= kBlock)) * R1;
}
const int globalI = baseI + localI;
const int globalJ = baseJ + localJ;
const int leadGlobalI = baseLead + localI;
const int leadGlobalJ = baseLead + localJ;
__shared__ int localBlock[R1][R1];
__shared__ int leadBlock[R1][R1];
for (int i = 0; i < R2; ++i) {
for (int j = 0; j < R2; ++j) {
localBlock[localI + i][localJ + j] = deviceGraph[(globalI + i) * pitch + (globalJ + j)];
}
}
for (int i = 0; i < R2; ++i) {
for (int j = 0; j < R2; ++j) {
leadBlock[localI + i][localJ + j] = deviceGraph[(leadGlobalI + i) * pitch + (leadGlobalJ + j)];
}
}
__syncthreads();
if (blockIdx.y == 0) {
//#pragma unroll
for (int k = 0; k < R1; ++k) {
//#pragma unroll
for (int i = 0; i < R2; ++i) {
//#pragma unroll
for (int j = 0; j < R2; ++j) {
localBlock[localI + i][localJ + j] = min(localBlock[localI + i][localJ + j],
leadBlock[localI + i][k] + localBlock[k][localJ + j]);
}
}
__syncthreads();
}
}
else {
//#pragma unroll
for (int k = 0; k < R1; ++k) {
//#pragma unroll
for (int i = 0; i < R2; ++i) {
//#pragma unroll
for (int j = 0; j < R2; ++j) {
localBlock[localI + i][localJ + j] = min(localBlock[localI + i][localJ + j],
localBlock[localI + i][k] + leadBlock[k][localJ + j]);
}
}
__syncthreads();
}
}
for (int i = 0; i < R2; ++i) {
for (int j = 0; j < R2; ++j) {
deviceGraph[(globalI + i) * pitch + (globalJ + j)] = localBlock[localI + i][localJ + j];
}
}
}
__global__ void kernelDD(int* __restrict__ deviceGraph, const int pitch, const int n, const int kBlock) {
const int localI = threadIdx.y * R2;
const int localJ = threadIdx.x * R2;
const int baseLead = kBlock * R1;
const int baseI = (blockIdx.y + (blockIdx.y >= kBlock)) * R1;
const int baseJ = (blockIdx.x + (blockIdx.x >= kBlock)) * R1;
const int globalI = baseI + localI;
const int globalJ = baseJ + localJ;
const int leadGlobalI = baseLead + localI;
const int leadGlobalJ = baseLead + localJ;
__shared__ int leadRowBlock[R1][R1];
__shared__ int leadColumnBlock[R1][R1];
int c[R2][R2];
//#pragma unroll
for (int i = 0; i < R2; ++i) {
//#pragma unroll
for (int j = 0; j < R2; ++j) {
c[i][j] = deviceGraph[(globalI + i) * pitch + (globalJ + j)];
}
}
for (int i = 0; i < R2; ++i) {
for (int j = 0; j < R2; ++j) {
leadRowBlock[localI + i][localJ + j] = deviceGraph[(leadGlobalI + i) * pitch + (globalJ + j)];
}
}
for (int i = 0; i < R2; ++i) {
for (int j = 0; j < R2; ++j) {
leadColumnBlock[localI + i][localJ + j] = deviceGraph[(globalI + i) * pitch + (leadGlobalJ + j)];
}
}
__syncthreads();
//#pragma unroll
for (int k = 0; k < R1; ++k) {
//#pragma unroll
for (int i = 0; i < R2; ++i) {
//#pragma unroll
for (int j = 0; j < R2; ++j) {
c[i][j] = min(c[i][j], leadColumnBlock[localI + i][k] + leadRowBlock[k][localJ + j]);
}
}
}
__syncthreads();
//#pragma unroll
for (int i = 0; i < R2; ++i) {
//#pragma unroll
for (int j = 0; j < R2; ++j) {
deviceGraph[(globalI + i) * pitch + (globalJ + j)] = c[i][j];
}
}
}*/
|
23,089 | #include <cuda.h>
#include <iostream>
#define N 1024
using namespace std;
// Element-wise vector addition: c[i] = a[i] + b[i] for the first N elements.
// Expected launch: a single block of N threads (indexes via threadIdx only).
__global__ void add(int *a, int *b, int *c)
{
    int idx = threadIdx.x;
    if (idx >= N)
        return;
    c[idx] = a[idx] + b[idx];
}
// Host driver: builds two integer vectors, adds them on the GPU, and prints
// the first element of the result.
int main(int argc, char *argv[])
{
    int *a, *b, *c, *A_D, *B_D, *C_D;
    a = new int[N];
    b = new int[N];
    c = new int[N];
    // Initialise host inputs: a = 1..N, b = 2, 4, ..., 2N
    for (int i = 0; i < N; i++)
    {
        a[i] = i + 1;
        b[i] = 2 * i + 2;
    }
    cudaMalloc((void**)&A_D, N * sizeof(int));
    cudaMalloc((void**)&B_D, N * sizeof(int));
    // Fix: C_D was never allocated - the kernel and the copy-back were going
    // through an uninitialised device pointer (undefined behaviour).
    cudaMalloc((void**)&C_D, N * sizeof(int));
    cudaMemcpy(A_D, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(B_D, b, N * sizeof(int), cudaMemcpyHostToDevice);
    // One block of N threads; the kernel guards tid < N
    add<<<1, N>>>(A_D, B_D, C_D);
    // Blocking copy also synchronizes with the kernel
    cudaMemcpy(c, C_D, N * sizeof(int), cudaMemcpyDeviceToHost);
    cout << c[0];
    cudaFree(A_D);
    cudaFree(B_D);
    cudaFree(C_D);
    // Fix: arrays allocated with new[] must be released with delete[]
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
23,090 | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<pthread.h>
#include<math.h>
#define MAX_THREAD 1024
#define USAGE_EXIT(s) do{ \
printf("Usage: %s <# of elements> <random seed> \n %s\n", argv[0], s); \
exit(-1);\
}while(0);
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if( err != cudaSuccess){\
printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
// One level of a pairwise XOR tree over a 2^u-element segment of da that
// starts at index `prev`. Thread i owns the stride-`in` pair [l, r]; even
// threads fold the pair into the left slot, odd threads into the right slot.
// On the last level (ctr == u) the segment's result is folded into da[0] and
// published to *d_ans.
// NOTE(review): correctness relies on successive launches (one per ctr) being
// ordered on the same stream; within one launch the even/odd slots do not
// overlap - verify against the host loop that drives this kernel.
__global__ void calculate(long long int *da, const long long int max_in, const long long int in, const long long int prev, long long int *d_ans, const long long int ctr, const long long int u){
long long int i = blockDim.x * blockIdx.x + threadIdx.x;
// Left and right ends of this thread's pair within the current segment
long long int l = i*in + prev;
long long int r = (i+1)*in-1 + prev;
// Threads whose pair falls past the segment do nothing
if(r > max_in)
return;
if(i%2==0){
da[l]=da[l]^da[r];
}
else{
da[r]=da[l]^da[r];
}
// Fold this segment's root into the running result at da[0]
if(prev!=0 && ctr==u) da[0] = da[0]^da[prev];
if(ctr==u)
*d_ans = da[0];
}
// Host driver: XOR-reduces `num_elements` random 64-bit values on the GPU by
// repeatedly peeling off the largest power-of-two prefix and reducing it with
// a log-depth XOR tree (kernel `calculate`), then prints the result and the
// elapsed time.
int main(int argc, char **argv)
{
    struct timeval start, end;
    long long int *a, num_elements, ctr;
    long long int *ans;
    long long int *d_ans;
    if(argc !=3)
        USAGE_EXIT("not enough parameters");
    // Fix: atoi() truncates to int; the counts are long long, so use atoll()
    num_elements = atoll(argv[1]);
    if(num_elements <=0)
        USAGE_EXIT("invalid num elements");
    long long int SEED = atoll(argv[2]);
    long long int size = num_elements * sizeof(long long int);
    a = (long long int *)malloc(size);
    ans = (long long int *)malloc(sizeof(long long int));
    if(!a){
        USAGE_EXIT("invalid num elements, not enough memory");
    }
    srand(SEED);
    for(ctr=0; ctr<num_elements; ++ctr)
        a[ctr] = random();
    long long int * da;
    cudaMalloc(&da, size);
    CUDA_ERROR_EXIT("cudaMalloc1");
    cudaMalloc(&d_ans, sizeof(long long int));
    CUDA_ERROR_EXIT("cudaMalloc2");
    cudaMemcpy(da, a, size, cudaMemcpyHostToDevice);
    CUDA_ERROR_EXIT("memcpy1");
    gettimeofday(&start, NULL);
    long long int prev = 0;     // start of the segment still to be reduced
    long long int max_in = 0;   // last index of the current power-of-two segment
    long long int flag=0;       // set when a single trailing element remains
    long long int num = num_elements;
    for(;;){
        if(num_elements==1){
            flag=1;
            break;
        }
        if(num_elements<=0)
            break;
        // Largest power of two (2^u) that fits in the remaining elements
        long long int u = (long long int)log2((double)num_elements);
        long long int x = pow(2,u);
        max_in = prev + x -1;
        // One kernel launch per tree level; same-stream launches serialize
        for(ctr=1; ctr<=u; ++ctr){
            long long int in = pow(2,ctr);
            long long int threads = num_elements/in;
            if(threads>MAX_THREAD)threads = MAX_THREAD;
            long long int blocks = num_elements/threads;
            calculate<<<blocks, threads>>>(da, max_in, in, prev, d_ans, ctr, u);
            CUDA_ERROR_EXIT("kernel invocation");
        }
        prev += pow(2,u);
        num_elements = num - prev;
    }
    // Blocking copy synchronizes with all queued kernels
    cudaMemcpy(ans, d_ans, sizeof(long long int), cudaMemcpyDeviceToHost);
    CUDA_ERROR_EXIT("memcpy2");
    // Fold in the single leftover element (or handle the n == 1 case)
    if(flag==1 && num==1){
        (*ans) = 0^a[num-1];
    }
    else if(flag==1){
        (*ans) = (*ans)^a[num-1];
    }
    printf("XOR = %lld\n", (*ans));
    gettimeofday(&end, NULL);
    // Fix: TDIFF expands to an unsigned long expression, so print with %lu
    printf("Time taken = %lu microsecs\n", TDIFF(start, end));
    free(a);
    cudaFree(da);
    free(ans);
    cudaFree(d_ans);
    return 0;
}
|
23,091 | #include "includes.h"
// Element-wise select: target[i] = condition_mat[i] ? if_mat[i] : else_mat[i].
// Grid-stride loop, so any launch configuration covers all `len` elements.
__global__ void kWhere(float* condition_mat, float* if_mat, float* else_mat, float* target, unsigned int len) {
    const unsigned int start = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = start; i < len; i += stride) {
        // A float condition is truthy iff it compares unequal to zero
        target[i] = (condition_mat[i] != 0.0f) ? if_mat[i] : else_mat[i];
    }
}
23,092 | #include <stdio.h>
#include "FileUtils.cuh"
// Counts the lines in a text file. A trailing line without a final newline
// still counts as a line; an empty file has 0 lines.
// Returns -1 if the file cannot be opened.
//
// Fixes over the original: (1) fopen() was not checked, so a missing file
// crashed in fgetc(); (2) the final "last line has no newline" test compared
// `ch`, which is always EOF after the read loop, so any non-empty file that
// DID end with a newline was over-counted by one.
int n_lines(const char *file)
{
    FILE *myfile = fopen(file, "r");
    if (myfile == NULL) {
        return -1;
    }
    int ch;
    int prev = '\n';   // treat an empty file as ending in a newline -> 0 lines
    int count = 0;
    while ((ch = fgetc(myfile)) != EOF) {
        if (ch == '\n') {
            count++;
        }
        prev = ch;
    }
    // Count a last line that is not newline-terminated
    if (prev != '\n') {
        count++;
    }
    fclose(myfile);
    return count;
}
// Returns true iff `filename` can be opened for reading.
bool file_exists(const char *filename)
{
    FILE *fp = fopen(filename, "r");
    if (fp == NULL) {
        return false;
    }
    fclose(fp);
    return true;
}
|
23,093 | #include "includes.h"
// One Floyd-Warshall relaxation step for intermediate vertex k:
// Md[row][col] = min(Md[row][col], Md[row][k] + Md[k][col]).
// Launch mapping: one block per row, one thread per column.
__global__ void Compute_Path(int *Md, const int Width, const int k)
{
    const int row = blockIdx.x;
    const int col = threadIdx.x;
    if (Md[row * Width + k] + Md[k * Width + col] < Md[row * Width + col])
        Md[row * Width + col] = Md[row * Width + k] + Md[k * Width + col];
}
23,094 | #include "includes.h"
// One BFS level: each thread expands one vertex from the previous frontier
// (p_frontier, length *p_frontier_tail), atomically claims unvisited
// neighbours, labels them, and appends them to the current frontier.
// edges[] is the CSR row-offset array, dest[] the CSR adjacency array.
__global__ void BFS_UNIFIED(int source, int* edges, int* dest, int* label, int* visited, int *c_frontier_tail, int *c_frontier, int *p_frontier_tail, int *p_frontier)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= *p_frontier_tail)
        return;
    const int vertex = p_frontier[tid];
    for (int e = edges[vertex]; e < edges[vertex + 1]; ++e)
    {
        const int neighbour = dest[e];
        // First visitor wins: atomicExch returns 0 exactly once per vertex
        if (atomicExch(visited + neighbour, 1) == 0)
        {
            const int slot = atomicAdd(c_frontier_tail, 1);
            c_frontier[slot] = neighbour;
            label[neighbour] = label[vertex] + 1;
        }
    }
}
23,095 | #include "includes.h"
//FILE IO RELATED
//max number of lines in the training dataset
#define MAX_ROWS_TRAINING 16896
// max number of columns/features in the training dataset
#define MAX_COLUMNS_TRAINING 26
// max number of rows in the testing dataset
#define MAX_ROWS_TESTING 4096
// max number of columns in the testing data
#define MAX_COLUMNS_TESTING 26
//max number of characters/line
#define MAX_CHAR 300
// Number of features (columns) per data row, resident in constant memory
__constant__ int features = 26;
// Number of rows in the training data, resident in constant memory
__constant__ int num_rows = 16896;
// Host-side accumulators for transfer timing (microseconds)
long mem_cpy_time = 0;
long beta_cpy_time = 0;
// parallelized across the rows
// parallelized across the features
// Computes the logistic (sigmoid) of the dot product betas . data[row] for
// one row per thread: log_func_v[row] = 1 / (1 + exp(-betas . x_row)).
// The grid may overshoot num_rows; excess threads exit early.
__global__ void logistic_func(float* log_func_v, float* betas, float* data) {
    int row_index = blockIdx.x * blockDim.x + threadIdx.x;
    // Fix: the original had no bounds guard, so a grid that is not an exact
    // multiple of num_rows read/wrote out of bounds.
    if (row_index >= num_rows) {
        return;
    }
    float temp = 0.0f;
    for (int j = 0; j < features; j++) {
        float accessed_data = data[(row_index * features) + j];
        temp += betas[j] * accessed_data;
    }
    // Float literals avoid the original's silent promotion to double math
    log_func_v[row_index] = 1.0f / (1.0f + expf(-temp));
}
23,096 | #include "includes.h"
// Reads two inputs and performs one multiply-add whose result is stored to
// global memory, so the compiler cannot optimize the arithmetic away.
__global__ void testKernel(float* g_idata, float* g_odata)
{
    const float a = g_idata[0];
    const float b = g_idata[1];
    float acc = 1.0f;
    // Equivalent to the original result = val2 + (result * val1)
    acc = b + (acc * a);
    g_odata[0] = acc;
}
23,097 | #include "sum.cuh"
#include <cstdio>
#include <iostream>
const float COEFFICIENT = 1389.38757;
// Returns the maximum, over all rows of A, of the number of strictly
// positive entries in a row (i.e. the ELL width needed for A).
// NOTE(review): uses A.height for both dimensions, so A is assumed square.
int get_max_cols(Matrix A) {
    const int n = A.height;
    int best = 0;
    for (int row = 0; row < n; row++) {
        int positives = 0;
        for (int col = 0; col < n; col++) {
            if (A.elements[row * n + col] > 0) {
                positives++;
            }
        }
        if (positives > best) {
            best = positives;
        }
    }
    return best;
}
// Packs the dense matrix A into ELL format: `data` and `col_indices` are laid
// out column-major (slot-major), so entry s of row r lives at [s * nrows + r].
// Only nonzero entries are stored.
// NOTE(review): rows of A are read with stride `nrows` (not `ncols`), so this
// assumes a square input; there is also no check that a row's nonzero count
// stays within ncols_per_row - confirm with callers.
void fill_ell_matrix(ELL_Matrix ell, Matrix A, int nrows, int ncols, int ncols_per_row) {
    for (int row = 0; row < nrows; row++) {
        int slot = 0;
        for (int col = 0; col < ncols; col++) {
            const float v = A.elements[row * nrows + col];
            if (v != 0) {
                ell.data[slot * nrows + row] = v;
                ell.col_indices[slot * nrows + row] = col;
                slot++;
            }
        }
    }
}
// Placeholder: host-side ELL allocation is currently done inline in calculate().
void ell_mallocHost(){
}
// Placeholder: host-side ELL deallocation is currently handled in calculate().
void ell_freeHost() {
}
// Sign function: 1 for x > 0, -1 for x < 0, 0 otherwise (including NaN,
// matching the original's comparison-based behaviour).
__device__ float sign(float x) {
    if (x > 0.0f) {
        return 1.0f;
    }
    if (x < 0.0f) {
        return -1.0f;
    }
    return 0.0f;
}
// Euclidean distance between the 3-D coordinates of two atoms.
__device__ float atom_dist (atom a1, atom a2) {
    const float dx = a1.x - a2.x;
    const float dy = a1.y - a2.y;
    const float dz = a1.z - a2.z;
    return sqrtf(dx * dx + dy * dy + dz * dz);
}
// Host driver: given the n x n bond matrix, computes A^2 and A^3 on the GPU
// (via ELL sparse-matrix products), derives the distance-weighted matrix F,
// and reduces q^T F q to print the final energy (scaled by COEFFICIENT / 2).
//
// Fix over the original: the device ELL buffers, d_B, F, and all pinned host
// buffers were leaked; they are now released at the end.
void calculate(float* bonds, size_t n, atom* atom_coords, float* charges) {
    //TODO hard-coded value! fix it
    size_t ncols_per_row = 4;
    Matrix A;
    A.elements = bonds;
    A.height = n;
    A.width = n;
    size_t size = A.width * A.height * sizeof(float);
    // Build the ELL representation of A on the host (pinned memory)
    ELL_Matrix ell_A;
    size_t sizefloat = n * ncols_per_row * sizeof(float);
    size_t sizeint = n * ncols_per_row * sizeof(int);
    cudaMallocHost((void**)&ell_A.data, sizefloat);
    cudaMallocHost((void**)&ell_A.col_indices, sizeint);
    ell_A.ncols = n;
    ell_A.nrows = n;
    ell_A.ncols_per_row = ncols_per_row;
    fill_ell_matrix(ell_A, A, n, n, 4);
    // Upload the ELL form of A
    ELL_Matrix d_ell_A;
    d_ell_A.ncols_per_row = ell_A.ncols_per_row;
    d_ell_A.ncols = ell_A.ncols;
    d_ell_A.nrows = ell_A.nrows;
    cudaMalloc((void**)&d_ell_A.data, sizefloat);
    cudaMalloc((void**)&d_ell_A.col_indices, sizeint);
    cudaMemcpy(d_ell_A.data, ell_A.data, sizefloat, cudaMemcpyHostToDevice);
    cudaMemcpy(d_ell_A.col_indices, ell_A.col_indices, sizeint, cudaMemcpyHostToDevice);
    Matrix Asquared;
    Asquared.width = n;
    Asquared.height = n;
    cudaMallocHost((void**)&Asquared.elements, size);
    // Upload dense A (used as the right operand of the ELL products)
    Matrix d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    cudaMalloc((void**)&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    Matrix d_B;
    d_B.width = A.width;
    d_B.height = A.height;
    cudaMalloc((void**)&d_B.elements, size);
    cudaMemcpy(d_B.elements, A.elements, size, cudaMemcpyHostToDevice);
    Matrix d_Asquared;
    d_Asquared.width = A.width;
    d_Asquared.height = A.height;
    cudaMalloc((void**)&d_Asquared.elements, size);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(A.width / dimBlock.x + 1, A.height / dimBlock.y + 1);
    dim3 dimBlock1(BLOCK_SIZE);
    dim3 dimGrid1(n / dimBlock1.x + 1);
    // A^2 = ell(A) * A, computed one column at a time
    for (int i = 0; i < n; i++) {
        ell_mult<<<dimGrid1, dimBlock1>>>(d_ell_A, d_A, d_Asquared, i);
    }
    cudaDeviceSynchronize();
    cudaMemcpy(Asquared.elements, d_Asquared.elements, size, cudaMemcpyDeviceToHost);
    // Build the ELL representation of A^2 (its width must be re-measured)
    int ncols_per_row2 = get_max_cols(Asquared);
    ELL_Matrix ell_Asq;
    size_t sizefloat2 = n * ncols_per_row2 * sizeof(float);
    size_t sizeint2 = n * ncols_per_row2 * sizeof(int);
    cudaMallocHost((void**)&ell_Asq.data, sizefloat2);
    cudaMallocHost((void**)&ell_Asq.col_indices, sizeint2);
    ell_Asq.ncols = n;
    ell_Asq.nrows = n;
    ell_Asq.ncols_per_row = ncols_per_row2;
    fill_ell_matrix(ell_Asq, Asquared, n, n, ncols_per_row2);
    ELL_Matrix d_ell_Asq;
    d_ell_Asq.ncols_per_row = ell_Asq.ncols_per_row;
    d_ell_Asq.ncols = ell_Asq.ncols;
    d_ell_Asq.nrows = ell_Asq.nrows;
    cudaMalloc((void**)&d_ell_Asq.data, sizefloat2);
    cudaMalloc((void**)&d_ell_Asq.col_indices, sizeint2);
    cudaMemcpy(d_ell_Asq.data, ell_Asq.data, sizefloat2, cudaMemcpyHostToDevice);
    cudaMemcpy(d_ell_Asq.col_indices, ell_Asq.col_indices, sizeint2, cudaMemcpyHostToDevice);
    Matrix d_Acubed;
    d_Acubed.width = A.width;
    d_Acubed.height = A.height;
    cudaMalloc((void**)&d_Acubed.elements, size);
    // Upload atom coordinates for the distance weighting
    atom* d_atoms;
    cudaMalloc((void**)&d_atoms, n * sizeof(atom));
    cudaMemcpy(d_atoms, atom_coords, n * sizeof(atom), cudaMemcpyHostToDevice);
    // A^3 = ell(A^2) * A, again one column at a time
    for (int i = 0; i < n; i++) {
        ell_mult<<<dimGrid1, dimBlock1>>>(d_ell_Asq, d_A, d_Acubed, i);
    }
    cudaDeviceSynchronize();
    std::cout << cudaGetErrorString(cudaGetLastError()) << std::endl;
    cudaDeviceSynchronize();
    std::cout << "Finished multiplication" << std::endl;
    // F[i][j] combines sign tests on A, A^2, A^3 with inter-atom distances
    Matrix F;
    F.width = n;
    F.height = n;
    cudaMalloc((void**)&F.elements, size);
    get_matrix_F<<<dimGrid, dimBlock>>>(d_Acubed, d_Asquared, d_A, d_atoms, F);
    cudaDeviceSynchronize();
    std::cout << cudaGetErrorString(cudaGetLastError()) << std::endl;
    // res[i] = q[i] * sum_j F[j][i] * q[j]; host sums the vector afterwards
    float* res;
    float* d_res;
    float* d_charges;
    cudaMallocHost((void**)&res, n * sizeof(float));
    cudaMalloc((void**)&d_res, n * sizeof(float));
    cudaMalloc((void**)&d_charges, n * sizeof(float));
    cudaMemcpy(d_charges, charges, n * sizeof(float), cudaMemcpyHostToDevice);
    get_vector_to_sum<<<dimGrid1, dimBlock1>>>(F, d_charges, d_res);
    cudaDeviceSynchronize();
    std::cout << cudaGetErrorString(cudaGetLastError()) << std::endl;
    cudaMemcpy(res, d_res, n * sizeof(float), cudaMemcpyDeviceToHost);
    float ans = 0;
    for (size_t i = 0; i < n; i++) {
        ans += res[i];
    }
    std::cout << "Answer is " << (COEFFICIENT * ans) / 2 << std::endl;
    cudaFree(d_A.elements);
    cudaFree(d_Asquared.elements);
    cudaFree(d_Acubed.elements);
    cudaFree(d_atoms);
    cudaFree(d_res);
    cudaFree(d_charges);
    // Fix: release the remaining device and pinned-host buffers, which the
    // original leaked on every call
    cudaFree(d_B.elements);
    cudaFree(F.elements);
    cudaFree(d_ell_A.data);
    cudaFree(d_ell_A.col_indices);
    cudaFree(d_ell_Asq.data);
    cudaFree(d_ell_Asq.col_indices);
    cudaFreeHost(ell_A.data);
    cudaFreeHost(ell_A.col_indices);
    cudaFreeHost(ell_Asq.data);
    cudaFreeHost(ell_Asq.col_indices);
    cudaFreeHost(Asquared.elements);
    cudaFreeHost(res);
}
// Naive dense matrix multiply C = A * B, one thread per output element.
// Bounds are checked against A.width, so square matrices are assumed.
__global__ void matrix_mult(Matrix A, Matrix B, Matrix C){
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= A.width || col >= A.width) {
        return;
    }
    float acc = 0;
    for (int e = 0; e < A.width; ++e) {
        acc += A.elements[row * A.width + e] * B.elements[e * B.width + col];
    }
    C.elements[row * C.width + col] = acc;
}
// Builds the distance-weighted matrix F, one thread per element:
//   F[i][j] = (1 - 0.5*sign(A^3+A^2+A) - 0.5*sign(A^2+A)) / dist(atom_i, atom_j)
// for i != j, and 0 on the diagonal.
__global__ void get_matrix_F(Matrix ACube, Matrix ASquare, Matrix A, atom* atoms, Matrix F) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= A.width || col >= A.width) {
        return;
    }
    const int idx = row * A.width + col;
    if (row == col) {
        // No self-interaction on the diagonal
        F.elements[idx] = 0.0;
        return;
    }
    float value = 1 - 0.5 * sign(ACube.elements[idx] + ASquare.elements[idx] + A.elements[idx]) -
                  0.5 * sign(ASquare.elements[idx] + A.elements[idx]);
    value /= atom_dist(atoms[row], atoms[col]);
    F.elements[idx] = value;
}
// Computes res[i] = q[i] * sum_j F[j][i] * q[j], one thread per element of
// res; the host sums res afterwards to obtain q^T F q.
__global__ void get_vector_to_sum(Matrix F, float* q, float* res) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= F.width) {
        return;
    }
    float acc = 0;
    for (int j = 0; j < F.height; j++) { //TODO maybe permute indices order
        acc += F.elements[j * F.height + i] * q[j];
    }
    res[i] = acc * q[i];
}
/**
* kernel for ELL matrix-vector multiplication
*
*/
/**
 * Kernel for ELL sparse-matrix times dense-column multiplication.
 * One thread per row of A; computes column i of C:
 *   C[i][row] = sum over stored slots of A.data[slot][row] * B[i][col(slot)]
 * (data/col_indices are slot-major: entry s of row r is at [s*nrows + r]).
 *
 * Fix over the original: the store to C.elements was inside the slot loop,
 * issuing one redundant global write per slot; the final value is identical,
 * so it is now written once after accumulation.
 */
__global__ void ell_mult(ELL_Matrix A, Matrix B, Matrix C, int i){
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row < A.nrows) {
        float dot = 0;
        for (int idx = 0; idx < A.ncols_per_row; idx++) {
            int col = A.col_indices[A.nrows * idx + row];
            float val = A.data[A.nrows * idx + row];
            // Unused ELL slots hold 0 and contribute nothing
            if (val != 0) {
                dot += val * B.elements[i * B.height + col];
            }
        }
        C.elements[i * C.height + row] = dot;
    }
}
|
23,098 | #include "includes.h"
// Gradient of the Rosenbrock function f(x) = (1-x0)^2 + 100*(x1 - x0^2)^2.
// Single-point kernel: reads x from d_x[0..1], writes the two partial
// derivatives to d_grad[0..1].
__global__ void kernelGradf(const float *d_x, float *d_grad)
{
    const float x0 = d_x[0];
    const float x1 = d_x[1];
    // Shared sub-expression (x1 - x0^2) appears in both partials
    const float inner = x1 - x0 * x0;
    d_grad[0] = -2.0f * (1.0f - x0) - 400.0f * x0 * inner;   // df/dx0
    d_grad[1] = 200.0f * inner;                              // df/dx1
}
23,099 | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <algorithm>
#include <cstring>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <vector>
#include <utility>
#include <cuda_runtime.h>
#include <libpng/png.h>
// One RGBA pixel, 8 bits per channel (matches the PNG_FORMAT_RGBA byte layout
// used when the image is decoded).
struct pixel
{
    uint8_t red;
    uint8_t green;
    uint8_t blue;
    uint8_t alpha;
};
#define gpuErrchk(ans) gpuAssert((ans), __FILE__, __LINE__)
// Reports a CUDA error with its source location; exits the process with the
// error code unless `abort` is false. Used via the gpuErrchk macro.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
    {
        return;
    }
    std::cerr << "GPU error: " << cudaGetErrorString(code) << " at " << file << ":" << line << "\n";
    if (abort)
    {
        std::exit(code);
    }
}
constexpr unsigned BlockLength = 31;
// 2-D image convolution with a square matrix of (odd) side `matrixLength`.
// Launch: 2-D grid of BlockLength x BlockLength blocks covering the image.
// Dynamic shared memory layout: matrixLength^2 floats (the matrix) followed
// by a (BlockLength + 2*(matrixLength/2))^2 pixel tile (image chunk + apron).
//
// Fixes over the original: (1) the center tile load subtracted the row offset
// (image[cXIdx - cYIdx * width]) - out of bounds for every row > 0; (2) no
// __syncthreads() separated the shared-memory staging from the reads (data
// race); (3) the discard test used `xIdx > width` instead of `>=`; (4) one
// top-right apron load clamped to `width` instead of `width - 1`.
__global__ void convolution(const pixel* image, const int width, const int height, const float* matrix, const int matrixLength, pixel* out)
{
    extern __shared__ float sMatrix[];
    pixel* subImage = (pixel*)(sMatrix + (matrixLength * matrixLength));
    const int subImageLength = BlockLength + (matrixLength / 2) * 2;
    const int xIdx = blockIdx.x * blockDim.x + threadIdx.x;
    const int yIdx = blockIdx.y * blockDim.y + threadIdx.y;
    // Copy convolution matrix to shared memory
    if (threadIdx.x < matrixLength && threadIdx.y < matrixLength)
    {
        sMatrix[threadIdx.x + threadIdx.y * matrixLength] = matrix[threadIdx.x + threadIdx.y * matrixLength];
    }
    const int apronSize = matrixLength / 2;
    const int shmemXIdx = matrixLength / 2 + threadIdx.x;
    const int shmemYIdx = matrixLength / 2 + threadIdx.y;
    // Clamping the accessed coordinates to the limits of the image, as we still
    // want threads that would be over that to participate in copying the image
    // chunk to shared memory
    const int cXIdx = min(width - 1, xIdx);
    const int cYIdx = min(height - 1, yIdx);
    // Copy center part of the image chunk in shared memory
    // (fix: row offset is added, not subtracted)
    subImage[shmemXIdx + shmemYIdx * subImageLength] = image[cXIdx + cYIdx * width];
    // Copy apron part of the image chunk in shared memory
    if (threadIdx.x < apronSize)
    {
        subImage[shmemXIdx - apronSize + shmemYIdx * subImageLength] = image[max(0, cXIdx - apronSize) + cYIdx * width];
        // Copy a corner of the apron
        if (threadIdx.y < apronSize)
        {
            subImage[shmemXIdx - apronSize + (shmemYIdx - apronSize) * subImageLength] =
                image[max(0, cXIdx - apronSize) + max(0, cYIdx - apronSize) * width];
        }
    }
    if (threadIdx.y < apronSize)
    {
        subImage[shmemXIdx + (shmemYIdx - apronSize) * subImageLength] = image[cXIdx + max(0, cYIdx - apronSize) * width];
        if (threadIdx.x >= blockDim.x - apronSize)
        {
            // fix: clamp to the last valid column (width - 1), not width
            subImage[shmemXIdx + apronSize + (shmemYIdx - apronSize) * subImageLength] =
                image[min(cXIdx + apronSize, width - 1) + max(0, cYIdx - apronSize) * width];
        }
    }
    if (threadIdx.x >= (blockDim.x - apronSize))
    {
        subImage[shmemXIdx + apronSize + shmemYIdx * subImageLength] = image[min(cXIdx + apronSize, width - 1) + cYIdx * width];
        if (threadIdx.y >= blockDim.y - apronSize)
        {
            subImage[shmemXIdx + apronSize + (shmemYIdx + apronSize) * subImageLength] =
                image[min(cXIdx + apronSize, width - 1) + min(height - 1, cYIdx + apronSize) * width];
        }
    }
    if (threadIdx.y >= (blockDim.y - apronSize))
    {
        subImage[shmemXIdx + (shmemYIdx + apronSize) * subImageLength] = image[cXIdx + min(height - 1, cYIdx + apronSize) * width];
        if (threadIdx.x < apronSize)
        {
            subImage[shmemXIdx - apronSize + (shmemYIdx + apronSize) * subImageLength] =
                image[max(0, cXIdx - apronSize) + min(height - 1, cYIdx + apronSize) * width];
        }
    }
    // At this point we are done copying the sub-image to the shared memory.
    // Fix: barrier so every thread's staged writes (tile, apron, matrix) are
    // visible before any thread reads them. Placed before the early return so
    // all threads of the block reach it.
    __syncthreads();
    // Discard threads that do not participate in the final image
    // (fix: `>=` — xIdx == width is already out of bounds)
    if (xIdx >= width || yIdx >= height)
    {
        return;
    }
    float accR = 0.0;
    float accG = 0.0;
    float accB = 0.0;
    // Apply the convolution
    for (int i = 0; i < matrixLength; i++)
    {
        for (int j = 0; j < matrixLength; j++)
        {
            int baseX = shmemXIdx - apronSize;
            int baseY = shmemYIdx - apronSize;
            accR += (sMatrix[i * matrixLength + j] * subImage[baseX + j + (baseY + i) * subImageLength].red);
            accG += (sMatrix[i * matrixLength + j] * subImage[baseX + j + (baseY + i) * subImageLength].green);
            accB += (sMatrix[i * matrixLength + j] * subImage[baseX + j + (baseY + i) * subImageLength].blue);
        }
    }
    // Clamp values to avoid overflows (float intrinsics; no double promotion)
    accR = fmaxf(0.0f, fminf(accR, 255.0f));
    accG = fmaxf(0.0f, fminf(accG, 255.0f));
    accB = fmaxf(0.0f, fminf(accB, 255.0f));
    const pixel res = {(uint8_t)accR, (uint8_t)accG, (uint8_t)accB, 255};
    out[xIdx + yIdx * width] = res;
}
// Reads a PNG from disk into an RGBA byte buffer, filling `header` with the
// image metadata. Exits the process on any libpng failure.
static void loadImage(const std::string& filename, png_image& header, std::vector<uint8_t>& image)
{
    std::memset(&header, 0, sizeof(header));
    header.version = PNG_IMAGE_VERSION;
    // First pass: read metadata only
    const bool opened = png_image_begin_read_from_file(&header, filename.c_str());
    if (!opened)
    {
        std::cerr << "Failed to open image" << filename << "\n";
        std::exit(1);
    }
    std::cout << "Image "<< filename << " size: " << header.width << " * " << header.height << "\n";
    // Second pass: decode the pixel data as RGBA
    header.format = PNG_FORMAT_RGBA;
    image.resize(PNG_IMAGE_SIZE(header));
    const bool decoded = png_image_finish_read(&header, NULL/*background*/, image.data(), 0/*row_stride*/, NULL/*colormap*/);
    if (!decoded)
    {
        std::cerr << "Failed to read image " << filename << "\n";
        std::exit(1);
    }
}
static uint8_t* copyImageToDevice(const png_image& header, const std::vector<uint8_t>& image)
{
uint8_t* dImage;
gpuErrchk(cudaMalloc(&dImage, PNG_IMAGE_SIZE(header)));
gpuErrchk(cudaMemcpy(dImage, image.data(), PNG_IMAGE_SIZE(header), cudaMemcpyHostToDevice));
return dImage;
}
// Launch the convolution kernel over the whole image and copy the filtered
// result back into `hImage`. One thread per output pixel; the grid is rounded
// up so partial tiles at the right/bottom edges are still covered.
static void runConvolutionKernel(const png_image& header, std::vector<uint8_t>& hImage, uint8_t* dImage, const float* dMatrix, const int matrixLength, uint8_t* dOutput)
{
    const dim3 blockSize(BlockLength, BlockLength);
    const dim3 gridSize((header.width + blockSize.x - 1) / blockSize.x,
                        (header.height + blockSize.y - 1) / blockSize.y);
    // Each block stages a tile plus an apron of matrixLength/2 pixels per side.
    const int apron = matrixLength / 2;
    const int subImageLength = BlockLength + 2 * apron;
    // Dynamic shared memory holds both the convolution matrix and the padded tile.
    const int sharedMemorySize = matrixLength * matrixLength * sizeof(float)
                               + subImageLength * subImageLength * sizeof(pixel);
    std::cout << "Grid size: " << gridSize.x << " * " << gridSize.y << " * " << gridSize.z << "\n";
    std::cout << "Block size: " << blockSize.x << " * " << blockSize.y << " * " << blockSize.z << "\n";
    convolution<<<gridSize, blockSize, sharedMemorySize>>>((pixel*)dImage, header.width, header.height, dMatrix, matrixLength, (pixel*)dOutput);
    gpuErrchk(cudaGetLastError());
    gpuErrchk(cudaDeviceSynchronize());
    // The kernel has completed; pull the filtered image back to the host.
    gpuErrchk(cudaMemcpy(hImage.data(), dOutput, PNG_IMAGE_SIZE(header), cudaMemcpyDeviceToHost));
}
// Write the RGBA buffer `image` back to disk under `filename`, using the
// metadata in `header`. Exits the process if libpng reports a failure.
static void saveImage(const std::string& filename, png_image& header, std::vector<uint8_t>& image)
{
    const int ok = png_image_write_to_file(&header, filename.c_str(),
                                           0/*convert_to_8bit*/, image.data(),
                                           0/*row_stride*/, NULL/*colormap*/);
    if (!ok)
    {
        std::cerr << "Failed to save image " << filename << "\n";
        std::exit(1);
    }
}
// Driver: loads two test images, runs an edge-detection convolution on the
// checkerboard and an identity convolution on the icon, and writes the results
// next to the inputs. Optional argv[1]: directory containing the PNGs (default ".").
int main(int argc, char* argv[])
{
    std::string imagePath = ".";
    if (argc > 1)
    {
        imagePath = argv[1];
    }
    // Checkerboard test image: host copy + device input/output buffers.
    png_image headerChecker;
    std::vector<uint8_t> imageChecker;
    loadImage(imagePath + "/checkerboard.png", headerChecker, imageChecker);
    uint8_t* dImageChecker = copyImageToDevice(headerChecker, imageChecker);
    uint8_t* dOutputChecker;
    gpuErrchk(cudaMalloc(&dOutputChecker, PNG_IMAGE_SIZE(headerChecker)));
    // Icon test image: host copy + device input/output buffers.
    png_image headerIcon;
    std::vector<uint8_t> imageIcon;
    loadImage(imagePath + "/icon.png", headerIcon, imageIcon);
    uint8_t* dImageIcon = copyImageToDevice(headerIcon, imageIcon);
    uint8_t* dOutputIcon;
    gpuErrchk(cudaMalloc(&dOutputIcon, PNG_IMAGE_SIZE(headerIcon)));
    // Allocate convolution matrix buffer (in this example, we use at most a 5x5 matrix).
    // Fixed: the buffer holds floats, so size it with sizeof(float), not sizeof(double).
    float* dMatrix;
    gpuErrchk(cudaMalloc(&dMatrix, 5 * 5 * sizeof(float)));
    // 3x3 Laplacian-style edge-detection kernel (sums to 0: flat regions go black).
    const float edgeDetectionMatrix[9] = {-1.f, -1.f, -1.f,
                                          -1.f, 8.f, -1.f,
                                          -1.f, -1.f, -1.f};
    gpuErrchk(cudaMemcpy(dMatrix, edgeDetectionMatrix, sizeof(edgeDetectionMatrix), cudaMemcpyHostToDevice));
    runConvolutionKernel(headerChecker, imageChecker, dImageChecker, dMatrix, 3, dOutputChecker);
    gpuErrchk(cudaFree(dImageChecker));
    saveImage(imagePath + "/checkerboard-out.png", headerChecker, imageChecker);
    // 3x3 identity kernel: the output should equal the input image.
    float identityMatrix[9] = {0.f, };
    identityMatrix[4] = 1.f;
    gpuErrchk(cudaMemcpy(dMatrix, identityMatrix, sizeof(identityMatrix), cudaMemcpyHostToDevice));
    runConvolutionKernel(headerIcon, imageIcon, dImageIcon, dMatrix, 3, dOutputIcon);
    gpuErrchk(cudaFree(dImageIcon));
    saveImage(imagePath + "/icon-out.png", headerIcon, imageIcon);
    // TODO: Uncomment me when reaching step 06
    /*
    png_image headerCoffee;
    std::vector<uint8_t> imageCoffee;
    loadImage(imagePath + "/coffee.png", headerCoffee, imageCoffee);
    uint8_t* dImageCoffee = copyImageToDevice(headerCoffee, imageCoffee);
    uint8_t* dOutputCoffee;
    gpuErrchk(cudaMalloc(&dOutputCoffee, PNG_IMAGE_SIZE(headerCoffee)));
    const float gaussianBlurMatrix[25] = {1.f / 256.f, 4.f / 256.f, 6.f / 256.f, 4.f / 256.f, 1.f / 256.f,
                                          4.f / 256.f, 16.f / 256.f, 24.f / 256.f, 16.f / 256.f, 4.f / 256.f,
                                          6.f / 256.f, 24.f / 256.f, 36.f / 256.f, 24.f / 256.f, 6.f / 256.f,
                                          4.f / 256.f, 16.f / 256.f, 24.f / 256.f, 16.f / 256.f, 4.f / 256.f,
                                          1.f / 256.f, 4.f / 256.f, 6.f / 256.f, 4.f / 256.f, 1.f / 256.f};
    // Fixed: copy the whole matrix (25 * sizeof(float) bytes), not just 25 bytes.
    gpuErrchk(cudaMemcpy(dMatrix, gaussianBlurMatrix, sizeof(gaussianBlurMatrix), cudaMemcpyHostToDevice));
    runConvolutionKernel(headerCoffee, imageCoffee, dImageCoffee, dMatrix, 5, dOutputCoffee);
    gpuErrchk(cudaFree(dImageCoffee));
    gpuErrchk(cudaFree(dOutputCoffee));
    saveImage(imagePath + "/coffee-out.png", headerCoffee, imageCoffee);
    */
    // Release the remaining device buffers explicitly before tearing down the context.
    gpuErrchk(cudaFree(dOutputChecker));
    gpuErrchk(cudaFree(dOutputIcon));
    gpuErrchk(cudaFree(dMatrix));
    cudaDeviceReset();
    return 0;
}
|
23,100 | #include <iostream>
using namespace std;
// Scan, limited to 1 block, upto 1024 threads;
// Exclusive prefix sum (Blelloch-style double-buffered Hillis-Steele scan) of
// n elements, limited to a single block (n <= 1024, one thread per element).
// Dynamic shared memory requirement: 2 * n * sizeof(unsigned int) (two buffers).
// Fixed: the original returned early for thid >= n BEFORE the barriers, which
// is a divergent __syncthreads() (undefined behavior) whenever n < blockDim.x.
// All threads now reach every barrier; inactive threads simply skip the work.
__global__
void scan(unsigned int *g_odata, unsigned int *g_idata, int n) {
    extern __shared__ unsigned int temp[]; // allocated on invocation: 2*n uints
    int thid = threadIdx.x;
    int pout = 0, pin = 1;
    int Ndim = n;                          // row stride between the two buffers
    bool active = (thid < n);              // extra threads still hit the barriers
    // Load input into shared memory, shifted right by one with a leading 0,
    // which turns the inclusive recurrence below into an exclusive scan.
    if (active)
        temp[pout*Ndim + thid] = (thid > 0) ? g_idata[thid-1] : 0;
    __syncthreads();
    for (int offset = 1; offset < n; offset *= 2)
    {
        pout = 1 - pout; // swap double buffer indices
        pin = 1 - pout;
        if (active)
        {
            if (thid >= offset)
                temp[pout*Ndim+thid] = temp[pin*Ndim+thid] + temp[pin*Ndim+thid - offset];
            else
                temp[pout*Ndim+thid] = temp[pin*Ndim+thid]; // carry value forward unchanged
        }
        __syncthreads();
    }
    if (active)
        g_odata[thid] = temp[pout*Ndim+thid]; // write output
}
// Launch the single-block exclusive scan of N elements from d_input into d_cdf.
// Precondition: N <= 1024 (one thread per element, single block).
// Dynamic shared memory: 2 * N unsigned ints (double buffer used by scan()).
void scan_small(unsigned int * d_cdf, unsigned int * d_input, int N){
    int Nblock=1;
    int Nthread=N;                         // one thread per input element
    unsigned int sizeN=N*sizeof(unsigned int);
    scan<<<Nblock,Nthread,2*sizeN>>>(d_cdf,d_input,N);
    // Fixed: kernel launches fail silently; surface an invalid configuration
    // (e.g. N > 1024 threads) instead of letting the caller read garbage.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "scan launch failed: " << cudaGetErrorString(err) << "\n";
    }
}
// Test driver: scans an array of N ones on the GPU and prints the CPU
// reference (0, 1, ..., N-1) followed by the GPU result; the lines should match.
int main(){
    const int N=100;                       // must stay <= 1024 (single-block scan)
    unsigned int sizeN=N*sizeof(unsigned int);
    unsigned int *h_input=new unsigned int[N];
    unsigned int *h_cdf=new unsigned int[N]();   // zero-initialized result buffer
    for(int i=0;i<N;++i){
        h_input[i]=1;
    }
    unsigned int * d_input, *d_cdf;
    cudaMalloc(&d_input, sizeN);
    cudaMalloc(&d_cdf, sizeN);
    cudaMemcpy(d_input,h_input,sizeN,cudaMemcpyHostToDevice);
    scan_small(d_cdf,d_input,N);
    cudaMemcpy(h_cdf,d_cdf,sizeN,cudaMemcpyDeviceToHost);
    // Fixed: report any deferred CUDA error (the blocking memcpy above
    // surfaces kernel-execution failures) instead of printing stale data.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "CUDA error: " << cudaGetErrorString(err) << "\n";
    }
    // CPU reference for an exclusive scan: print the running total BEFORE adding.
    unsigned int acc=0;
    for(int i=0;i<N;++i){
        printf("%u ", acc);
        acc += h_input[i];
    }
    printf("\n");
    for(int i=0;i<N;++i){
        printf("%u ", h_cdf[i]);
    }
    printf("\n");   // Fixed: terminate the GPU output line as well.
    cudaFree(d_input); cudaFree(d_cdf);
    delete[] h_input; delete[] h_cdf;
    return 0;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.