serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
3,401 | #include <stdio.h>
// XOR each plaintext byte with the key, cycling through the key bytes
// (classic repeating-key XOR cipher). `cipher` must hold clear_len bytes.
static void encrypt(unsigned char *cipher, unsigned char const *clear, int clear_len, unsigned char const *key, int key_len) {
    for (int pos = 0; pos < clear_len; ++pos) {
        cipher[pos] = clear[pos] ^ key[pos % key_len];
    }
}
// Inverse of encrypt(): XOR each ciphertext byte with the cycled key byte.
// Because XOR is its own inverse, this is structurally the same operation.
static void decrypt(unsigned char *clear, unsigned char const *cipher, int clear_len, unsigned char const *key, int key_len) {
    for (int pos = 0; pos < clear_len; ++pos) {
        clear[pos] = cipher[pos] ^ key[pos % key_len];
    }
}
// Fill `key` with key_len pseudo-random bytes from rand();
// only the low 8 bits of each draw are kept. Not cryptographically secure.
static void generate_key(unsigned char* key, int key_len) {
    for (int pos = 0; pos < key_len; ++pos)
        key[pos] = (unsigned char)(rand() & 0xFF);
}
// Recover the XOR key from a known plaintext/ciphertext pair.
// Unimplemented stub. For this cipher the key falls out directly:
// key[i % key_len] == ciphertext[i] ^ cleartext[i] for every i.
static void crack_key(unsigned char *key, unsigned char const *ciphertext, unsigned char const *cleartext, int msg_len) {
// TODO: derive key bytes by XORing ciphertext with cleartext.
}
constexpr int KEY_LEN = 128;
constexpr int MSG_LEN = 256;
// Build a message of exactly msg_len bytes by tiling the pattern `rep`
// (rep_len bytes, no terminator required) end to end; the last repetition
// may be truncated. The output is NOT NUL-terminated.
static void generate_message(char *msg, int msg_len, char const *rep, int rep_len) {
    for (int pos = 0; pos < msg_len; ++pos)
        msg[pos] = rep[pos % rep_len];
}
int main(int argc, char **argv) {
    // Demo driver: build a repeating-plaintext message, XOR-encrypt it with a
    // random key, and print both. Seeded with a constant for reproducibility.
    srand(0);
    auto key = new unsigned char[KEY_LEN];
    generate_key(key, KEY_LEN);
    auto msg = new char[MSG_LEN];
    auto cipher = new char[MSG_LEN];
    // Use an array (not a pointer) so the literal's length is known at compile
    // time; avoids strlen(), which was called without including <string.h>.
    char const src[] = "hajnalban tamadunk ";
    generate_message(msg, MSG_LEN, src, (int)(sizeof(src) - 1));
    // NOTE(review): the key is raw random bytes, so %.*s may stop early at a
    // zero byte or emit unprintable characters; kept as in the original.
    printf("Message: %.*s\nKey: '%.*s'\n", MSG_LEN, msg, KEY_LEN, key);
    encrypt((unsigned char*)cipher, (unsigned char*)msg, MSG_LEN, (unsigned char*)key, KEY_LEN);
    printf("Ciphertext:\n'%.*s'\n", MSG_LEN, cipher);
    // Release the demo buffers (leaked in the original version).
    delete[] key;
    delete[] msg;
    delete[] cipher;
    return 0;
}
|
3,402 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand.h>
#include <random>
#include <iostream>
#include <math.h>
#include <iomanip>
#include <string>
#include <map>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <sys/time.h>
#include <stdio.h>
//#define NUM_PARTICLES 1e7
#define NUM_ITERATIONS 5000
#define dt 1.0f
// Component-wise sum of two float3 vectors (usable on host and device).
__host__ __device__ float3 operator+(const float3 &a, const float3 &b) {
    float3 sum = make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
    return sum;
}
// Scale a float3 by a scalar on the left: a * b.
__host__ __device__ float3 operator*(const float &a, const float3 &b) {
    float3 scaled = make_float3(b.x * a, b.y * a, b.z * a);
    return scaled;
}
// Scale a float3 by a scalar on the right: a * b.
__host__ __device__ float3 operator*(const float3 &a, const float &b) {
    float3 scaled = make_float3(a.x * b, a.y * b, a.z * b);
    return scaled;
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday,
// used to time the GPU and CPU simulation loops below.
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
// One point particle of the toy simulation: 3D position and 3D velocity
// (no mass term — the force law below uses unit mass implicitly).
struct Particle {
float3 position;
float3 velocity;
};
// One explicit-Euler integration step, one thread per particle:
//   v += -x * dt   (restoring force toward the origin)
//   x += v * dt
// TPB must equal blockDim.x of the launch for the indexing to be correct.
// NOTE(review): there is no `index < N` tail guard, so the launch grid must
// cover the particle array exactly, or the device array must be padded to
// gridDim.x * TPB elements — otherwise out-of-bounds accesses occur.
__global__ void timeStep(Particle *par, int TPB){
int index = blockIdx.x * TPB + threadIdx.x;
par[index].velocity = par[index].velocity + (-1.0f * par[index].position * dt);
par[index].position = par[index].position + par[index].velocity * dt;
}
// CPU reference for timeStep: the same explicit-Euler update, done in two
// passes (all velocities first, then all positions). Equivalent to the
// per-particle fused form because each update touches only its own state.
void timeStep_cpu(Particle *par, int NUM_PARTICLES){
    for (int p = 0; p < NUM_PARTICLES; ++p)
        par[p].velocity = par[p].velocity + (-1.0f * par[p].position * dt);
    for (int p = 0; p < NUM_PARTICLES; ++p)
        par[p].position = par[p].position + par[p].velocity * dt;
}
int main(int argc, char **argv) {
    // Usage: <prog> <threads-per-block> <num-particles>.
    // Runs NUM_ITERATIONS time steps of the toy particle simulation on the
    // GPU and on the CPU, timing each, and writes the timings to data.txt.
    // Guard argc first — the original dereferenced argv[1]/argv[2] blindly.
    if (argc < 3) {
        std::cerr << "Usage: " << argv[0] << " <threads-per-block> <num-particles>\n";
        return 1;
    }
    int TPB = atoi(argv[1]);
    int NUM_PARTICLES = atoi(argv[2]);
    if (TPB <= 0 || NUM_PARTICLES <= 0) {
        std::cerr << "Both arguments must be positive integers.\n";
        return 1;
    }
    std::ofstream myfile("data.txt");
    std::cout << "TPB: " << TPB << ", num particles: " << NUM_PARTICLES << "\n";
    // Ceil-div so every particle gets a thread even when TPB doesn't divide N.
    int BLOCKS = (NUM_PARTICLES + TPB - 1) / TPB;
    size_t size = sizeof(Particle) * (size_t)NUM_PARTICLES;
    Particle *particles = (Particle*)malloc(size);
    Particle *particlesCompare = (Particle*)malloc(size);
    if (particles == NULL || particlesCompare == NULL) {
        std::cerr << "Host allocation failed.\n";
        free(particles);
        free(particlesCompare);
        return 1;
    }
    // Random initial state in [0, 5) per component; the compare copy gets the
    // same start so CPU and GPU integrate identical initial conditions.
    for (int k = 0; k < NUM_PARTICLES; ++k) {
        particles[k].position = make_float3((float)rand()/(float)(RAND_MAX)*5.0f, (float)rand()/(float)(RAND_MAX)*5.0f, (float)rand()/(float)(RAND_MAX)*5.0f);
        particles[k].velocity = make_float3((float)rand()/(float)(RAND_MAX)*5.0f, (float)rand()/(float)(RAND_MAX)*5.0f, (float)rand()/(float)(RAND_MAX)*5.0f);
    }
    for (int k = 0; k < NUM_PARTICLES; ++k)
        particlesCompare[k] = particles[k];
    // Pad the device array to a whole number of blocks and zero it: the
    // timeStep kernel has no tail guard, so the extra threads must land on
    // valid (harmless) memory instead of writing out of bounds.
    Particle *d_particles;
    size_t gpuBytes = sizeof(Particle) * (size_t)BLOCKS * (size_t)TPB;
    cudaMalloc((void **)&d_particles, gpuBytes);
    cudaMemset(d_particles, 0, gpuBytes);
    cudaMemcpy(d_particles, particles, size, cudaMemcpyHostToDevice);
    printf("Calculating on GPU... \n");
    double iStart = cpuSecond();
    for (int k = 0; k < NUM_ITERATIONS; ++k) {
        timeStep <<< BLOCKS, TPB >>>(d_particles, TPB);
        cudaDeviceSynchronize();
    }
    double time_elapsed = cpuSecond() - iStart;
    printf("Done, elapsed time: %f s\n", time_elapsed );
    if (myfile.is_open()) {
        myfile << "GPU " << TPB << " " << NUM_PARTICLES << " " << time_elapsed << "\n";
    }
    cudaMemcpy(particlesCompare, d_particles, size, cudaMemcpyDeviceToHost);
    printf("Calculating on CPU... \n");
    iStart = cpuSecond();
    for (int k = 0; k < NUM_ITERATIONS; ++k) {
        timeStep_cpu(particles, NUM_PARTICLES);
    }
    time_elapsed = cpuSecond() - iStart;
    if (myfile.is_open()) {
        myfile << "CPU 0 " << NUM_PARTICLES << " " << time_elapsed << "\n";
    }
    printf("Done, elapsed time: %f s\n", time_elapsed );
    // Release device and host buffers (the original leaked all three).
    cudaFree(d_particles);
    free(particles);
    free(particlesCompare);
    myfile.close();
    return 0;
}
|
// Grid-stride loop over the nm flattened covariance entries:
//   cov[i] = sigma2 * exp(-(dist[i]/phi)^kappa) + nugget on the diagonal,
// where "diagonal" means i%n == i/n (row == column in an n-row layout).
__global__ void powered_exponential_kernel(double* dist, double* cov,
                                           const int n, const int nm,
                                           const double sigma2, const double phi,
                                           const double kappa, const double nugget)
{
    const int stride = gridDim.x * blockDim.x;
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < nm; idx += stride) {
        const double decay = exp(-pow(dist[idx] / phi, kappa));
        cov[idx] = sigma2 * decay + nugget * (idx % n == idx / n);
    }
}
// Host wrapper: fills the n x m covariance matrix `cov` (device pointer) from
// the distance matrix `dist` using the powered-exponential kernel, then
// blocks until the GPU is done.
// Fix: the grid was sized from n alone, so for m > 1 only ~1/m of the needed
// blocks were launched and the grid-stride loop did all the remaining work in
// an undersized grid. Size the grid from the full element count nm instead;
// the kernel's stride loop produces identical results either way.
void cov_powered_exponential_gpu(double* dist, double* cov,
const int n, const int m,
double sigma2, double phi,
double kappa, double nugget,
int n_threads)
{
    int nm = n * m;
    int blocks = (nm + n_threads - 1) / n_threads;
    powered_exponential_kernel<<<blocks, n_threads>>>(dist, cov, n, nm, sigma2, phi, kappa, nugget);
    cudaDeviceSynchronize();
}
|
3,404 | #include "includes.h"
// Update each cell's winning statistics after cell s1 won a competition.
// winningFraction follows an exponential moving average with rate bParam:
// it moves toward 1 for the winner and toward 0 for every other cell.
// winningCount tallies wins. Grid layout: 2D grid of 1D blocks.
__global__ void AdaptWinningFractionKernel( int s1, float *winningFraction, int *winningCount, float bParam, int maxCells )
{
    // Global thread id: full rows of blocks above this one, plus blocks to
    // the left in this row, plus the lane within the block.
    const int cell = blockDim.x * gridDim.x * blockIdx.y
                   + blockDim.x * blockIdx.x
                   + threadIdx.x;
    if (cell >= maxCells)
        return;
    const float target = (float)(cell == s1);
    winningFraction[cell] += bParam * (target - winningFraction[cell]);
    if (cell == s1)
        winningCount[cell] += 1;
}
3,405 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel (the file header says
   "Do not modify", so only comments are added here). It chains exact float
   comparisons and transcendental ops on the 20 inputs, conditionally mutating
   `comp`, and prints the final value with full precision. */
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19) {
if (comp == coshf(var_1 * (var_2 - fmodf((+1.2526E34f * (+0.0f * +1.1190E-42f / (var_3 - var_4 - var_5))), -1.5493E-35f)))) {
if (comp >= -1.8523E36f / (var_6 + (var_7 * (var_8 + +1.5254E35f)))) {
comp += -1.6531E-35f / (+1.8575E36f + cosf((+1.6701E35f * (var_9 - -1.9499E-41f))));
if (comp == (-1.4244E-12f * (+1.0691E-35f + var_10 * var_11 - atan2f((var_12 - -1.6145E-36f), -1.9264E1f)))) {
float tmp_1 = (var_13 + (var_14 - (var_15 * var_16 / var_17)));
float tmp_2 = +1.8803E-36f;
comp = tmp_2 / tmp_1 * var_18 / var_19;
}
}
}
/* %.17g round-trips a float exactly, so the harness can diff outputs. */
printf("%.17g\n", comp);
}
// Allocate a 10-element float array on the host and fill every slot with v.
// The caller owns (and must free()) the returned buffer.
float* initPointer(float v) {
    const int count = 10;
    float *buf = (float*) malloc(sizeof(float) * count);
    for (int idx = 0; idx < count; ++idx)
        buf[idx] = v;
    return buf;
}
int main(int argc, char** argv) {
    /* The kernel consumes 20 float arguments taken from the command line.
       Guard argc first — the original read argv[1..20] unconditionally and
       crashed when fewer arguments were supplied. */
    if (argc < 21) {
        fprintf(stderr, "usage: %s v1 v2 ... v20\n", argv[0]);
        return 1;
    }
    float vals[20];
    for (int i = 0; i < 20; ++i)
        vals[i] = atof(argv[i + 1]);
    compute<<<1,1>>>(vals[0], vals[1], vals[2], vals[3], vals[4],
                     vals[5], vals[6], vals[7], vals[8], vals[9],
                     vals[10], vals[11], vals[12], vals[13], vals[14],
                     vals[15], vals[16], vals[17], vals[18], vals[19]);
    /* Kernel printf output is flushed at synchronization. */
    cudaDeviceSynchronize();
    return 0;
}
|
3,406 | //Based on the work of Andrew Krepps
#include <cstdlib>
#include <iostream>
#include <random>
#include <vector>
#include <stdio.h>
// Element-wise sum c[i] = a[i] + b[i], one thread per element, flat 1D grid.
// The launch grid must cover the arrays exactly (no tail guard).
__global__ void add(int * a, int * b, int * c)
{
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
// Element-wise difference c[i] = a[i] - b[i], one thread per element.
// The launch grid must cover the arrays exactly (no tail guard).
__global__ void subtract(int * a, int * b, int * c)
{
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] - b[i];
}
// Element-wise product c[i] = a[i] * b[i], one thread per element.
// The launch grid must cover the arrays exactly (no tail guard).
__global__ void mult(int * a, int * b, int * c)
{
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] * b[i];
}
// Element-wise remainder c[i] = a[i] % b[i], one thread per element.
// Fix: the host fills b from uniform_int_distribution(0,4), so b[i] can be 0
// and integer % 0 is undefined behavior on the device. Define the result as 0
// in that case instead of producing garbage.
__global__ void mod(int * a, int * b, int * c)
{
    const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    c[thread_idx] = (b[thread_idx] != 0) ? (a[thread_idx] % b[thread_idx]) : 0;
}
// Dump an xSize-by-ySize row-major array to stdout, one row per line with
// values separated by single trailing spaces; flush once at the end.
void printArray(const int * const arr, const int xSize, const int ySize)
{
    for (int row = 0; row < xSize; ++row)
    {
        for (int col = 0; col < ySize; ++col)
            std::cout << arr[row * ySize + col] << " ";
        std::cout << '\n';
    }
    std::cout << std::flush;
}
int main(int argc, char** argv)
{
    // Optional arguments: [total threads] [block size]; defaults 256 / 256.
    int totalThreads = 256;
    int blockSize = 256;
    if (argc >= 2) {
        totalThreads = atoi(argv[1]);
    }
    if (argc >= 3) {
        blockSize = atoi(argv[2]);
    }
    if (totalThreads <= 0 || blockSize <= 0) {
        printf("Error: thread count and block size must be positive integers\n");
        return 1;
    }
    int numBlocks = totalThreads/blockSize;
    // Round the total thread count up to a whole number of blocks.
    if (totalThreads % blockSize != 0) {
        ++numBlocks;
        totalThreads = numBlocks*blockSize;
        printf("Warning: Total thread count is not evenly divisible by the block size\n");
        printf("The total number of threads will be rounded up to %d\n", totalThreads);
    }
    // Heap-backed host buffers: the original used variable-length stack
    // arrays, which are non-standard C++ and overflow the stack for large
    // thread counts.
    std::vector<int> a(totalThreads), b(totalThreads), c(totalThreads);
    int *gpu_a, *gpu_b, *gpu_c;
    const size_t bytes = totalThreads * sizeof(int);
    cudaMalloc((void**)&gpu_a, bytes);
    cudaMalloc((void**)&gpu_b, bytes);
    cudaMalloc((void**)&gpu_c, bytes);
    // Create a random generate that will generate random numbers from 0 to 4.
    // Use a set seed so output is deterministic
    unsigned seed = 12345;
    std::default_random_engine gen(seed);
    std::uniform_int_distribution<int> dist(0,4);
    for (int i = 0; i < totalThreads; ++i)
    {
        a[i] = i;
        b[i] = dist(gen);
    }
    std::cout << "A:" << std::endl;
    printArray(a.data(), numBlocks, blockSize);
    std::cout << "B:" << std::endl;
    printArray(b.data(), numBlocks, blockSize);
    cudaMemcpy(gpu_a, a.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, b.data(), bytes, cudaMemcpyHostToDevice);
    // Add all of the numbers c[i] = a[i] + b[i];
    add<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_c);
    cudaMemcpy(c.data(), gpu_c, bytes, cudaMemcpyDeviceToHost);
    std::cout << "Add: " << std::endl;
    printArray(c.data(), numBlocks, blockSize);
    // Subtract all of the numbers c[i] = a[i] - b[i];
    subtract<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_c);
    cudaMemcpy(c.data(), gpu_c, bytes, cudaMemcpyDeviceToHost);
    std::cout << "Sub: " << std::endl;
    printArray(c.data(), numBlocks, blockSize);
    // Multiply all of the numbers c[i] = a[i] * b[i];
    mult<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_c);
    cudaMemcpy(c.data(), gpu_c, bytes, cudaMemcpyDeviceToHost);
    std::cout << "Mult: " << std::endl;
    printArray(c.data(), numBlocks, blockSize);
    // Mod all of the numbers c[i] = a[i] % b[i];
    mod<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_c);
    cudaMemcpy(c.data(), gpu_c, bytes, cudaMemcpyDeviceToHost);
    std::cout << "Mod: " << std::endl;
    printArray(c.data(), numBlocks, blockSize);
    cudaFree(gpu_a);
    cudaFree(gpu_b);
    cudaFree(gpu_c);
    return 0;
}
|
3,407 | //#include "mdCuda.h"
// Pairwise force/energy kernel: one thread per atom i. Against every other
// atom j it accumulates a two-term, Gaussian-damped inverse-power pair
// potential
//   U(r) = D21 * A1 * exp(-AL1*r^2) / r^RL1  +  D22 * A2 * exp(-AL2*r^2) / r^RL2
// and its analytic radial derivative, writing the net force components to
// FFX/FFY/FFZ and the summed pair energy to EE[i]. Params[0..2] are box
// lengths (PP0..PP2); Params[3..10] are the potential constants. IPBC != 0
// enables minimum-image periodic wrapping on any axis with a positive box
// length.
// NOTE(review): there is no `i < NA` guard, so the launch grid must cover the
// atom arrays exactly or threads past NA will access out of bounds — confirm
// against the (not shown) launch code.
__global__ void kernelForce(int NA, double* FFX, double* FFY, double* FFZ, double* EE, double* X, double* Y, double* Z, int IPBC, double *Params)
{
double XIJ, YIJ, ZIJ, RIJ, RIJ2, EPP, FX2, FY2, FZ2;
double ARG1, ARG2, EXP1, EXP2, UIJ1, UIJ2, UIJ;
double FAC1, FAC2, FAC12, XRIJ, YRIJ, ZRIJ;
// Unpack the flat parameter vector: box lengths, then potential constants.
double PP0 = Params[0];
double PP1 = Params[1];
double PP2 = Params[2];
double AL1 = Params[3];
double AL2 = Params[4];
double A1 = Params[5];
double A2 = Params[6];
double RL1 = Params[7];
double RL2 = Params[8];
double D21 = Params[9];
double D22 = Params[10];
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Per-atom accumulators: pair energy and the three force components.
EPP = 0;
// Forces that affect atoms indexed with i in all three axes
FX2 = 0;
FY2 = 0;
FZ2 = 0;
for(int j=0; j<NA; j++)
{
if(i == j)
continue;
// Apply periodic boundaries and find distances between atom I and j. RIJ2 is square of RIJ
XIJ = X[i] - X[j];
YIJ = Y[i] - Y[j];
ZIJ = Z[i] - Z[j];
double DD, ID;
if(IPBC != 0){
// Minimum-image wrap per axis (only axes with a positive box length).
if(PP0 > 0){
DD = XIJ / PP0;
ID = int(DD);
XIJ = XIJ - PP0*(ID+int(2.0*(DD-ID)));
}
if(PP1 > 0){
DD = YIJ / PP1;
ID = int(DD);
YIJ = YIJ - PP1*(ID+int(2.0*(DD-ID)));
}
if(PP2 > 0){
DD = ZIJ / PP2;
ID = int(DD);
ZIJ = ZIJ - PP2*(ID+int(2.0*(DD-ID)));
}
}
RIJ2 = XIJ*XIJ + YIJ*YIJ + ZIJ*ZIJ;
RIJ = sqrt(RIJ2);
// Calculate potential energy U(r): two damped inverse-power terms.
ARG1 = AL1*RIJ2;
ARG2 = AL2*RIJ2;
EXP1 = exp(-ARG1);
EXP2 = exp(-ARG2);
UIJ1 = A1*EXP1/(pow(RIJ,RL1));
UIJ2 = A2*EXP2/(pow(RIJ,RL2));
UIJ = D21*UIJ1 + D22*UIJ2;
EPP = EPP+UIJ;
// Calculate forces: dU/dr of each term gives -(RL/r + 2*AL*r) * U_term;
// the result is projected onto the unit vector from j to i.
FAC1 = -(RL1/RIJ + 2.0*AL1*RIJ);
FAC2 = -(RL2/RIJ + 2.0*AL2*RIJ);
FAC12 = FAC1*D21*UIJ1 + FAC2*D22*UIJ2;
XRIJ = XIJ/RIJ;
YRIJ = YIJ/RIJ;
ZRIJ = ZIJ/RIJ;
FX2 += FAC12*XRIJ;
FY2 += FAC12*YRIJ;
FZ2 += FAC12*ZRIJ;
}
// Net force is the negated accumulation; EE holds the (double-counted
// across the pair) per-atom energy sum.
FFX[i] = -FX2;
FFY[i] = -FY2;
FFZ[i] = -FZ2;
EE[i] = EPP;
}
3,408 | /*
icc propagate-toz-test.C -o propagate-toz-test.exe -fopenmp -O3
*/
#include "cuda_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <sys/time.h>
#include <iostream>
#include <chrono>
#include <iomanip>
//#define DUMP_OUTPUT
#define FIXED_RSEED
//#define USE_ASYNC
#ifndef USE_ASYNC
#define num_streams 1
#endif
#ifndef nevts
#define nevts 100
#endif
#ifndef bsize
#define bsize 32
#endif
#ifndef ntrks
#define ntrks 9600 //122880
#endif
#define nb (ntrks/bsize)
#define smear 0.1
#ifndef NITER
#define NITER 5
#endif
#ifndef nlayer
#define nlayer 20
#endif
#ifndef num_streams
#define num_streams 10
#endif
#ifndef threadsperblockx
#define threadsperblockx bsize
#endif
#define threadsperblocky 1
#ifndef blockspergrid
#define blockspergrid nevts*nb/num_streams
#endif
#define HOSTDEV __host__ __device__
// Flat index of element (i, j) in a row-major matrix with D columns.
HOSTDEV size_t PosInMtrx(size_t i, size_t j, size_t D) {
  return j + i * D;
}
// Map a flat 3x3 index (row-major, 0..8) onto packed symmetric storage
// (6 unique entries, 0..5); mirrored positions share an offset.
HOSTDEV size_t SymOffsets33(size_t i) {
  const size_t lut[9] = {0, 1, 3, 1, 2, 4, 3, 4, 5};
  return lut[i];
}
// Map a flat 6x6 index (row-major, 0..35) onto packed symmetric storage
// (21 unique entries, 0..20); mirrored positions share an offset.
HOSTDEV size_t SymOffsets66(size_t i) {
  const size_t lut[36] = {0, 1, 3, 6, 10, 15, 1, 2, 4, 7, 11, 16, 3, 4, 5, 8, 12, 17, 6, 7, 8, 9, 13, 18, 10, 11, 12, 13, 14, 19, 15, 16, 17, 18, 19, 20};
  return lut[i];
}
// --- Scalar (array-of-structures) input records ---------------------------
// ATRK: one reference track — 6 parameters, packed symmetric 6x6 covariance
// (21 unique entries), and integer charge q.
struct ATRK {
float par[6];
float cov[21];
int q;
// int hitidx[22];
};
// AHIT: one reference hit — 3D position and packed symmetric 3x3 covariance.
struct AHIT {
float pos[3];
float cov[6];
};
// --- Batched (structure-of-arrays) matriplex containers -------------------
// Each MPxx type stores `bsize` matrices interleaved so that entry k of
// matrix n lives at data[k*bsize + n] (one "lane" per matrix).
struct MP1I {
int data[1*bsize];
};
struct MP22I {
int data[22*bsize];
};
struct MP3F {
float data[3*bsize];
};
struct MP6F {
float data[6*bsize];
};
// Full 3x3 matrices (9 entries each).
struct MP3x3 {
float data[9*bsize];
};
// Full 3x6 matrices (18 entries each), e.g. the Kalman gain.
struct MP3x6 {
float data[18*bsize];
};
// Packed symmetric 3x3 (6 unique entries each).
struct MP3x3SF {
float data[6*bsize];
};
// Packed symmetric 6x6 (21 unique entries each).
struct MP6x6SF {
float data[21*bsize];
};
// Full 6x6 matrices (36 entries each), e.g. propagation Jacobians.
struct MP6x6F {
float data[36*bsize];
};
// One batch of bsize tracks: parameters, covariance, charge.
struct MPTRK {
MP6F par;
MP6x6SF cov;
MP1I q;
// MP22I hitidx;
};
// One batch of bsize hits: position and covariance.
struct MPHIT {
MP3F pos;
MP3x3SF cov;
};
// Gaussian deviate with mean mu and standard deviation sigma, via the polar
// Box-Muller method on top of rand(). Each rejection-sampling pass yields two
// independent deviates; the second is cached in static state and returned by
// the next call. Not thread-safe (static state, rand()).
float randn(float mu, float sigma) {
  static float n1, n2;
  static int have_cached = 0;
  if (have_cached) {
    have_cached = 0;
    return mu + sigma * (float) n2;
  }
  float u1, u2, w, scale;
  // Draw points uniformly in the unit square until one lands strictly
  // inside the unit circle (excluding the origin).
  do {
    u1 = -1 + ((float) rand () / RAND_MAX) * 2;
    u2 = -1 + ((float) rand () / RAND_MAX) * 2;
    w = pow (u1, 2) + pow (u2, 2);
  } while (w >= 1 || w == 0);
  scale = sqrt ((-2 * log (w)) / w);
  n1 = u1 * scale;
  n2 = u2 * scale;
  have_cached = 1;
  return mu + sigma * (float) n1;
}
// Replicate one reference track into the batched MPTRK layout in CUDA
// managed memory: nevts*nb batches of bsize tracks. Every parameter and
// covariance entry is smeared by a relative Gaussian factor
// (1 + smear*N(0,1)); the charge is randomly left at q or shifted to q-2
// (the ceil(-0.5 + U[0,1]) term is 0 or 1). Caller owns the returned
// cudaMallocManaged buffer.
// NOTE(review): the cudaMallocManaged return status is not checked.
MPTRK* prepareTracks(ATRK inputtrk) {
MPTRK* result;
cudaMallocManaged((void**)&result,nevts*nb*sizeof(MPTRK));
for (size_t ie=0;ie<nevts;++ie) {
for (size_t ib=0;ib<nb;++ib) {
for (size_t it=0;it<bsize;++it) {
//par: 6 smeared track parameters, lane `it` of batch ib+nb*ie
for (size_t ip=0;ip<6;++ip) {
result[ib + nb*ie].par.data[it + ip*bsize] = (1+smear*randn(0,1))*inputtrk.par[ip];
}
//cov: 21 packed symmetric covariance entries, smeared the same way
for (size_t ip=0;ip<21;++ip) {
result[ib + nb*ie].cov.data[it + ip*bsize] = (1+smear*randn(0,1))*inputtrk.cov[ip];
}
//q: randomly q or q-2
result[ib + nb*ie].q.data[it] = inputtrk.q-2*ceil(-0.5 + (float)rand() / RAND_MAX);//fixme check
}
}
}
return result;
}
// Replicate one reference hit into the batched MPHIT layout in CUDA managed
// memory: nlayer*nevts*nb batches of bsize hits, layer-major within each
// (event, batch) pair (index lay + nlayer*(ib + nb*ie)). Position and
// covariance entries get the same relative Gaussian smear as the tracks.
// Caller owns the returned cudaMallocManaged buffer.
// NOTE(review): the cudaMallocManaged return status is not checked.
MPHIT* prepareHits(AHIT inputhit) {
MPHIT* result;
cudaMallocManaged((void**)&result,nlayer*nevts*nb*sizeof(MPHIT));
for (size_t lay=0;lay<nlayer;++lay) {
for (size_t ie=0;ie<nevts;++ie) {
for (size_t ib=0;ib<nb;++ib) {
for (size_t it=0;it<bsize;++it) {
//pos: 3 smeared position components for lane `it`
for (size_t ip=0;ip<3;++ip) {
result[lay+nlayer*(ib + nb*ie)].pos.data[it + ip*bsize] = (1+smear*randn(0,1))*inputhit.pos[ip];
}
//cov: 6 packed symmetric covariance entries
for (size_t ip=0;ip<6;++ip) {
result[lay+nlayer*(ib + nb*ie)].cov.data[it + ip*bsize] = (1+smear*randn(0,1))*inputhit.cov[ip];
}
}
}
}
}
return result;
}
// --- Track batch and parameter accessors ----------------------------------
// bTk: batch ib of event ev within the flat tracks array (batch-major).
HOSTDEV MPTRK* bTk(MPTRK* tracks, size_t ev, size_t ib) {
return &(tracks[ib + nb*ev]);
}
HOSTDEV const MPTRK* bTk(const MPTRK* tracks, size_t ev, size_t ib) {
return &(tracks[ib + nb*ev]);
}
// Charge of lane `it` (stored as int, returned as float).
HOSTDEV float q(const MP1I* bq, size_t it){
return (*bq).data[it];
}
// Parameter ipar of lane `it` in a parameter matriplex.
HOSTDEV float par(const MP6F* bpars, size_t it, size_t ipar){
return (*bpars).data[it + ipar*bsize];
}
// Named accessors for the 6 track parameters (indices 0..5).
HOSTDEV float x (const MP6F* bpars, size_t it){ return par(bpars, it, 0); }
HOSTDEV float y (const MP6F* bpars, size_t it){ return par(bpars, it, 1); }
HOSTDEV float z (const MP6F* bpars, size_t it){ return par(bpars, it, 2); }
HOSTDEV float ipt (const MP6F* bpars, size_t it){ return par(bpars, it, 3); }
HOSTDEV float phi (const MP6F* bpars, size_t it){ return par(bpars, it, 4); }
HOSTDEV float theta(const MP6F* bpars, size_t it){ return par(bpars, it, 5); }
// Same accessors, taking the whole track batch.
HOSTDEV float par(const MPTRK* btracks, size_t it, size_t ipar){
return par(&(*btracks).par,it,ipar);
}
HOSTDEV float x (const MPTRK* btracks, size_t it){ return par(btracks, it, 0); }
HOSTDEV float y (const MPTRK* btracks, size_t it){ return par(btracks, it, 1); }
HOSTDEV float z (const MPTRK* btracks, size_t it){ return par(btracks, it, 2); }
HOSTDEV float ipt (const MPTRK* btracks, size_t it){ return par(btracks, it, 3); }
HOSTDEV float phi (const MPTRK* btracks, size_t it){ return par(btracks, it, 4); }
HOSTDEV float theta(const MPTRK* btracks, size_t it){ return par(btracks, it, 5); }
// Same accessors, addressing a track by event index and global track index
// (tk is split into batch = tk/bsize and lane = tk%bsize).
HOSTDEV float par(const MPTRK* tracks, size_t ev, size_t tk, size_t ipar){
size_t ib = tk/bsize;
const MPTRK* btracks = bTk(tracks, ev, ib);
size_t it = tk % bsize;
return par(btracks, it, ipar);
}
HOSTDEV float x (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 0); }
HOSTDEV float y (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 1); }
HOSTDEV float z (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 2); }
HOSTDEV float ipt (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 3); }
HOSTDEV float phi (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 4); }
HOSTDEV float theta(const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 5); }
// --- Track parameter setters ----------------------------------------------
// Store parameter ipar of lane `it` (mirrors the par() layout above).
HOSTDEV void setpar(MP6F* bpars, size_t it, size_t ipar, float val){
(*bpars).data[it + ipar*bsize] = val;
}
HOSTDEV void setx (MP6F* bpars, size_t it, float val){ setpar(bpars, it, 0, val); }
HOSTDEV void sety (MP6F* bpars, size_t it, float val){ setpar(bpars, it, 1, val); }
HOSTDEV void setz (MP6F* bpars, size_t it, float val){ setpar(bpars, it, 2, val); }
HOSTDEV void setipt (MP6F* bpars, size_t it, float val){ setpar(bpars, it, 3, val); }
HOSTDEV void setphi (MP6F* bpars, size_t it, float val){ setpar(bpars, it, 4, val); }
HOSTDEV void settheta(MP6F* bpars, size_t it, float val){ setpar(bpars, it, 5, val); }
// Same setters, taking the whole track batch.
HOSTDEV void setpar(MPTRK* btracks, size_t it, size_t ipar, float val){
setpar(&(*btracks).par,it,ipar,val);
}
HOSTDEV void setx (MPTRK* btracks, size_t it, float val){ setpar(btracks, it, 0, val); }
HOSTDEV void sety (MPTRK* btracks, size_t it, float val){ setpar(btracks, it, 1, val); }
HOSTDEV void setz (MPTRK* btracks, size_t it, float val){ setpar(btracks, it, 2, val); }
HOSTDEV void setipt (MPTRK* btracks, size_t it, float val){ setpar(btracks, it, 3, val); }
HOSTDEV void setphi (MPTRK* btracks, size_t it, float val){ setpar(btracks, it, 4, val); }
HOSTDEV void settheta(MPTRK* btracks, size_t it, float val){ setpar(btracks, it, 5, val); }
// --- Hit batch and position accessors -------------------------------------
// Two-argument form ignores the layer dimension (legacy layout).
HOSTDEV MPHIT* bHit(MPHIT* hits, size_t ev, size_t ib) {
return &(hits[ib + nb*ev]);
}
HOSTDEV const MPHIT* bHit(const MPHIT* hits, size_t ev, size_t ib) {
return &(hits[ib + nb*ev]);
}
// Layer-aware form matching prepareHits(): lay + nlayer*(ib + nb*ev).
HOSTDEV const MPHIT* bHit(const MPHIT* hits, size_t ev, size_t ib,int lay) {
return &(hits[lay + (ib*nlayer) +(ev*nlayer*nb)]);
}
// Position component ipar of lane `it` in a hit-position matriplex.
HOSTDEV float pos(const MP3F* hpos, size_t it, size_t ipar){
return (*hpos).data[it + ipar*bsize];
}
HOSTDEV float x(const MP3F* hpos, size_t it) { return pos(hpos, it, 0); }
HOSTDEV float y(const MP3F* hpos, size_t it) { return pos(hpos, it, 1); }
HOSTDEV float z(const MP3F* hpos, size_t it) { return pos(hpos, it, 2); }
HOSTDEV float pos(const MPHIT* hits, size_t it, size_t ipar){
return pos(&(*hits).pos,it,ipar);
}
HOSTDEV float x(const MPHIT* hits, size_t it) { return pos(hits, it, 0); }
HOSTDEV float y(const MPHIT* hits, size_t it) { return pos(hits, it, 1); }
HOSTDEV float z(const MPHIT* hits, size_t it) { return pos(hits, it, 2); }
// Addressing a hit by event and global track index; reads the last layer.
HOSTDEV float pos(const MPHIT* hits, size_t ev, size_t tk, size_t ipar){
size_t ib = tk/bsize;
//[DEBUG by Seyong on Dec. 28, 2020] add 4th argument(nlayer-1) to bHit() below.
const MPHIT* bhits = bHit(hits, ev, ib, nlayer-1);
size_t it = tk % bsize;
return pos(bhits,it,ipar);
}
HOSTDEV float x(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 0); }
HOSTDEV float y(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 1); }
HOSTDEV float z(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 2); }
#define N bsize
// C = A * B, batched over the bsize matrices (one matrix lane per n, threads
// strided by blockDim.x). A is a full 6x6 matrix and B a packed symmetric
// 6x6 covariance. The hand-expanded products hard-code a sparsity pattern of
// A (e.g. a[0], a[1], a[6], a[7] never appear; rows 3 and 6 of C copy B rows
// directly) — presumably the endcap helix-propagation Jacobian; confirm
// against the code generator before editing any term.
__forceinline__ __device__ void MultHelixPropEndcap(const MP6x6F* A, const MP6x6SF* B, MP6x6F* C) {
const float* a; //ASSUME_ALIGNED(a, 64);
const float* b; //ASSUME_ALIGNED(b, 64);
float* c; //ASSUME_ALIGNED(c, 64);
a = A->data; //ASSUME_ALIGNED(a, 64);
b = B->data; //ASSUME_ALIGNED(b, 64);
c = C->data; //ASSUME_ALIGNED(c, 64);
for(int n=threadIdx.x;n<N;n+=blockDim.x)
{
c[ 0*N+n] = b[ 0*N+n] + a[ 2*N+n]*b[ 3*N+n] + a[ 3*N+n]*b[ 6*N+n] + a[ 4*N+n]*b[10*N+n] + a[ 5*N+n]*b[15*N+n];
c[ 1*N+n] = b[ 1*N+n] + a[ 2*N+n]*b[ 4*N+n] + a[ 3*N+n]*b[ 7*N+n] + a[ 4*N+n]*b[11*N+n] + a[ 5*N+n]*b[16*N+n];
c[ 2*N+n] = b[ 3*N+n] + a[ 2*N+n]*b[ 5*N+n] + a[ 3*N+n]*b[ 8*N+n] + a[ 4*N+n]*b[12*N+n] + a[ 5*N+n]*b[17*N+n];
c[ 3*N+n] = b[ 6*N+n] + a[ 2*N+n]*b[ 8*N+n] + a[ 3*N+n]*b[ 9*N+n] + a[ 4*N+n]*b[13*N+n] + a[ 5*N+n]*b[18*N+n];
c[ 4*N+n] = b[10*N+n] + a[ 2*N+n]*b[12*N+n] + a[ 3*N+n]*b[13*N+n] + a[ 4*N+n]*b[14*N+n] + a[ 5*N+n]*b[19*N+n];
c[ 5*N+n] = b[15*N+n] + a[ 2*N+n]*b[17*N+n] + a[ 3*N+n]*b[18*N+n] + a[ 4*N+n]*b[19*N+n] + a[ 5*N+n]*b[20*N+n];
c[ 6*N+n] = b[ 1*N+n] + a[ 8*N+n]*b[ 3*N+n] + a[ 9*N+n]*b[ 6*N+n] + a[10*N+n]*b[10*N+n] + a[11*N+n]*b[15*N+n];
c[ 7*N+n] = b[ 2*N+n] + a[ 8*N+n]*b[ 4*N+n] + a[ 9*N+n]*b[ 7*N+n] + a[10*N+n]*b[11*N+n] + a[11*N+n]*b[16*N+n];
c[ 8*N+n] = b[ 4*N+n] + a[ 8*N+n]*b[ 5*N+n] + a[ 9*N+n]*b[ 8*N+n] + a[10*N+n]*b[12*N+n] + a[11*N+n]*b[17*N+n];
c[ 9*N+n] = b[ 7*N+n] + a[ 8*N+n]*b[ 8*N+n] + a[ 9*N+n]*b[ 9*N+n] + a[10*N+n]*b[13*N+n] + a[11*N+n]*b[18*N+n];
c[10*N+n] = b[11*N+n] + a[ 8*N+n]*b[12*N+n] + a[ 9*N+n]*b[13*N+n] + a[10*N+n]*b[14*N+n] + a[11*N+n]*b[19*N+n];
c[11*N+n] = b[16*N+n] + a[ 8*N+n]*b[17*N+n] + a[ 9*N+n]*b[18*N+n] + a[10*N+n]*b[19*N+n] + a[11*N+n]*b[20*N+n];
// Row 2 of the result is identically zero in this propagation.
c[12*N+n] = 0;
c[13*N+n] = 0;
c[14*N+n] = 0;
c[15*N+n] = 0;
c[16*N+n] = 0;
c[17*N+n] = 0;
// Row 3 passes through the corresponding B row unchanged.
c[18*N+n] = b[ 6*N+n];
c[19*N+n] = b[ 7*N+n];
c[20*N+n] = b[ 8*N+n];
c[21*N+n] = b[ 9*N+n];
c[22*N+n] = b[13*N+n];
c[23*N+n] = b[18*N+n];
c[24*N+n] = a[26*N+n]*b[ 3*N+n] + a[27*N+n]*b[ 6*N+n] + b[10*N+n] + a[29*N+n]*b[15*N+n];
c[25*N+n] = a[26*N+n]*b[ 4*N+n] + a[27*N+n]*b[ 7*N+n] + b[11*N+n] + a[29*N+n]*b[16*N+n];
c[26*N+n] = a[26*N+n]*b[ 5*N+n] + a[27*N+n]*b[ 8*N+n] + b[12*N+n] + a[29*N+n]*b[17*N+n];
c[27*N+n] = a[26*N+n]*b[ 8*N+n] + a[27*N+n]*b[ 9*N+n] + b[13*N+n] + a[29*N+n]*b[18*N+n];
c[28*N+n] = a[26*N+n]*b[12*N+n] + a[27*N+n]*b[13*N+n] + b[14*N+n] + a[29*N+n]*b[19*N+n];
c[29*N+n] = a[26*N+n]*b[17*N+n] + a[27*N+n]*b[18*N+n] + b[19*N+n] + a[29*N+n]*b[20*N+n];
// Row 5 passes through the corresponding B row unchanged.
c[30*N+n] = b[15*N+n];
c[31*N+n] = b[16*N+n];
c[32*N+n] = b[17*N+n];
c[33*N+n] = b[18*N+n];
c[34*N+n] = b[19*N+n];
c[35*N+n] = b[20*N+n];
}
}
// C = B * A^T projected onto packed symmetric 6x6 storage (21 entries),
// batched over the bsize matrix lanes. B is the full 6x6 intermediate from
// MultHelixPropEndcap and A the same sparse Jacobian, completing the
// similarity transform cov' = J * cov * J^T. The zero entries (c[5], c[8],
// c[12], c[17]) reflect the same hard-coded sparsity — presumably rows of
// the Jacobian that annihilate those terms; confirm against the generator
// before editing any term.
__forceinline__ __device__ void MultHelixPropTranspEndcap(MP6x6F* A, MP6x6F* B, MP6x6SF* C) {
const float* a; //ASSUME_ALIGNED(a, 64);
const float* b; //ASSUME_ALIGNED(b, 64);
float* c; //ASSUME_ALIGNED(c, 64);
a = A->data; //ASSUME_ALIGNED(a, 64);
b = B->data; //ASSUME_ALIGNED(b, 64);
c = C->data; //ASSUME_ALIGNED(c, 64);
for(int n=threadIdx.x;n<N;n+=blockDim.x)
{
c[ 0*N+n] = b[ 0*N+n] + b[ 2*N+n]*a[ 2*N+n] + b[ 3*N+n]*a[ 3*N+n] + b[ 4*N+n]*a[ 4*N+n] + b[ 5*N+n]*a[ 5*N+n];
c[ 1*N+n] = b[ 6*N+n] + b[ 8*N+n]*a[ 2*N+n] + b[ 9*N+n]*a[ 3*N+n] + b[10*N+n]*a[ 4*N+n] + b[11*N+n]*a[ 5*N+n];
c[ 2*N+n] = b[ 7*N+n] + b[ 8*N+n]*a[ 8*N+n] + b[ 9*N+n]*a[ 9*N+n] + b[10*N+n]*a[10*N+n] + b[11*N+n]*a[11*N+n];
c[ 3*N+n] = b[12*N+n] + b[14*N+n]*a[ 2*N+n] + b[15*N+n]*a[ 3*N+n] + b[16*N+n]*a[ 4*N+n] + b[17*N+n]*a[ 5*N+n];
c[ 4*N+n] = b[13*N+n] + b[14*N+n]*a[ 8*N+n] + b[15*N+n]*a[ 9*N+n] + b[16*N+n]*a[10*N+n] + b[17*N+n]*a[11*N+n];
c[ 5*N+n] = 0;
c[ 6*N+n] = b[18*N+n] + b[20*N+n]*a[ 2*N+n] + b[21*N+n]*a[ 3*N+n] + b[22*N+n]*a[ 4*N+n] + b[23*N+n]*a[ 5*N+n];
c[ 7*N+n] = b[19*N+n] + b[20*N+n]*a[ 8*N+n] + b[21*N+n]*a[ 9*N+n] + b[22*N+n]*a[10*N+n] + b[23*N+n]*a[11*N+n];
c[ 8*N+n] = 0;
c[ 9*N+n] = b[21*N+n];
c[10*N+n] = b[24*N+n] + b[26*N+n]*a[ 2*N+n] + b[27*N+n]*a[ 3*N+n] + b[28*N+n]*a[ 4*N+n] + b[29*N+n]*a[ 5*N+n];
c[11*N+n] = b[25*N+n] + b[26*N+n]*a[ 8*N+n] + b[27*N+n]*a[ 9*N+n] + b[28*N+n]*a[10*N+n] + b[29*N+n]*a[11*N+n];
c[12*N+n] = 0;
c[13*N+n] = b[27*N+n];
c[14*N+n] = b[26*N+n]*a[26*N+n] + b[27*N+n]*a[27*N+n] + b[28*N+n] + b[29*N+n]*a[29*N+n];
c[15*N+n] = b[30*N+n] + b[32*N+n]*a[ 2*N+n] + b[33*N+n]*a[ 3*N+n] + b[34*N+n]*a[ 4*N+n] + b[35*N+n]*a[ 5*N+n];
c[16*N+n] = b[31*N+n] + b[32*N+n]*a[ 8*N+n] + b[33*N+n]*a[ 9*N+n] + b[34*N+n]*a[10*N+n] + b[35*N+n]*a[11*N+n];
c[17*N+n] = 0;
c[18*N+n] = b[33*N+n];
c[19*N+n] = b[32*N+n]*a[26*N+n] + b[33*N+n]*a[27*N+n] + b[34*N+n] + b[35*N+n]*a[29*N+n];
c[20*N+n] = b[35*N+n];
}
}
__forceinline__ __device__ void KalmanGainInv(const MP6x6SF* A, const MP3x3SF* B, MP3x3* C) {
// K = P Ht (H P Ht + R)^-1
// H projects onto (x, y, z), so H P Ht is the upper-left 3x3 of the packed
// 6x6 track covariance A, and B is the packed 3x3 hit covariance R.
// Per matrix lane n this inverts the symmetric 3x3
//   M = [[m00, m01, m02],
//        [m01, m11, m12],
//        [m02, m12, m22]]
// with m00=a0+b0, m01=a1+b1, m02=a2+b2, m11=a6+b3, m12=a7+b4, m22=a11+b5,
// via the adjugate divided by the determinant (accumulated in double).
const float* a; //ASSUME_ALIGNED(a, 64);
const float* b; //ASSUME_ALIGNED(b, 64);
float* c; //ASSUME_ALIGNED(c, 64);
a = (*A).data; //ASSUME_ALIGNED(a, 64);
b = (*B).data; //ASSUME_ALIGNED(b, 64);
c = (*C).data; //ASSUME_ALIGNED(c, 64);
for(int n=threadIdx.x;n<N;n+=blockDim.x)
{
double det =
((a[0*N+n]+b[0*N+n])*(((a[ 6*N+n]+b[ 3*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[7*N+n]+b[4*N+n]) *(a[7*N+n]+b[4*N+n])))) -
((a[1*N+n]+b[1*N+n])*(((a[ 1*N+n]+b[ 1*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[7*N+n]+b[4*N+n]) *(a[2*N+n]+b[2*N+n])))) +
((a[2*N+n]+b[2*N+n])*(((a[ 1*N+n]+b[ 1*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[6*N+n]+b[3*N+n]))));
double invdet = 1.0/det;
c[ 0*N+n] =   invdet*(((a[ 6*N+n]+b[ 3*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[7*N+n]+b[4*N+n]) *(a[7*N+n]+b[4*N+n])));
c[ 1*N+n] =  -1*invdet*(((a[ 1*N+n]+b[ 1*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[7*N+n]+b[4*N+n])));
// Bug fix: inv(0,2) is the cofactor m01*m12 - m02*m11. The original used
// m01*m12 - m02*m12 here (second factor (a7+b4) instead of (a6+b3)),
// breaking the symmetry of the inverse (c[2] != c[6]) and corrupting the
// Kalman gain. The corrected term matches c[6] below, as symmetry requires.
c[ 2*N+n] =   invdet*(((a[ 1*N+n]+b[ 1*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[6*N+n]+b[3*N+n])));
c[ 3*N+n] =  -1*invdet*(((a[ 1*N+n]+b[ 1*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[7*N+n]+b[4*N+n]) *(a[2*N+n]+b[2*N+n])));
c[ 4*N+n] =   invdet*(((a[ 0*N+n]+b[ 0*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[2*N+n]+b[2*N+n])));
c[ 5*N+n] =  -1*invdet*(((a[ 0*N+n]+b[ 0*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[1*N+n]+b[1*N+n])));
c[ 6*N+n] =   invdet*(((a[ 1*N+n]+b[ 1*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[6*N+n]+b[3*N+n])));
c[ 7*N+n] =  -1*invdet*(((a[ 0*N+n]+b[ 0*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[1*N+n]+b[1*N+n])));
c[ 8*N+n] =   invdet*(((a[ 0*N+n]+b[ 0*N+n]) *(a[6*N+n]+b[3*N+n])) - ((a[1*N+n]+b[1*N+n]) *(a[1*N+n]+b[1*N+n])));
}
}
// K = P Ht (H P Ht + R)^-1: multiplies the first three packed-symmetric
// columns of the 6x6 track covariance A (offsets {0,1,2}, {1,6,7}, {2,7,11},
// {3,8,12}, {4,9,13}, {5,10,14} per row) by the 3x3 inverse B from
// KalmanGainInv, producing the 6x3 Kalman gain stored row-major in the MP3x6
// C. Batched over the bsize matrix lanes, threads strided by blockDim.x.
__forceinline__ __device__ void KalmanGain(const MP6x6SF* A, const MP3x3* B, MP3x6* C) {
// k = P Ht(HPHt + R)^-1
// HpHt -> cov of x,y,z. take upper 3x3 matrix of P
// This calculates the kalman gain
const float* a; //ASSUME_ALIGNED(a, 64);
const float* b; //ASSUME_ALIGNED(b, 64);
float* c; //ASSUME_ALIGNED(c, 64);
a = (*A).data; //ASSUME_ALIGNED(a, 64);
b = (*B).data; //ASSUME_ALIGNED(b, 64);
c = (*C).data; //ASSUME_ALIGNED(c, 64);
for(int n=threadIdx.x;n<N;n+=blockDim.x)
{
c[ 0*N+n] = a[0*N+n]*b[0*N+n] + a[1*N+n]*b[3*N+n] + a[2*N+n]*b[6*N+n];
c[ 1*N+n] = a[0*N+n]*b[1*N+n] + a[1*N+n]*b[4*N+n] + a[2*N+n]*b[7*N+n];
c[ 2*N+n] = a[0*N+n]*b[2*N+n] + a[1*N+n]*b[5*N+n] + a[2*N+n]*b[8*N+n];
c[ 3*N+n] = a[1*N+n]*b[0*N+n] + a[6*N+n]*b[3*N+n] + a[7*N+n]*b[6*N+n];
c[ 4*N+n] = a[1*N+n]*b[1*N+n] + a[6*N+n]*b[4*N+n] + a[7*N+n]*b[7*N+n];
c[ 5*N+n] = a[1*N+n]*b[2*N+n] + a[6*N+n]*b[5*N+n] + a[7*N+n]*b[8*N+n];
c[ 6*N+n] = a[2*N+n]*b[0*N+n] + a[7*N+n]*b[3*N+n] + a[11*N+n]*b[6*N+n];
c[ 7*N+n] = a[2*N+n]*b[1*N+n] + a[7*N+n]*b[4*N+n] + a[11*N+n]*b[7*N+n];
c[ 8*N+n] = a[2*N+n]*b[2*N+n] + a[7*N+n]*b[5*N+n] + a[11*N+n]*b[8*N+n];
c[ 9*N+n] = a[3*N+n]*b[0*N+n] + a[8*N+n]*b[3*N+n] + a[12*N+n]*b[6*N+n];
c[ 10*N+n] = a[3*N+n]*b[1*N+n] + a[8*N+n]*b[4*N+n] + a[12*N+n]*b[7*N+n];
c[ 11*N+n] = a[3*N+n]*b[2*N+n] + a[8*N+n]*b[5*N+n] + a[12*N+n]*b[8*N+n];
c[ 12*N+n] = a[4*N+n]*b[0*N+n] + a[9*N+n]*b[3*N+n] + a[13*N+n]*b[6*N+n];
c[ 13*N+n] = a[4*N+n]*b[1*N+n] + a[9*N+n]*b[4*N+n] + a[13*N+n]*b[7*N+n];
c[ 14*N+n] = a[4*N+n]*b[2*N+n] + a[9*N+n]*b[5*N+n] + a[13*N+n]*b[8*N+n];
c[ 15*N+n] = a[5*N+n]*b[0*N+n] + a[10*N+n]*b[3*N+n] + a[14*N+n]*b[6*N+n];
c[ 16*N+n] = a[5*N+n]*b[1*N+n] + a[10*N+n]*b[4*N+n] + a[14*N+n]*b[7*N+n];
c[ 17*N+n] = a[5*N+n]*b[2*N+n] + a[10*N+n]*b[5*N+n] + a[14*N+n]*b[8*N+n];
}
}
// Kalman filter measurement update for one bunch of bsize tracks:
// builds the 3x3 inverse of the measurement covariance term, forms the 3x6
// Kalman gain, nudges the six track parameters toward the measured hit
// position (msP), and updates the track covariance (trkErr) in place.
// Shared scratch is block-wide; each thread handles lanes
// it = threadIdx.x, threadIdx.x+blockDim.x, ... so the helper loops and the
// loops below touch the same lanes per thread.
// NOTE(review): there is no __syncthreads() between KalmanGainInv/KalmanGain
// and the loops below — correctness appears to rely on every helper using
// the same thread->lane mapping; confirm before changing any loop stride.
__forceinline__ __device__ void KalmanUpdate(MP6x6SF* trkErr, MP6F* inPar, const MP3x3SF* hitErr, const MP3F* msP){
// Block-shared scratch: 3x3 inverse, 3x6 gain, and the covariance delta.
__shared__ MP3x3 inverse_temp;
__shared__ MP3x6 kGain;
__shared__ MP6x6SF newErr;
KalmanGainInv(trkErr,hitErr,&inverse_temp);
KalmanGain(trkErr,&inverse_temp,&kGain);
for(size_t it=threadIdx.x;it<bsize;it+=blockDim.x){
// Current parameter values for this lane.
const float xin = x(inPar,it);
const float yin = y(inPar,it);
const float zin = z(inPar,it);
const float ptin = 1.0f/ipt(inPar,it); // is this pt or ipt?
const float phiin = phi(inPar,it);
const float thetain = theta(inPar,it);
// Measured hit position for this lane.
const float xout = x(msP,it);
const float yout = y(msP,it);
const float zout = z(msP,it);
// par_new = par + K * residual. Only the x and y residual columns of the
// gain are applied; the z column (gain entries 2,5,8,...) is not used in
// the parameter update — NOTE(review): confirm this is intentional.
float xnew = xin + (kGain.data[0*bsize+it]*(xout-xin)) +(kGain.data[1*bsize+it]*(yout-yin));
float ynew = yin + (kGain.data[3*bsize+it]*(xout-xin)) +(kGain.data[4*bsize+it]*(yout-yin));
float znew = zin + (kGain.data[6*bsize+it]*(xout-xin)) +(kGain.data[7*bsize+it]*(yout-yin));
float ptnew = ptin + (kGain.data[9*bsize+it]*(xout-xin)) +(kGain.data[10*bsize+it]*(yout-yin));
float phinew = phiin + (kGain.data[12*bsize+it]*(xout-xin)) +(kGain.data[13*bsize+it]*(yout-yin));
float thetanew = thetain + (kGain.data[15*bsize+it]*(xout-xin)) +(kGain.data[16*bsize+it]*(yout-yin));
// newErr = P - K*(rows of P), elementwise over the 21 packed symmetric
// entries (flattened as entry*bsize + lane).
newErr.data[0*bsize+it] = trkErr->data[0*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[0*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[1*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[2*bsize+it]);
newErr.data[1*bsize+it] = trkErr->data[1*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[1*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[6*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[7*bsize+it]);
newErr.data[2*bsize+it] = trkErr->data[2*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[2*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[7*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[11*bsize+it]);
newErr.data[3*bsize+it] = trkErr->data[3*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[3*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[8*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[12*bsize+it]);
newErr.data[4*bsize+it] = trkErr->data[4*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[13*bsize+it]);
newErr.data[5*bsize+it] = trkErr->data[5*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[14*bsize+it]);
newErr.data[6*bsize+it] = trkErr->data[6*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[1*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[6*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[7*bsize+it]);
newErr.data[7*bsize+it] = trkErr->data[7*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[2*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[7*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[11*bsize+it]);
newErr.data[8*bsize+it] = trkErr->data[8*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[3*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[8*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[12*bsize+it]);
newErr.data[9*bsize+it] = trkErr->data[9*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[13*bsize+it]);
newErr.data[10*bsize+it] = trkErr->data[10*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[14*bsize+it]);
newErr.data[11*bsize+it] = trkErr->data[11*bsize+it] - (kGain.data[6*bsize+it]*trkErr->data[2*bsize+it]+kGain.data[7*bsize+it]*trkErr->data[7*bsize+it]+kGain.data[8*bsize+it]*trkErr->data[11*bsize+it]);
newErr.data[12*bsize+it] = trkErr->data[12*bsize+it] - (kGain.data[6*bsize+it]*trkErr->data[3*bsize+it]+kGain.data[7*bsize+it]*trkErr->data[8*bsize+it]+kGain.data[8*bsize+it]*trkErr->data[12*bsize+it]);
newErr.data[13*bsize+it] = trkErr->data[13*bsize+it] - (kGain.data[6*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[7*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[8*bsize+it]*trkErr->data[13*bsize+it]);
newErr.data[14*bsize+it] = trkErr->data[14*bsize+it] - (kGain.data[6*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[7*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[8*bsize+it]*trkErr->data[14*bsize+it]);
newErr.data[15*bsize+it] = trkErr->data[15*bsize+it] - (kGain.data[9*bsize+it]*trkErr->data[3*bsize+it]+kGain.data[10*bsize+it]*trkErr->data[8*bsize+it]+kGain.data[11*bsize+it]*trkErr->data[12*bsize+it]);
newErr.data[16*bsize+it] = trkErr->data[16*bsize+it] - (kGain.data[9*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[10*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[11*bsize+it]*trkErr->data[13*bsize+it]);
newErr.data[17*bsize+it] = trkErr->data[17*bsize+it] - (kGain.data[9*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[10*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[11*bsize+it]*trkErr->data[14*bsize+it]);
newErr.data[18*bsize+it] = trkErr->data[18*bsize+it] - (kGain.data[12*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[13*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[14*bsize+it]*trkErr->data[13*bsize+it]);
newErr.data[19*bsize+it] = trkErr->data[19*bsize+it] - (kGain.data[12*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[13*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[14*bsize+it]*trkErr->data[14*bsize+it]);
newErr.data[20*bsize+it] = trkErr->data[20*bsize+it] - (kGain.data[15*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[16*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[17*bsize+it]*trkErr->data[14*bsize+it]);
// Commit the updated parameters for this lane.
setx(inPar,it,xnew );
sety(inPar,it,ynew );
setz(inPar,it,znew);
setipt(inPar,it, ptnew);
setphi(inPar,it, phinew);
settheta(inPar,it, thetanew);
}
// trkErr := trkErr - newErr. Since newErr = trkErr - K*(...), the net
// result is trkErr = K*(...). NOTE(review): this double subtraction looks
// suspicious for a covariance update (usually P := P - K*H*P directly);
// verify against the reference implementation before relying on it.
for(size_t it=threadIdx.x;it<bsize;it+=blockDim.x){
#pragma unroll
for (int i = 0; i < 21; i++){
trkErr->data[ i*bsize+it] = trkErr->data[ i*bsize+it] - newErr.data[ i*bsize+it];
}
}
}
// Helix curvature factor used in propagateToZ as k = q*kfact. The 100/3.8
// is evaluated in double then narrowed to float; presumably 3.8 is the
// magnetic field in Tesla — TODO confirm units against the reference code.
__device__ __constant__ float kfact = 100/3.8;
// Analytically propagates each track's helix parameters to the z plane of
// its hit (msP): writes the propagated parameters to outPar, fills the
// transport Jacobian into errorProp, and transports the covariance as
// outErr = errorProp * inErr * errorProp^T via the two MultHelix* calls.
// errorProp and temp are caller-provided (block-shared) scratch.
// NOTE(review): only the diagonal and the listed off-diagonal elements of
// errorProp are written each iteration; errorProp is shared and otherwise
// uninitialized, so any element MultHelixPropEndcap reads beyond these
// would be garbage — confirm the helpers only touch the written entries.
__device__ __forceinline__ void propagateToZ(const MP6x6SF* inErr, const MP6F* inPar, const MP1I* inChg,const MP3F* msP,
MP6x6SF* outErr, MP6F* outPar, struct MP6x6F* errorProp, struct MP6x6F* temp) {
//struct MP6x6F* errorProp, temp;
for(size_t it=threadIdx.x;it<bsize;it+=blockDim.x){
// Target z plane and signed curvature constant for this lane.
const float zout = z(msP,it);
const float k = q(inChg,it)*kfact;//*100/3.8;
const float deltaZ = zout - z(inPar,it);
const float pt = 1.0f/ipt(inPar,it);
const float cosP = cosf(phi(inPar,it));
const float sinP = sinf(phi(inPar,it));
const float cosT = cosf(theta(inPar,it));
const float sinT = sinf(theta(inPar,it));
const float pxin = cosP*pt;
const float pyin = sinP*pt;
const float icosT = 1.0f/cosT;
const float icosTk = icosT/k;
// Turning angle over the z step.
const float alpha = deltaZ*sinT*ipt(inPar,it)*icosTk;
//const float alpha = deltaZ*sinT*ipt(inPar,it)/(cosT*k);
const float sina = sinf(alpha); // this can be approximated;
const float cosa = cosf(alpha); // this can be approximated;
// Propagated parameters: x/y rotated along the helix, z snapped to the
// plane, phi advanced by alpha, ipt/theta unchanged.
setx(outPar,it, x(inPar,it) + k*(pxin*sina - pyin*(1.0f-cosa)) );
sety(outPar,it, y(inPar,it) + k*(pyin*sina + pxin*(1.0f-cosa)) );
setz(outPar,it,zout);
setipt(outPar,it, ipt(inPar,it));
setphi(outPar,it, phi(inPar,it)+alpha );
settheta(outPar,it, theta(inPar,it) );
// NOTE(review): sinf/cosf *of* cosP*sina looks unusual, but it matches
// the commented-out reference expressions below, so it is left as-is.
const float sCosPsina = sinf(cosP*sina);
const float cCosPsina = cosf(cosP*sina);
// Jacobian: identity diagonal plus the analytic partial derivatives.
for (size_t i=0;i<6;++i) errorProp->data[bsize*PosInMtrx(i,i,6) + it] = 1.0f;
//[Dec. 21, 2022] Added to have the same pattern as the cudauvm version.
errorProp->data[bsize*PosInMtrx(0,1,6) + it] = 0.0f;
errorProp->data[bsize*PosInMtrx(0,2,6) + it] = cosP*sinT*(sinP*cosa*sCosPsina-cosa)*icosT;
errorProp->data[bsize*PosInMtrx(0,3,6) + it] = cosP*sinT*deltaZ*cosa*(1.0f-sinP*sCosPsina)*(icosT*pt)-k*(cosP*sina-sinP*(1.0f-cCosPsina))*(pt*pt);
errorProp->data[bsize*PosInMtrx(0,4,6) + it] = (k*pt)*(-sinP*sina+sinP*sinP*sina*sCosPsina-cosP*(1.0f-cCosPsina));
errorProp->data[bsize*PosInMtrx(0,5,6) + it] = cosP*deltaZ*cosa*(1.0f-sinP*sCosPsina)*(icosT*icosT);
errorProp->data[bsize*PosInMtrx(1,2,6) + it] = cosa*sinT*(cosP*cosP*sCosPsina-sinP)*icosT;
errorProp->data[bsize*PosInMtrx(1,3,6) + it] = sinT*deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)*(icosT*pt)-k*(sinP*sina+cosP*(1.0f-cCosPsina))*(pt*pt);
errorProp->data[bsize*PosInMtrx(1,4,6) + it] = (k*pt)*(-sinP*(1.0f-cCosPsina)-sinP*cosP*sina*sCosPsina+cosP*sina);
errorProp->data[bsize*PosInMtrx(1,5,6) + it] = deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)*(icosT*icosT);
errorProp->data[bsize*PosInMtrx(4,2,6) + it] = -ipt(inPar,it)*sinT*(icosTk);
errorProp->data[bsize*PosInMtrx(4,3,6) + it] = sinT*deltaZ*(icosTk);
errorProp->data[bsize*PosInMtrx(4,5,6) + it] = ipt(inPar,it)*deltaZ*(icosT*icosTk);
// Reference (division) forms of the expressions above, kept for checking:
//errorProp->data[bsize*PosInMtrx(0,2,6) + it] = cosP*sinT*(sinP*cosa*sCosPsina-cosa)/cosT;
//errorProp->data[bsize*PosInMtrx(0,3,6) + it] = cosP*sinT*deltaZ*cosa*(1.0f-sinP*sCosPsina)/(cosT*ipt(inPar,it))-k*(cosP*sina-sinP*(1.0f-cCosPsina))/(ipt(inPar,it)*ipt(inPar,it));
//errorProp->data[bsize*PosInMtrx(0,4,6) + it] = (k/ipt(inPar,it))*(-sinP*sina+sinP*sinP*sina*sCosPsina-cosP*(1.0f-cCosPsina));
//errorProp->data[bsize*PosInMtrx(0,5,6) + it] = cosP*deltaZ*cosa*(1.0f-sinP*sCosPsina)/(cosT*cosT);
//errorProp->data[bsize*PosInMtrx(1,2,6) + it] = cosa*sinT*(cosP*cosP*sCosPsina-sinP)/cosT;
//errorProp->data[bsize*PosInMtrx(1,3,6) + it] = sinT*deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)/(cosT*ipt(inPar,it))-k*(sinP*sina+cosP*(1.0f-cCosPsina))/(ipt(inPar,it)*ipt(inPar,it));
//errorProp->data[bsize*PosInMtrx(1,4,6) + it] = (k/ipt(inPar,it))*(-sinP*(1.0f-cCosPsina)-sinP*cosP*sina*sCosPsina+cosP*sina);
//errorProp->data[bsize*PosInMtrx(1,5,6) + it] = deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)/(cosT*cosT);
//errorProp->data[bsize*PosInMtrx(4,2,6) + it] = -ipt(inPar,it)*sinT/(cosT*k);
//errorProp->data[bsize*PosInMtrx(4,3,6) + it] = sinT*deltaZ/(cosT*k);
//errorProp->data[bsize*PosInMtrx(4,5,6) + it] = ipt(inPar,it)*deltaZ/(cosT*cosT*k);
}
// outErr = errorProp * inErr * errorProp^T (temp holds the intermediate).
MultHelixPropEndcap(errorProp, inErr, temp);
MultHelixPropTranspEndcap(errorProp, temp, outErr);
}
// Events per stream (integer division) and the leftover events handled by
// the remainder kernel GPUsequenceR on the extra stream.
__device__ __constant__ int ie_range = (int) nevts/num_streams;
__device__ __constant__ int ie_rangeR = (int) nevts%num_streams;
// Per-stream tracking sequence: each block walks (event, bunch) pairs in a
// grid-stride fashion; for every layer it propagates the input bunch to the
// hit's z plane and applies the Kalman update in place on the output bunch.
// The bunch/hit pointers are block-shared and published by thread 0.
__global__ void GPUsequence(MPTRK* trk, MPHIT* hit, MPTRK* outtrk, const int stream){
// Shared scratch for propagateToZ plus the per-bunch pointers.
__shared__ struct MP6x6F errorProp, temp;
const MPTRK* __shared__ btracks;
MPTRK* __shared__ obtracks;
const MPHIT* __shared__ bhits;
__shared__ int ie;
__shared__ int ib;
int ti;
int lnb = nb;
for (ti = blockIdx.x; ti<ie_range*nb; ti+=gridDim.x){
if(threadIdx.x == 0) {
ie = ti/lnb;
ib = ti%lnb;
btracks = bTk(trk,ie,ib);
obtracks = bTk(outtrk,ie,ib);
}
for (int layer=0;layer<nlayer;++layer){
if(threadIdx.x == 0) {
bhits = bHit(hit,ie,ib,layer);
}
__syncthreads(); // publish btracks/obtracks/bhits to all threads
propagateToZ(&(*btracks).cov, &(*btracks).par, &(*btracks).q, &(*bhits).pos,
&(*obtracks).cov, &(*obtracks).par, &errorProp, &temp);
KalmanUpdate(&(*obtracks).cov,&(*obtracks).par,&(*bhits).cov,&(*bhits).pos);
// BUG FIX: barrier before thread 0 overwrites the shared pointers for
// the next layer/bunch. Without it, threads still evaluating
// &(*bhits)/&(*btracks) from this iteration could race with thread 0's
// writes above and read the *new* pointer values.
__syncthreads();
}
}
}
// Remainder variant of GPUsequence: processes the ie_rangeR leftover events
// (nevts % num_streams) on the extra stream. Same structure as GPUsequence.
__global__ void GPUsequenceR(MPTRK* trk, MPHIT* hit, MPTRK* outtrk, const int stream){
__shared__ struct MP6x6F errorProp, temp;
const MPTRK* __shared__ btracks;
MPTRK* __shared__ obtracks;
const MPHIT* __shared__ bhits;
__shared__ size_t ie;
__shared__ size_t ib;
for (size_t ti = blockIdx.x; ti<ie_rangeR*nb; ti+=gridDim.x){
if(threadIdx.x == 0) {
ie = ti/nb;
ib = ti%nb;
btracks = bTk(trk,ie,ib);
obtracks = bTk(outtrk,ie,ib);
}
for (int layer=0;layer<nlayer;++layer){
if(threadIdx.x == 0) {
bhits = bHit(hit,ie,ib,layer);
}
__syncthreads(); // publish btracks/obtracks/bhits to all threads
propagateToZ(&(*btracks).cov, &(*btracks).par, &(*btracks).q, &(*bhits).pos,
&(*obtracks).cov, &(*obtracks).par, &errorProp, &temp);
KalmanUpdate(&(*obtracks).cov,&(*obtracks).par,&(*bhits).cov,&(*bhits).pos);
// BUG FIX: barrier before thread 0 overwrites the shared pointers for
// the next layer/bunch (same race as in GPUsequence).
__syncthreads();
}
}
}
// Prefetches the track and hit arrays into device memory, one chunk per
// stream, plus one remainder chunk on the extra stream when nevts does not
// divide evenly among num_streams. With USE_ASYNC each chunk is prefetched
// on its own stream; otherwise everything goes through the default stream.
void prefetch_device(MPTRK* trk, MPHIT* hit, cudaStream_t* streams, int stream_chunk, int stream_remainder, int device) {
  for (int s = 0; s < num_streams; s++) {
#ifdef USE_ASYNC
    cudaStream_t st = streams[s];
#else
    cudaStream_t st = 0;
#endif
    cudaMemPrefetchAsync(trk + (s * stream_chunk), stream_chunk * sizeof(MPTRK), device, st);
    cudaMemPrefetchAsync(hit + (s * stream_chunk * nlayer), nlayer * stream_chunk * sizeof(MPHIT), device, st);
  }
  if (stream_remainder != 0) {
#ifdef USE_ASYNC
    cudaStream_t st = streams[num_streams];
#else
    cudaStream_t st = 0;
#endif
    cudaMemPrefetchAsync(trk + (num_streams * stream_chunk), stream_remainder * sizeof(MPTRK), device, st);
    cudaMemPrefetchAsync(hit + (num_streams * stream_chunk * nlayer), nlayer * stream_remainder * sizeof(MPHIT), device, st);
  }
}
// Prefetches the output track array back to host memory, chunk by chunk,
// mirroring the stream layout used by prefetch_device.
void prefetch_host(MPTRK* outtrk, cudaStream_t* streams, int stream_chunk, int stream_remainder) {
  for (int s = 0; s < num_streams; s++) {
#ifdef USE_ASYNC
    cudaStream_t st = streams[s];
#else
    cudaStream_t st = 0;
#endif
    cudaMemPrefetchAsync(outtrk + (s * stream_chunk), stream_chunk * sizeof(MPTRK), cudaCpuDeviceId, st);
  }
  if (stream_remainder != 0) {
#ifdef USE_ASYNC
    cudaStream_t st = streams[num_streams];
#else
    cudaStream_t st = 0;
#endif
    cudaMemPrefetchAsync(outtrk + (num_streams * stream_chunk), stream_remainder * sizeof(MPTRK), cudaCpuDeviceId, st);
  }
}
// Driver: generates smeared input tracks/hits from one reference track and
// hit, runs the propagate+Kalman-update GPU sequence NITER times across
// num_streams CUDA streams over unified memory, then reports timing and
// summary statistics of the output tracks.
int main (int argc, char* argv[]) {
#ifdef USE_ASYNC
printf("RUNNING CUDA Async Version!!\n");
#else
printf("RUNNING CUDA Sync Version!!\n");
#endif
#ifdef include_data
printf("Measure Both Memory Transfer Times and Compute Times!\n");
#else
printf("Measure Compute Times Only!\n");
#endif
printf("Streams: %d, blocks: %d, threads(x,y): (%d,%d)\n",num_streams,blockspergrid,threadsperblockx,threadsperblocky);
int itr;
// Reference track (6 parameters, 21-element packed symmetric covariance,
// charge) used as the template for all generated input tracks.
ATRK inputtrk = {
{-12.806846618652344, -7.723824977874756, 38.13014221191406,0.23732035065189902, -2.613372802734375, 0.35594117641448975},
{6.290299552347278e-07,4.1375109560704004e-08,7.526661534029699e-07,2.0973730840978533e-07,1.5431574240665213e-07,9.626245400795597e-08,-2.804026640189443e-06,
6.219111130687595e-06,2.649119409845118e-07,0.00253512163402557,-2.419662877381737e-07,4.3124190760040646e-07,3.1068903991780678e-09,0.000923913115050627,
0.00040678296006807003,-7.755406890332818e-07,1.68539375883925e-06,6.676875566525437e-08,0.0008420574605423793,7.356584799406111e-05,0.0002306247719158348},
1
};
// Reference hit (3D position + 6-element packed symmetric covariance).
AHIT inputhit = {
{-20.7824649810791, -12.24150276184082, 57.8067626953125},
{2.545517190810642e-06,-2.6680759219743777e-06,2.8030024168401724e-06,0.00014160551654640585,0.00012282167153898627,11.385087966918945}
};
printf("track in pos: %f, %f, %f \n", inputtrk.par[0], inputtrk.par[1], inputtrk.par[2]);
printf("track in cov: %.2e, %.2e, %.2e \n", inputtrk.cov[SymOffsets66(PosInMtrx(0,0,6))],
inputtrk.cov[SymOffsets66(PosInMtrx(1,1,6))],
inputtrk.cov[SymOffsets66(PosInMtrx(2,2,6))]);
printf("hit in pos: %f %f %f \n", inputhit.pos[0], inputhit.pos[1], inputhit.pos[2]);
printf("produce nevts=%i ntrks=%i smearing by=%f \n", nevts, ntrks, smear);
printf("NITER=%d\n", NITER);
// --- setup phase, timed separately with gettimeofday (milliseconds) ---
long setup_start, setup_stop;
struct timeval timecheck;
gettimeofday(&timecheck, NULL);
setup_start = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
#ifdef FIXED_RSEED
//[DEBUG by Seyong on Dec. 28, 2020] add an explicit srand(1) call to generate fixed inputs for better debugging.
srand(1);
#endif
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
// cudaFuncSetCacheConfig(GPUsequence,cudaFuncCachePreferL1);
// cudaFuncSetCacheConfig(GPUsequenceR,cudaFuncCachePreferL1);
// Inputs and outputs live in unified (managed) memory; the prefetch_*
// helpers migrate them explicitly before/after compute.
MPTRK* trk = prepareTracks(inputtrk);
MPHIT* hit = prepareHits(inputhit);
MPTRK* outtrk;
cudaMallocManaged((void**)&outtrk,nevts*nb*sizeof(MPTRK));
dim3 grid(blockspergrid,1,1);
dim3 block(threadsperblockx,threadsperblocky,1);
int device = -1;
cudaGetDevice(&device);
// Events split evenly across streams; any remainder is run by GPUsequenceR
// on one extra stream.
int stream_chunk = ((int)(nevts/num_streams))*nb;//*sizeof(MPTRK);
int stream_remainder = ((int)(nevts%num_streams))*nb;//*sizeof(MPTRK);
int stream_range;
if (stream_remainder == 0){ stream_range =num_streams;}
else{stream_range = num_streams+1;}
cudaStream_t streams[stream_range];
for (int s = 0; s<stream_range;s++){
//cudaStreamCreateWithFlags(&streams[s],cudaStreamNonBlocking);
cudaStreamCreate(&streams[s]);
}
#ifndef include_data
// Transfers excluded from the timed region: prefetch once up front.
prefetch_device(trk, hit, streams, stream_chunk, stream_remainder, device);
#ifdef USE_ASYNC
cudaDeviceSynchronize();
#endif
#endif
gettimeofday(&timecheck, NULL);
setup_stop = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
printf("done preparing!\n");
printf("Size of struct MPTRK trk[] = %ld\n", nevts*nb*sizeof(struct MPTRK));
printf("Size of struct MPTRK outtrk[] = %ld\n", nevts*nb*sizeof(struct MPTRK));
printf("Size of struct struct MPHIT hit[] = %ld\n", nevts*nb*sizeof(struct MPHIT));
// --- timed compute region: NITER iterations of the full sequence ---
auto wall_start = std::chrono::high_resolution_clock::now();
for(itr=0; itr<NITER; itr++){
#ifdef include_data
// Transfers included in the timed region: prefetch every iteration.
prefetch_device(trk, hit, streams, stream_chunk, stream_remainder, device);
#endif
for (int s = 0; s<num_streams;++s){
//printf("stream = %d, grid (%d, %d, %d), block(%d, %d, %d), stream_chunk = %d\n",s, grid.x, grid.y, grid.z, block.x, block.y, block.z, stream_chunk);
#ifdef USE_ASYNC
GPUsequence<<<grid,block,0,streams[s]>>>(trk+(s*stream_chunk),hit+(s*stream_chunk*nlayer),outtrk+(s*stream_chunk),s);
#else
GPUsequence<<<grid,block,0,0>>>(trk+(s*stream_chunk),hit+(s*stream_chunk*nlayer),outtrk+(s*stream_chunk),s);
#endif
}
if(stream_remainder != 0){
#ifdef USE_ASYNC
GPUsequenceR<<<grid,block,0,streams[num_streams]>>>(trk+(num_streams*stream_chunk),hit+(num_streams*stream_chunk*nlayer),outtrk+(num_streams*stream_chunk),num_streams);
#else
GPUsequenceR<<<grid,block,0,0>>>(trk+(num_streams*stream_chunk),hit+(num_streams*stream_chunk*nlayer),outtrk+(num_streams*stream_chunk),num_streams);
#endif
}
#ifdef include_data
prefetch_host(outtrk, streams, stream_chunk, stream_remainder);
#endif
} //end itr loop
cudaDeviceSynchronize();
auto wall_stop = std::chrono::high_resolution_clock::now();
#ifndef include_data
prefetch_host(outtrk, streams, stream_chunk, stream_remainder);
#ifdef USE_ASYNC
cudaDeviceSynchronize();
#endif
#endif
for (int s = 0; s<stream_range;s++){
cudaStreamDestroy(streams[s]);
}
auto wall_diff = wall_stop - wall_start;
auto wall_time = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(wall_diff).count()) / 1e6;
printf("setup time time=%f (s)\n", (setup_stop-setup_start)*0.001);
printf("done ntracks=%i tot time=%f (s) time/trk=%e (s)\n", nevts*ntrks*int(NITER), wall_time, wall_time/(nevts*ntrks*int(NITER)));
printf("formatted %i %i %i %i %i %f 0 %f %i\n",int(NITER),nevts, ntrks, bsize, nb, wall_time, (setup_stop-setup_start)*0.001, num_streams);
#ifdef DUMP_OUTPUT
FILE *fp_x;
FILE *fp_y;
FILE *fp_z;
fp_x = fopen("output_x.txt", "w");
fp_y = fopen("output_y.txt", "w");
fp_z = fopen("output_z.txt", "w");
#endif
// --- first pass over the outputs: accumulate means (and dump if enabled).
double avgx = 0, avgy = 0, avgz = 0;
double avgpt = 0, avgphi = 0, avgtheta = 0;
double avgdx = 0, avgdy = 0, avgdz = 0;
for (size_t ie=0;ie<nevts;++ie) {
for (size_t it=0;it<ntrks;++it) {
float x_ = x(outtrk,ie,it);
float y_ = y(outtrk,ie,it);
float z_ = z(outtrk,ie,it);
float pt_ = 1./ipt(outtrk,ie,it);
float phi_ = phi(outtrk,ie,it);
float theta_ = theta(outtrk,ie,it);
#ifdef DUMP_OUTPUT
fprintf(fp_x, "ie=%lu, it=%lu, %f\n",ie, it, x_);
fprintf(fp_y, "%f\n", y_);
fprintf(fp_z, "%f\n", z_);
#endif
//if(x_ ==0 || y_==0||z_==0){
//printf("x: %f,y: %f,z: %f, ie: %d, it: %f\n",x_,y_,z_,ie,it);
//continue;
//}
avgpt += pt_;
avgphi += phi_;
avgtheta += theta_;
avgx += x_;
avgy += y_;
avgz += z_;
float hx_ = x(hit,ie,it);
float hy_ = y(hit,ie,it);
float hz_ = z(hit,ie,it);
//if(x_ ==0 || y_==0 || z_==0){continue;}
// NOTE(review): relative residuals divide by the output coordinate; a zero
// coordinate would produce inf/nan here (the guard above is commented out).
avgdx += (x_-hx_)/x_;
avgdy += (y_-hy_)/y_;
avgdz += (z_-hz_)/z_;
}
}
#ifdef DUMP_OUTPUT
fclose(fp_x);
fclose(fp_y);
fclose(fp_z);
fp_x = fopen("input_x.txt", "w");
fp_y = fopen("input_y.txt", "w");
fp_z = fopen("input_z.txt", "w");
#endif
avgpt = avgpt/double(nevts*ntrks);
avgphi = avgphi/double(nevts*ntrks);
avgtheta = avgtheta/double(nevts*ntrks);
avgx = avgx/double(nevts*ntrks);
avgy = avgy/double(nevts*ntrks);
avgz = avgz/double(nevts*ntrks);
avgdx = avgdx/double(nevts*ntrks);
avgdy = avgdy/double(nevts*ntrks);
avgdz = avgdz/double(nevts*ntrks);
// --- second pass: accumulate variances about the means (and dump inputs).
double stdx = 0, stdy = 0, stdz = 0;
double stddx = 0, stddy = 0, stddz = 0;
for (size_t ie=0;ie<nevts;++ie) {
for (size_t it=0;it<ntrks;++it) {
float x_ = x(outtrk,ie,it);
float y_ = y(outtrk,ie,it);
float z_ = z(outtrk,ie,it);
stdx += (x_-avgx)*(x_-avgx);
stdy += (y_-avgy)*(y_-avgy);
stdz += (z_-avgz)*(z_-avgz);
float hx_ = x(hit,ie,it);
float hy_ = y(hit,ie,it);
float hz_ = z(hit,ie,it);
stddx += ((x_-hx_)/x_-avgdx)*((x_-hx_)/x_-avgdx);
stddy += ((y_-hy_)/y_-avgdy)*((y_-hy_)/y_-avgdy);
stddz += ((z_-hz_)/z_-avgdz)*((z_-hz_)/z_-avgdz);
#ifdef DUMP_OUTPUT
x_ = x(trk,ie,it);
y_ = y(trk,ie,it);
z_ = z(trk,ie,it);
fprintf(fp_x, "%f\n", x_);
fprintf(fp_y, "%f\n", y_);
fprintf(fp_z, "%f\n", z_);
#endif
}
}
#ifdef DUMP_OUTPUT
fclose(fp_x);
fclose(fp_y);
fclose(fp_z);
#endif
// NOTE(review): sqrtf rounds the double accumulators through float; sqrt
// would keep double precision for these summary statistics.
stdx = sqrtf(stdx/double(nevts*ntrks));
stdy = sqrtf(stdy/double(nevts*ntrks));
stdz = sqrtf(stdz/double(nevts*ntrks));
stddx = sqrtf(stddx/double(nevts*ntrks));
stddy = sqrtf(stddy/double(nevts*ntrks));
stddz = sqrtf(stddz/double(nevts*ntrks));
printf("track x avg=%f std/avg=%f\n", avgx, fabs(stdx/avgx));
printf("track y avg=%f std/avg=%f\n", avgy, fabs(stdy/avgy));
printf("track z avg=%f std/avg=%f\n", avgz, fabs(stdz/avgz));
printf("track dx/x avg=%f std=%f\n", avgdx, stddx);
printf("track dy/y avg=%f std=%f\n", avgdy, stddy);
printf("track dz/z avg=%f std=%f\n", avgdz, stddz);
printf("track pt avg=%f\n", avgpt);
printf("track phi avg=%f\n", avgphi);
printf("track theta avg=%f\n", avgtheta);
//free(trk);
//free(hit);
//free(outtrk);
// Managed allocations must be released with cudaFree, not free().
cudaFree(trk);
cudaFree(hit);
cudaFree(outtrk);
return 0;
}
|
3,409 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 512 // You can change this
#define NUM_OF_ELEMS 1048576 // You can change this
/* #define funcCheck(stmt) { \ */
/* cudaError_t err = stmt; \ */
/* if (err != cudaSuccess) \ */
/* { \ */
/* printf( "Failed to run stmt %d ", __LINE__); \ */
/* printf( "Got CUDA error ... %s ", cudaGetErrorString(err)); \ */
/* return -1; \ */
/* } \ */
/* } */
// Block-level tree reduction: each block loads 2*BLOCK_SIZE elements of
// `input` into shared memory (padding past `len` with zeros), reduces them,
// and writes one partial sum per block into `output[blockIdx.x]`.
// Assumes blockDim.x == BLOCK_SIZE.
__global__ void array_sum(float * input, float * output, int len){
  __shared__ float buf[2 * BLOCK_SIZE];
  const unsigned int tid  = threadIdx.x;
  const unsigned int base = 2 * blockIdx.x * blockDim.x;  // first element of this block's segment
  // Load the two halves of the segment, zero-filling beyond the end.
  buf[tid] = (base + tid < len) ? input[base + tid] : 0.0;
  const unsigned int upper = base + blockDim.x + tid;
  buf[blockDim.x + tid] = (upper < len) ? input[upper] : 0.0;
  __syncthreads();
  // Halve the stride each step; buf[0] ends up holding the block's sum.
  // The barrier is outside the `if` so every thread reaches it.
  for (unsigned int stride = blockDim.x; stride > 0; stride /= 2){
    if (tid < stride)
      buf[tid] += buf[tid + stride];
    __syncthreads();
  }
  // One write per block, skipped for blocks entirely past the end of input.
  if (tid == 0 && base < len){
    output[blockIdx.x] = buf[0];
  }
}
// Host driver: fills an input vector with ones, reduces it to per-block
// partial sums on the GPU, finishes the reduction on the host, and reports
// the sum plus the elapsed GPU time.
int main()
{
  float * h_input;   // host input vector
  float * h_output;  // host buffer for per-block partial sums
  float * d_input;   // device input vector
  float * d_output;  // device partial-sum buffer
  int numOutputElements; // number of elements in the output list
  h_input = (float*)malloc(sizeof(float) * NUM_OF_ELEMS);
  for (int i=0; i < NUM_OF_ELEMS; i++){
    h_input[i] = 1.0; // Add your input values
  }
  // Each block reduces 2*BLOCK_SIZE inputs, so round the block count up.
  numOutputElements = NUM_OF_ELEMS / (BLOCK_SIZE*2);
  if (NUM_OF_ELEMS % (BLOCK_SIZE*2)){
    numOutputElements++;
  }
  h_output = (float*) malloc(numOutputElements * sizeof(float));
  // Allocate GPU memory and copy the input over.
  cudaMalloc((void **)&d_input, NUM_OF_ELEMS * sizeof(float));
  cudaMalloc((void **)&d_output, numOutputElements * sizeof(float));
  cudaMemcpy(d_input, h_input, NUM_OF_ELEMS * sizeof(float), cudaMemcpyHostToDevice);
  dim3 DimGrid( numOutputElements, 1, 1);
  dim3 DimBlock(BLOCK_SIZE, 1, 1);
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  float milliseconds = 0;
  cudaEventRecord(start);
  array_sum<<<DimGrid, DimBlock>>>(d_input, d_output, NUM_OF_ELEMS);
  // Blocking copy of the partial sums back to the host.
  cudaMemcpy(h_output, d_output, numOutputElements * sizeof(float), cudaMemcpyDeviceToHost);
  // BUG FIX: record and consume the stop event *before* the host-side final
  // reduction and printf. Previously those were included in the interval, so
  // the reported "GPU time" also measured host work and console output.
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&milliseconds, start, stop);
  /********************************************************************
   * Reduce output vector on the host
   ********************************************************************/
  for (int j = 1; j < numOutputElements; j++){
    h_output[0] += h_output[j];
  }
  printf("Reduced Sum from GPU = %f\n", h_output[0]);
  printf("GPU time to cuda reduction %f ms\n", milliseconds);
  // Release the events and the GPU/host memory.
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_input);
  cudaFree(d_output);
  free(h_input);
  free(h_output);
  return 0;
}
|
3,410 | #include <stdio.h>
// Prints a greeting from the host; runs entirely on the CPU.
void helloCPU()
{
printf("Hello from the CPU.\n");
}
/*
* `helloGPU` の定義を、GPU 上で起動できるカーネルに
* リファクタリングします。「Hello from the GPU!」と
* 出力されるようにメッセージを更新します。
*/
/*
 * `helloGPU` refactored into a CUDA kernel, as the exercise comment above
 * requests: it now runs on the GPU and prints the updated message.
 */
__global__ void helloGPU()
{
  printf("Hello from the GPU!\n");
}

int main()
{
  helloCPU();
  /*
   * Launch `helloGPU` as a kernel on the GPU: one block of one thread.
   */
  helloGPU<<<1, 1>>>();
  /*
   * Synchronize so the CPU thread waits for the kernel to complete (and its
   * device-side printf output to be flushed) before the program exits.
   */
  cudaDeviceSynchronize();
}
|
3,411 | #include<stdio.h>
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
// Kernel: every thread prints its block index via device-side printf.
__global__ void hello()
{
printf("Hello world! I'm a thread in block %d\n", blockIdx.x);
}
int main()
{
// Launch NUM_BLOCKS blocks of BLOCK_WIDTH thread(s) each.
hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();
// Wait for the kernel so its printf output is flushed before exit.
cudaDeviceSynchronize();
return 0;
}
|
3,412 | #include <stdio.h>
#include <stdlib.h>
#define N 4 // quantidades de numeros
#define I 2 // adjacentes
// codigo device
// Device code: replaces each element of `a` with the sum of itself and its
// neighbours up to I positions away (clamped at the array bounds).
// Assumes a single block with N threads (launched as <<<1, N>>>).
__global__ void soma_adj(int *a){
  int ind = threadIdx.x;
  int soma = 0;
  // Phase 1: each thread reads its window from the original array.
  if (ind < N){
    for (int i = ind - I; i < ind + I + 1; i++){
      if (i >= 0 && i < N)
        soma += a[i];
    }
  }
  // BUG FIX: the barrier must be reached by *all* threads of the block, so
  // it cannot sit inside the divergent `if (ind < N)` branch as before. It
  // separates the read phase from the write phase, guaranteeing no thread
  // overwrites a[] while another is still reading it.
  __syncthreads();
  // Phase 2: publish the results.
  if (ind < N)
    a[ind] = soma;
}
// Código host
// Host code: builds the input 1..N, runs the adjacent-sum kernel on the
// GPU, and prints the resulting array.
int main(){
  int host_vals[N];
  int *dev_vals;
  // Initialise the host array with 1..N.
  for (int i = 0; i < N; i++)
    host_vals[i] = i + 1;
  // Allocate device storage and copy the input over.
  cudaMalloc((void**)&dev_vals, N * sizeof(int));
  cudaMemcpy(dev_vals, host_vals, N * sizeof(int), cudaMemcpyHostToDevice);
  // Kernel launch: one block of N threads (the omitted third dimension
  // defaults to 1).
  soma_adj<<<1, N>>>(dev_vals);
  // Copy the result back from the GPU.
  cudaMemcpy(host_vals, dev_vals, N * sizeof(int), cudaMemcpyDeviceToHost);
  // Show the result.
  for (int i = 0; i < N; i++)
    printf("%d ", host_vals[i]);
  // Release device memory.
  cudaFree(dev_vals);
  return 0;
}
|
3,413 | #include "includes.h"
//#define __OUTPUT_PIX__
#define BLOCK_SIZE 32
__constant__ __device__ float lTable_const[1064];
__constant__ __device__ float mr_const[3];
__constant__ __device__ float mg_const[3];
__constant__ __device__ float mb_const[3];
// Integer-factor (2x/3x/4x) box downsampling of a 3-channel planar image:
// phase 1 sums groups of source rows (y direction) into per-channel scratch
// rows dev_C*_tmp; phase 2 sums groups of scratch columns (x direction)
// into the output rows, scaled by r/factor. yas_const/ybs_const map the
// lookup index to source/destination row indices.
// NOTE(review): the __syncthreads() calls sit inside the
// (x_pos < dst_wd && y_pos < dst_ht) guard, so blocks straddling the image
// edge diverge at a barrier; dev_C*_tmp is also shared by all blocks, so
// destination rows processed concurrently in different blocks race on it.
// Both issues predate this change and are left untouched here.
__global__ void lin2lin_resmpl_good_gpu_kernel(float *dev_in_img, float *dev_out_img, float *dev_C0_tmp, float *dev_C1_tmp, float *dev_C2_tmp, int org_wd, int org_ht, int dst_wd, int dst_ht, int n_channels, float r, int *yas_const, int *ybs_const)
{
  unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x);
  unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y);
  if ((x_pos < dst_wd) && (y_pos < dst_ht)) {
    int ya, yb;
    float *A00, *A01, *A02, *A03, *B00;
    float *A10, *A11, *A12, *A13, *B10;
    float *A20, *A21, *A22, *A23, *B20;
    // Channel base planes: A<c> = input plane c, B<c> = output plane c.
    float *A0 = dev_in_img + (0 * org_ht * org_wd);
    float *B0 = dev_out_img + (0 * dst_ht * dst_wd);
    float *A1 = dev_in_img + (1 * org_ht * org_wd);
    float *B1 = dev_out_img + (1 * dst_ht * dst_wd);
    float *A2 = dev_in_img + (2 * org_ht * org_wd);
    float *B2 = dev_out_img + (2 * dst_ht * dst_wd);
    // Same size: plain copy (input read with an n_channels stride).
    if (org_ht == dst_ht && org_wd == dst_wd) {
      int out_img_idx = y_pos + (dst_wd * x_pos);
      B0[out_img_idx] = A0[out_img_idx * n_channels];
      B1[out_img_idx] = A1[out_img_idx * n_channels];
      B2[out_img_idx] = A2[out_img_idx * n_channels];
      return;
    }
    // Destination row -> lookup index, scaled by the integer factor.
    int y1 = 0;
    if (org_ht == 2 * dst_ht) {
      y1 += 2 * y_pos;
    } else if (org_ht == 3 * dst_ht) {
      y1 += 3 * y_pos;
    } else if (org_ht == 4 * dst_ht) {
      y1 += 4 * y_pos;
    }
    if (y_pos == 0)
      y1 = 0;
    ya = yas_const[y1];
    // Row pointers: A<c>0..A<c>3 are up to four consecutive source rows of
    // channel c starting at row ya.
    A00 = A0 + (ya * org_wd);
    A01 = A00 + (org_wd);
    A02 = A01 + (org_wd);
    A03 = A02 + (org_wd);
    A10 = A1 + (ya * org_wd);
    // BUG FIX: A11..A13 and A21..A23 previously chained off A00..A02, so
    // channels 1 and 2 were resampled from channel 0's rows.
    A11 = A10 + (org_wd);
    A12 = A11 + (org_wd);
    A13 = A12 + (org_wd);
    A20 = A2 + (ya * org_wd);
    A21 = A20 + (org_wd);
    A22 = A21 + (org_wd);
    A23 = A22 + (org_wd);
    yb = ybs_const[y1];
    B00 = B0 + (yb * dst_wd);
    B10 = B1 + (yb * dst_wd);
    B20 = B2 + (yb * dst_wd);
    // resample along y direction
    if (org_ht == 2 * dst_ht) {
      dev_C0_tmp[x_pos] = A00[x_pos] + A01[x_pos];
      dev_C1_tmp[x_pos] = A10[x_pos] + A11[x_pos];
      dev_C2_tmp[x_pos] = A20[x_pos] + A21[x_pos];
    } else if (org_ht == 3 * dst_ht) {
      dev_C0_tmp[x_pos] = A00[x_pos] + A01[x_pos] + A02[x_pos];
      dev_C1_tmp[x_pos] = A10[x_pos] + A11[x_pos] + A12[x_pos];
      dev_C2_tmp[x_pos] = A20[x_pos] + A21[x_pos] + A22[x_pos];
    } else if (org_ht == 4 * dst_ht) {
      dev_C0_tmp[x_pos] = A00[x_pos] + A01[x_pos] + A02[x_pos] + A03[x_pos];
      dev_C1_tmp[x_pos] = A10[x_pos] + A11[x_pos] + A12[x_pos] + A13[x_pos];
      dev_C2_tmp[x_pos] = A20[x_pos] + A21[x_pos] + A22[x_pos] + A23[x_pos];
    }
    /* ensure that all threads have calculated the values for C until this point */
    __syncthreads();
    // resample along x direction (B -> C)
    if (org_wd == 2 * dst_wd) {
      B00[x_pos]= (dev_C0_tmp[2 * x_pos] + dev_C0_tmp[(2 * x_pos) + 1]) * (r / 2);
      B10[x_pos]= (dev_C1_tmp[2 * x_pos] + dev_C1_tmp[(2 * x_pos) + 1]) * (r / 2);
      B20[x_pos]= (dev_C2_tmp[2 * x_pos] + dev_C2_tmp[(2 * x_pos) + 1]) * (r / 2);
    } else if (org_wd == 3 * dst_wd) {
      B00[x_pos] = (dev_C0_tmp[3 * x_pos] + dev_C0_tmp[(3 * x_pos) + 1] + dev_C0_tmp[(3 * x_pos) + 2]) * (r / 3);
      B10[x_pos] = (dev_C1_tmp[3 * x_pos] + dev_C1_tmp[(3 * x_pos) + 1] + dev_C1_tmp[(3 * x_pos) + 2]) * (r / 3);
      B20[x_pos] = (dev_C2_tmp[3 * x_pos] + dev_C2_tmp[(3 * x_pos) + 1] + dev_C2_tmp[(3 * x_pos) + 2]) * (r / 3);
    } else if (org_wd == 4 * dst_wd) {
      B00[x_pos] = (dev_C0_tmp[4 * x_pos] + dev_C0_tmp[(4 * x_pos) + 1] + dev_C0_tmp[(4 * x_pos) + 2] + dev_C0_tmp[(4 * x_pos) + 3]) * (r / 4);
      B10[x_pos] = (dev_C1_tmp[4 * x_pos] + dev_C1_tmp[(4 * x_pos) + 1] + dev_C1_tmp[(4 * x_pos) + 2] + dev_C1_tmp[(4 * x_pos) + 3]) * (r / 4);
      B20[x_pos] = (dev_C2_tmp[4 * x_pos] + dev_C2_tmp[(4 * x_pos) + 1] + dev_C2_tmp[(4 * x_pos) + 2] + dev_C2_tmp[(4 * x_pos) + 3]) * (r / 4);
    }
    __syncthreads();
  }
}
3,414 | //pass
//--gridDim=1024 --blockDim=1024
#include <cuda.h>
// GPUVerify/bug-injection benchmark kernel: squares each element in place.
// NOTE(review): there is deliberately no `idx < n` guard — the harness
// annotations above fix --gridDim=1024 --blockDim=1024, and the benchmark
// presumably allocates a matching array; do not "fix" this without updating
// the harness. The MUTATION block and the BUGINJECT marker are part of the
// bug-injection tooling.
__global__ void square_array(float* dataView)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
dataView[idx] = dataView[idx] * dataView[idx];
#ifdef MUTATION
dataView[idx+1] = dataView[idx+1];
#endif
/* BUGINJECT: ADD_ACCESS, UP */
}
|
3,415 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <chrono>
using namespace std::chrono;
// comparison between insert vs copy vs resize
// Micro-benchmark: time appending a device_vector onto a host_vector
// `reps` times via host_vector::insert (compare with the commented-out
// copy+resize variant below).
int main(){
  // c1. larger vec to smaller vec -> in
  int n = 1000000;   // elements copied per repetition
  int reps = 8;      // number of append operations
  // (removed unused local `init_size` — it was never read)
  thrust::host_vector<float> tmaster(0);     // starts empty, grows on insert
  thrust::device_vector<float> temp(n, 1);   // device source, all ones
  auto START = high_resolution_clock::now();
  for(int i = 0; i < reps; i++){
    // Each insert performs a device->host copy of n floats and may trigger
    // a reallocation of tmaster.
    tmaster.insert(tmaster.end(), temp.begin(), temp.end());
  }
  auto End = high_resolution_clock::now();
  // for( int i = 0; i < tmaster.size(); i++)
  //   std::cout << tmaster[i] << " ";
  auto red_duration = duration_cast<microseconds>(End - START);
  std::cout << "insert time = "<< red_duration.count()/1e6 << std::endl;
  std::cout << std::endl;
  std::cout << "tmaster.size()= " << tmaster.size() << std::endl;
  return 0;
}
// int main(){
// // c1. larger vec to smaller vec -> in
// int n = 1000000;
// int reps = 8;
// int init_size = n*reps/2;
// thrust::host_vector<float> tmaster(2*n*reps);
// thrust::device_vector<float> temp(n, 1);
// auto START = high_resolution_clock::now();
// for(int i = 0; i < reps; i++){
// thrust::copy(temp.begin(), temp.end(), tmaster.begin() + i*n);
// }
// tmaster.resize(n*reps);
// auto End = high_resolution_clock::now();
// // for( int i = 0; i < tmaster.size(); i++)
// // std::cout << tmaster[i] << " ";
// auto red_duration = duration_cast<microseconds>(End - START);
// std::cout << "copy + resize time = "<< red_duration.count()/1e6 << std::endl;
// std::cout << std::endl;
// std::cout << "tmaster.size()= " << tmaster.size() << std::endl;
// return 0;
// }
|
3,416 | #include <stdio.h>
// Device-side helper: returns the sum of two integers.
__device__ int addem( int a, int b )
{
const int sum = a + b;
return sum;
}
// Kernel wrapper around addem(): computes a + b on the device and stores
// the result into *c (a device pointer). Intended for a <<<1,1>>> launch.
__global__ void add( int a, int b, int *c )
{
*c = addem( a, b );
}
/* Demo driver: computes 2 + 7 on the GPU and prints the result.
 * Improved over the original: every CUDA call is now checked (cudaMalloc,
 * the launch via cudaGetLastError, and the blocking cudaMemcpy), so a
 * failure is reported instead of printing garbage, and main returns an
 * explicit status. */
int main(void)
{
int a, b, c;
int *dev_c = NULL;
/* Allocate memory on the device */
cudaError_t err = cudaMalloc((void**)&dev_c, sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
return 1;
}
a = 2;
b = 7;
add<<<1,1>>>(a, b, dev_c);
/* Kernel launches are asynchronous and report config errors here. */
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
cudaFree(dev_c);
return 1;
}
/* Copy contents of dev_c back to c (blocking copy, so it also syncs). */
err = cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed: %s\n", cudaGetErrorString(err));
cudaFree(dev_c);
return 1;
}
printf( "%d + %d = %d\n", a,b,c );
cudaFree( dev_c);
return 0;
}
|
3,417 | #include "includes.h"
/* Accumulates the horizontal (0/180-degree, distance 1) grey-level
 * co-occurrence pairs of an nx-wide image into the glcm histogram:
 * for every pixel that is not the last in its row, the pair
 * (A[idx], A[idx+1]) increments glcm[max*A[idx+1] + A[idx]] atomically.
 * Launch layout (as in the original): block b covers row b, thread t
 * covers column t, so idx = b*nx + t.
 * FIX: the original scanned all nx rows per thread just to discover which
 * row idx falls in; the row/column are directly computable, turning an
 * O(nx) loop into O(1) with identical behaviour. */
__global__ void glcm_calculation_180(int *A,int *glcm, const int nx, const int ny,int max){
if (nx <= 0) return; // guard the division below
unsigned int idx = blockIdx.x*nx + threadIdx.x;
unsigned int row = idx / nx;
// Original condition: some i in [0,nx) satisfies i*nx <= idx < (i+1)*nx - 1,
// i.e. the row index is < nx and idx is not the last column of its row.
if (row < (unsigned int)nx && (idx % nx) != (unsigned int)(nx - 1)) {
int k = max*A[idx+1] + A[idx];
atomicAdd(&glcm[k], 1);
}
}
3,418 | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "n_body.h"
/* All-pairs gravitational force step, tiled through shared memory.
 * One thread per body; gridDim.x tiles are streamed through a
 * BLOCK_SIZE-wide shared buffer (assumes blockDim.x == BLOCK_SIZE —
 * the host launches it that way).
 * FIX 1: the original wrapped the whole tile loop (shared loads AND both
 * __syncthreads()) in `if (i < n)`; when n is not a multiple of
 * blockDim.x the last block's threads diverge around the barrier, which
 * is undefined behaviour. All threads now reach every barrier.
 * FIX 2: the shared load p[tile*blockDim.x + threadIdx.x] and the inner
 * j-loop could read past the end of p in the final tile; loads are now
 * guarded and the inner loop is clamped to the tile's valid count. */
__global__ void calculateBodyForce(float4 *p, float4 *v, float dt, int n) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f;
  for (int tile = 0; tile < gridDim.x; tile++) {
    __shared__ float3 shared_position[BLOCK_SIZE];
    int load_idx = tile * blockDim.x + threadIdx.x;
    if (load_idx < n) {
      float4 temp_position = p[load_idx];
      shared_position[threadIdx.x] = make_float3(temp_position.x, temp_position.y, temp_position.z);
    }
    __syncthreads(); // tile fully loaded before anyone reads it
    // number of valid bodies in this tile (may be < BLOCK_SIZE at the end)
    int tile_count = n - tile * blockDim.x;
    if (tile_count > BLOCK_SIZE) tile_count = BLOCK_SIZE;
    if (i < n) {
      float4 my_position = p[i]; // hoisted loop-invariant global read
      for (int j = 0; j < tile_count; j++) {
        float dx = shared_position[j].x - my_position.x;
        float dy = shared_position[j].y - my_position.y;
        float dz = shared_position[j].z - my_position.z;
        float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
        float invDist = rsqrtf(distSqr);
        float invDist3 = invDist * invDist * invDist;
        Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3;
      }
    }
    __syncthreads(); // tile consumed before the next iteration overwrites it
  } //tile loop ends here
  if (i < n) {
    v[i].x += dt*Fx; v[i].y += dt*Fy; v[i].z += dt*Fz;
  }
}
/* N-body simulation driver: nIters steps of force computation on the GPU
 * followed by host-side position integration.
 * Improved over the original: malloc/cudaMalloc and the kernel launch are
 * now checked, and main returns an explicit status. */
int main(const int argc, const char** argv) {
int nBodies = 30000;
const float dt = 0.01f; // time step
const int nIters = 100; // simulation iterations
int size = 2*nBodies*sizeof(float4); // pos + vel: nBodies float4 each
float *buf = (float*)malloc(size);
if (buf == NULL) {
fprintf(stderr, "host allocation failed\n");
return 1;
}
NBodySystem p = { (float4*)buf, ((float4*)buf) + nBodies };
generateRandomizeBodies(buf, 8*nBodies); // Init pos / vel data (8 floats per body)
float *d_buf = NULL;
cudaError_t err = cudaMalloc(&d_buf, size);
if (err != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
free(buf);
return 1;
}
NBodySystem d_p = { (float4*)d_buf, ((float4*)d_buf) + nBodies };
int nBlocks = (nBodies + BLOCK_SIZE - 1) / BLOCK_SIZE; // ceil-div
for (int iter = 1; iter <= nIters; iter++) {
cudaMemcpy(d_buf, buf, size, cudaMemcpyHostToDevice);
calculateBodyForce<<<nBlocks, BLOCK_SIZE>>>(d_p.pos, d_p.vel, dt, nBodies);
err = cudaGetLastError(); // launch-configuration errors surface here
if (err != cudaSuccess) {
fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
break;
}
// blocking copy also synchronizes with the kernel
cudaMemcpy(buf, d_buf, size, cudaMemcpyDeviceToHost);
for (int i = 0 ; i < nBodies; i++) { // integrate position on the host
p.pos[i].x += p.vel[i].x*dt;
p.pos[i].y += p.vel[i].y*dt;
p.pos[i].z += p.vel[i].z*dt;
}
printf("Iteration %d\n", iter);
}
free(buf);
cudaFree(d_buf);
return 0;
}
/* Fills data[0..n) with pseudo-random floats uniform in [-1, 1], drawn
 * from the C library rand() stream (seed controlled by the caller). */
void generateRandomizeBodies(float *data, int n) {
const float half_range = (float)RAND_MAX;
int i = 0;
while (i < n) {
data[i] = 2.0f * ((float)rand() / half_range) - 1.0f;
++i;
}
}
|
3,419 | #include <stdio.h>
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
/* Prints a summary of device 0's hardware properties.
 * FIX: the "registers available per block" line previously printed
 * iProp.sharedMemPerBlock; the matching cudaDeviceProp field is
 * regsPerBlock. Also checks the cudaGetDeviceProperties call. */
int main(int argc, char*argv[]) {
int iDev=0;
cudaDeviceProp iProp;
cudaError_t err = cudaGetDeviceProperties(&iProp, iDev);
if (err != cudaSuccess) {
cout << "cudaGetDeviceProperties failed: " << cudaGetErrorString(err) << endl;
return EXIT_FAILURE;
}
cout << "Device " << iDev << ": " << iProp.name << endl;
cout << "Number of multiprocessors: " << iProp.multiProcessorCount << endl;
cout << "Total amount of constant memory: " << iProp.totalConstMem/1024.0 << endl;
cout << "Total number of registers available per block: " << iProp.regsPerBlock << endl;
cout << "Warp size: " << iProp.warpSize << endl;
cout << "Maximum number of threads per block: " << iProp.maxThreadsPerBlock << endl;
cout << "Maximum number of threads per multiprocessor: " << iProp.maxThreadsPerMultiProcessor << endl;
cout << "Maximum number of warps per multiprocessor: " << iProp.maxThreadsPerMultiProcessor/32 << endl;
return EXIT_SUCCESS;
}
|
3,420 | #include<stdio.h>
#include<cuda.h>
// Minimal demo kernel: each launched thread prints a greeting via
// device-side printf (output is flushed at the next synchronization).
__global__ void dkernel()
{
printf("Hello World! \n");
}
int main()
{
// Launch one block of 32 threads (a single warp); each prints the greeting.
dkernel<<<1, 32>>>(); //32 threads within 1 thread block
// Block until the kernel completes so its printf output is flushed
// before the process exits.
cudaDeviceSynchronize();
return 0;
}
3,421 | #include <stdio.h>
#include <stdlib.h>
/* Reverses the byte order of a 32-bit int (big-endian <-> little-endian),
 * as required by the MNIST IDX file headers. */
int reverseInt(int i) {
unsigned char b0 = (i >> 24) & 255;   /* original most-significant byte */
unsigned char b1 = (i >> 16) & 255;
unsigned char b2 = (i >> 8) & 255;
unsigned char b3 = i & 255;           /* original least-significant byte */
return ((int)b3 << 24) + ((int)b2 << 16) + ((int)b1 << 8) + b0;
}
/* Reads num_images digits and labels from MNIST IDX files.
 * Pixels are binarized ((value/255) > 0.5 -> 1, else 0) into a flat int
 * array of num_images * rows * cols; labels into an int array of
 * num_images. Both arrays are malloc'd here and returned through the
 * out-parameters; the caller owns (and must free) them. Exits with a
 * message on any open/format error.
 * FIX 1: files are binary; open with "rb" instead of "r" so platforms
 * that translate line endings don't corrupt the stream.
 * FIX 2: the label-file item count is big-endian like every other header
 * field, but was compared without reverseInt(); on little-endian hosts
 * the check was meaningless. It is now byte-swapped first.
 * FIX 3: the pixel index used num_rows*j + k for the column stride; the
 * correct stride is num_cols (identical for square MNIST images, but now
 * also right for non-square data). */
void readMNIST(const char *imagefile,const char *labelfile, int num_images, int **imagedata, int **labeldata) {
/* read digits */
FILE *file = fopen(imagefile, "rb");
if (file == NULL) {
fprintf(stderr, "%s open failure\n", imagefile);
exit(1);
}
int header = 0;
int count = 0;
int num_rows = 0;
int num_cols = 0;
fread(&header, sizeof(header), 1, file);
header = reverseInt(header);
if (header != 2051) { /* IDX magic for image files */
fprintf(stderr, "Invalid image file header\n");
exit(1);
}
fread(&count, sizeof(count), 1, file);
count = reverseInt(count);
if (count < num_images) {
fprintf(stderr, "Trying to read too many digits\n");
exit(1);
}
fread(&num_rows, sizeof(num_rows), 1, file);
num_rows = reverseInt(num_rows);
fread(&num_cols, sizeof(num_cols), 1, file);
num_cols = reverseInt(num_cols);
int *images = (int*)malloc(sizeof(int) * num_images * num_rows * num_cols);
int i, j , k;
for (i = 0; i < num_images; i++) {
for (j = 0; j < num_rows; j++) {
for (k = 0; k < num_cols; k++) {
unsigned char temp = 0;
fread(&temp, sizeof(temp), 1, file);
/* binarize at mid-grey */
images[num_rows * num_cols * i + num_cols * j + k] = ((double)temp / 255) > 0.5 ? 1 : 0;
}
}
}
fclose(file);
/* read labels */
file = fopen(labelfile, "rb");
if (file == NULL) {
fprintf(stderr, "%s open failure\n", labelfile);
exit(1);
}
fread(&header, sizeof(header), 1, file);
header = reverseInt(header);
if (header != 2049) { /* IDX magic for label files */
fprintf(stderr, "Invalid label file header\n");
exit(1);
}
fread(&count, sizeof(count), 1, file);
count = reverseInt(count); /* big-endian, like the image-file count */
if (count < num_images) {
fprintf(stderr, "Trying to read too many digits\n");
exit(1);
}
int *labels = (int*)malloc(sizeof(int) * num_images);
for (i = 0; i < num_images; i++) {
unsigned char temp = 0;
fread(&temp, sizeof(temp), 1, file);
labels[i] = (int)temp;
}
fclose(file);
*imagedata = images;
*labeldata = labels;
}
|
3,422 | #include "includes.h"
// Process-wide state for the MPI/CUDA helper library: which device (if
// any) is in use, the stream all GPU work is issued on, and whether
// initialization has run. Held in the single `global_state` instance below.
struct MPIGlobalState {
    // The CUDA device to run on, or -1 for CPU-only.
    int device = -1;
    // A CUDA stream (if device >= 0) initialized on the device.
    // NOTE(review): uninitialized (and must not be used) while device == -1
    // or initialized == false.
    cudaStream_t stream;
    // Whether the global state (and MPI) has been initialized.
    bool initialized = false;
};
// MPI relies on global state for most of its internal operations, so we cannot
// design a library that avoids global state. Instead, we centralize it in this
// single global struct.
static MPIGlobalState global_state;
// Initialize the library, including MPI and if necessary the CUDA device.
// If device == -1, no GPU is used; otherwise, the device specifies which CUDA
// device should be used. All data passed to other functions must be on that device.
//
// An exception is thrown if MPI or CUDA cannot be initialized.
/* Element-wise vector addition: out[i] = x[i] + y[i] for i in [0, N).
 * Grid-stride loop, so any grid/block configuration covers all N elements. */
__global__ void kernel_add(const float* x, const float* y, const int N, float* out) {
    const int stride = blockDim.x * gridDim.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < N) {
        out[i] = x[i] + y[i];
        i += stride;
    }
}
3,423 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
/**
* Collates results of segment scan (or segment prefix sum), putting last value from each segment into an array
* Note that behavior for empty segments is undefined. E.g. if there's no segment with source node 2, then output[2] might contain garbage data.
* @param src The segment ID for each edge in the scan result
* @param scanResult The scan result or prefix sum result that we're collating
* @param output The output
* @param numEdges The size of the src and scanResult arrays.
*/
/* Collates a segment scan: writes the last scan value of each segment to
 * output[segmentId]. src[i] is the segment ID of edge i; an element is
 * the last of its segment iff it is the final element overall or the next
 * element has a different segment ID. Behavior for empty segments is
 * undefined (their output slot is never written), as documented above.
 * FIX 1: the original read src[i+1] even when i == numEdges-1 (out of
 * bounds by one). The short-circuit below prevents that read.
 * FIX 2: the original also wrote output[src[i]] for *every* element whose
 * segment equals the last segment (src[i] == src[numEdges-1]); those
 * unsynchronized redundant stores raced with the true last-element store
 * and could leave a non-final scan value in the output. */
__global__ void collateSegments_gpu(int * src, int * scanResult, int * output, int numEdges) {
int totalThreads = blockDim.x * gridDim.x; //the total amount of threads
int tid = blockIdx.x * blockDim.x + threadIdx.x; //the thread ID
int i;
for (i = tid; i < numEdges; i += totalThreads) {
if (i == numEdges - 1 || src[i] != src[i + 1]) {
output[src[i]] = scanResult[i];
}
}
}
|
3,424 | #include <stdio.h>
#include <cuda_runtime.h>
#define DATA_TYPE long long
/* Single-thread pointer-chasing kernel (memory/cache latency probe).
 * device_array must hold a pre-built chain: each element stores, as a
 * DATA_TYPE integer, the *device address* of the next element to visit
 * (main() builds it with the requested stride). Performs array_size
 * dependent loads; the final store of the last pointer back into
 * device_array[0] keeps the chase from being optimized away. */
__global__ void read_cache(DATA_TYPE* device_array, int array_size) {
    int i;
    DATA_TYPE* j = &device_array[0];
    for (i = 0; i < array_size; i++)
        j=*(DATA_TYPE**)j; // follow the stored device address
    device_array[0] = (DATA_TYPE)j;
}
/* Driver for the pointer-chase benchmark.
 * Usage: prog <array_size> <stride>
 * Builds a chain of device addresses on the host, uploads it, and runs
 * the single-thread read_cache kernel over it.
 * FIX: the original returned without ever synchronizing after the launch,
 * so execution-time kernel faults (e.g. a bad chain address) were never
 * reported; a checked cudaDeviceSynchronize() is added. */
int main(int argc, char* argv[]) {
cudaError_t err = cudaSuccess;
DATA_TYPE* host_array = NULL;
DATA_TYPE* device_array = NULL;
size_t size;
int i;
if (argc < 3) {
printf("Not enough parameters! Exitting...\n");
return -1;
}
int ARRAY_SIZE = atoi(argv[1]);
int STRIDE = atoi(argv[2]);
size = sizeof(DATA_TYPE) * ARRAY_SIZE;
host_array = (DATA_TYPE*)malloc(size);
if (host_array == NULL) {
printf("Failed to malloc!\n");
return -1;
}
err = cudaMalloc((void**)&device_array, size);
if (err != cudaSuccess) {
printf("Failed to cudaMalloc!\n");
free(host_array);
return -1;
}
/* Build the chase chain: element i holds the device address of element
 * i+STRIDE; past the end it wraps to (i+STRIDE) % STRIDE, so every
 * element's successor stays in range. */
for (i = 0; i < ARRAY_SIZE; i++) {
DATA_TYPE t = i + STRIDE;
if (t >= ARRAY_SIZE) t %= STRIDE;
host_array[i] = (DATA_TYPE)device_array + (DATA_TYPE)sizeof(DATA_TYPE) * t;
}
err = cudaMemcpy(device_array, host_array, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("Failed to cudaMemcpy!\n");
free(host_array);
cudaFree(device_array);
return -1;
}
read_cache<<<1, 1>>>(device_array, ARRAY_SIZE);
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("Failed to invoke kernel!\n");
free(host_array);
cudaFree(device_array);
return -1;
}
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
printf("Kernel execution failed!\n");
free(host_array);
cudaFree(device_array);
return -1;
}
free(host_array);
cudaFree(device_array);
return 0;
}
|
3,425 | #include "includes.h"
__global__ void smooth_shared(float * v_new, const float * v) {
extern __shared__ float s[];
int id = blockDim.x * blockIdx.x + threadIdx.x;
s[threadIdx.x + 1] = v[id];
if (threadIdx.x == 0) {
int start = blockDim.x * blockIdx.x;
int left = max(0, start - 1);
s[0] = v[left];
int end = blockDim.x * gridDim.x;
int right = min(end - 1, blockDim.x * blockIdx.x + blockDim.x);
s[blockDim.x + 1] = v[right];
}
__syncthreads();
int tid = threadIdx.x + 1;
v_new[id] = 0.25f * s[tid - 1] + 0.5f * s[tid] + 0.25f * s[tid + 1];
} |
3,426 | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#multi-device-synchronization-cg
// typedef struct CUDA_LAUNCH_PARAMS_st {
// CUfunction function;
// unsigned int gridDimX;
// unsigned int gridDimY;
// unsigned int gridDimZ;
// unsigned int blockDimX;
// unsigned int blockDimY;
// unsigned int blockDimZ;
// unsigned int sharedMemBytes;
// CUstream hStream;
// void **kernelParams;
// } CUDA_LAUNCH_PARAMS;
// https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define CUDACHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// https://devblogs.nvidia.com/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/
// rotate rows up from some row offset `rotate`
/*
e.g.
rotate 1
row 0 | a b c d -> row 1 | e f g h
row 1 | e f g h -> row 2 | i j k l
row 2 | i j k l -> row 3 | m n o p
row 3 | m n o p -> row 0 | a b c d
*/
#define DEBUG 0
// these are somewhat arbitrarily picked.
// tries to use fewer threads if there isn't enough data.
#define MAX_BLOCKS 32*1024
#define COPY_THREADS 4*32
#define max(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _a : _b; })
#define min(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a < _b ? _a : _b; })
// rotates used by bucket algo.
// Rotates the rows of a width x height row-major matrix upward by
// `rotate` rows: dst row r receives src row (r + rotate) mod height.
// Grid-stride over the flat element range, so any launch size works.
// Assumes rotate >= 0 (callers pass a device rank).
__global__ void rotate(
    double *dst, double *src, size_t width, int height, int rotate
) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int num_threads = blockDim.x * gridDim.x;
    size_t n = (width*height);
    rotate = rotate % height;
    // Instead of carrying a separately-incremented source cursor, derive
    // the source index from the destination index each iteration:
    // src_offset == (dst_offset + rotate*width) mod n throughout.
    size_t shift = (size_t)rotate * width;
    for (size_t dst_offset = index; dst_offset < n; dst_offset += num_threads) {
        dst[dst_offset] = src[(dst_offset + shift) % n];
    }
}
// Inverse of rotate(): dst row r receives src row (rotate - r) mod height
// walked in reverse order per thread-column. Each thread owns a fixed
// column (row_offset) and walks successive destination rows num_cols
// apart, moving its source row backward by num_cols each step.
// NOTE(review): assumes num_threads is a multiple of width (so num_cols
// is exact) and rotate >= 0 — callers launch with width-derived thread
// counts and pass a device rank; confirm before reusing elsewhere.
__global__ void rotate_rev(
double *dst, double *src, size_t width, int height, int rotate
) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
size_t n = (width*height);
// row offset is going to be indexed forward,
int row_offset = index % width;
// but each successive col will precede the last.
int col_offset = index / width;
/*
for rotate=2, height=4
dst 0 1 2 3 | 4 5 6 7 | 8 9 A B | C D E F
src 8 9 A B | 4 5 6 7 | 0 1 2 3 | C D E F
*/
int num_cols = num_threads / width;
src += row_offset;  // fix this thread's column once; index rows below
int dst_offset = col_offset*width + row_offset;
int src_col = ((height + rotate - col_offset) % height);
for (
;
dst_offset < n;
dst_offset += num_threads,
src_col = (height + src_col - num_cols) % height
) {
dst[dst_offset] = src[src_col*width];
}
}
// packing used by mst.
// NOTE(review): stub — the packing logic is commented out and this kernel
// currently does nothing; MPI_All2All_mst performs its packing with
// cudaMemcpy calls instead. Kept for a planned kernel-side implementation.
__global__ void pack(
double *dst, double *src, int value, int rank, int i
) {
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// int num_threads = blockDim.x * gridDim.x;
//
// for (int j0 = 0; j0 < i; ++j0) {
//
// }
}
// assumes the device that the buffers belong to is the current device
// Bucket-algorithm all-to-all across `size` GPUs (one "rank" per device):
// 1) each rank rotates its send buffer by its rank into tempbuf,
// 2) size-1 rounds of ring-style right-neighbour copies shift the
//    remaining chunks along (alternating temp->recv and recv->temp
//    staging by round parity),
// 3) a reverse rotation places the chunks in rank order in recvbuf.
// sendcount/recvcount are elements per destination rank (callers pass the
// same value for both). Caller must have enabled peer access.
// NOTE(review): with sendcount < COPY_THREADS the final-rotation `blocks`
// computes max(1, 0) = 1, but the even-round branch staging (recvbufs as
// the copy source) looks asymmetric with the odd branch — verify the
// round-parity staging against a CPU reference before relying on it.
void MPI_All2All_bucket(
double** sendbufs, size_t sendcount,
double** recvbufs, size_t recvcount,
double** tempbufs,
int size,
// just use this for debugging
double** host_buffers, int ln
) {
size_t bytes_per_elem = sizeof(double);
// rotate the data in sendbuf to tempbuf.
for (int rank = 0; rank < size; rank++) {
CUDACHECK( cudaSetDevice(rank) );
size_t threads = min(sendcount, COPY_THREADS);
size_t blocks = min(MAX_BLOCKS, max(1, sendcount / COPY_THREADS));
rotate<<<blocks,threads>>>(tempbufs[rank], sendbufs[rank], sendcount, size, rank);
}
// #if DEBUG
// printf("after rotate\n");
// for (int rank = 0; rank < size; rank++) {
// printf("p%d\t\t", rank+1);
// CUDACHECK( cudaSetDevice(rank) );
// CUDACHECK( cudaMemcpy(host_buffers[rank], tempbufs[rank], ln * sizeof(double), cudaMemcpyDeviceToHost) );
// }
// printf("\n");
//
// for(int i = 0; i != ln; ++i) {
// for (int rank = 0; rank < size; rank++) {
// printf("%f\t", host_buffers[rank][i]);
// }
// printf("\n");
// }
// printf("\n");
// #endif
for (int i = 1; i < size; i++) {
// barrier: all devices must finish the previous round's async copies
for (int rank = 0; rank < size; rank++) {
CUDACHECK( cudaSetDevice(rank) );
CUDACHECK( cudaDeviceSynchronize() );
}
// task(?) size
int ts = size - i;
if (i % 2 == 1) {
// send to right,
for (int rank = 0; rank < size; rank++) {
int rank_right = (rank + 1) % size;
CUDACHECK( cudaSetDevice(rank) );
CUDACHECK( cudaMemcpyPeerAsync(
recvbufs[rank_right] + i*recvcount, // dst ptr
rank_right, // dst device
tempbufs[rank] + i*sendcount, // src ptr
rank, // src device
sendcount*ts*bytes_per_elem // count
));
}
// copy my chunk of received buffer into tempbuf.
for (int rank = 0; rank < size; rank++) {
CUDACHECK( cudaSetDevice(rank) );
CUDACHECK( cudaMemcpyAsync(
tempbufs[rank] + i*recvcount, // dst ptr
recvbufs[rank] + i*recvcount, // src ptr
recvcount*bytes_per_elem, // count
cudaMemcpyDeviceToDevice // kind
) );
}
} else {
// send to right.
for (int rank = 0; rank < size; rank++) {
int rank_right = (rank + 1) % size;
CUDACHECK( cudaSetDevice(rank) );
CUDACHECK( cudaMemcpyPeerAsync(
tempbufs[rank_right] + i*recvcount, // dst ptr
rank_right, // dst device
recvbufs[rank] + i*sendcount, // src ptr
rank, // src device
sendcount*ts*bytes_per_elem // count
));
}
}
}
// rotate the data in tempbuf to recvbuf.
for (int rank = 0; rank < size; rank++) {
CUDACHECK( cudaSetDevice(rank) );
size_t threads = min(sendcount, COPY_THREADS);
size_t blocks = max(1, sendcount / COPY_THREADS);
rotate_rev<<<blocks,threads>>>(recvbufs[rank], tempbufs[rank], sendcount, size, rank);
}
}
// Hypercube (MST / recursive-halving style) all-to-all across `size`
// GPUs: log2(size) rounds; in each round every rank exchanges half of its
// chunks with the partner rank differing in one address bit (rank^value).
// Chunks are packed into tempbuf, exchanged with cudaMemcpyPeer, and
// unpacked back into recvbuf. Assumes size is a power of two.
// NOTE(review): the pack loop writes every j0 iteration to the *same*
// tempbufs[rank] offset (0), and the unpack loop reads the same source
// offset — for i > 1 each iteration overwrites the last, so only one
// chunk per round survives. Verify against a CPU reference; this routine
// is currently disabled in main() in favor of the bucket algorithm.
void MPI_All2All_mst(
double** sendbufs, size_t sendcount,
double** recvbufs, size_t recvcount,
double** tempbufs,
int size,
// just use this for debugging
double** host_buffers, int ln
) {
size_t bytes_per_elem = sizeof(double);
// copy data to the destination buffer
for (int rank = 0; rank < size; rank++) {
CUDACHECK( cudaSetDevice(rank) );
CUDACHECK( cudaMemcpy(
recvbufs[rank], // dst ptr
sendbufs[rank], // src ptr
size*sendcount*bytes_per_elem, // count
cudaMemcpyDeviceToDevice // kind
) );
}
int hsize = size >> 1;
int value = hsize;
for (int i = 1; i < size; i *= 2) {
for (int rank = 0; rank < size; rank++) {
CUDACHECK( cudaSetDevice(rank) );
// offset selects which half of each 2*value-chunk group this rank keeps
int offset = ((rank & value) == 0);
// pack the data
for (int j0 = 0; j0 < i; ++j0) {
CUDACHECK( cudaMemcpy(
tempbufs[rank], // dst ptr
recvbufs[rank] + (j0*(value << 1) + value*offset) * sendcount, // src ptr
value*sendcount*bytes_per_elem, // count
cudaMemcpyDeviceToDevice // kind
) );
}
}
// point to point
for (int rank = 0; rank < size; rank++) {
CUDACHECK( cudaSetDevice(rank) );
int srcdest = rank ^ value; // hypercube partner for this round
CUDACHECK( cudaMemcpyPeer(
tempbufs[rank] + 1 * size/2 * sendcount, // dst ptr
rank, // dst device
tempbufs[srcdest] + 0 * size/2 * sendcount, // src ptr
srcdest, // src device
hsize * sendcount*bytes_per_elem // count
));
CUDACHECK( cudaMemcpyPeer(
tempbufs[srcdest] + 1 * size/2 * sendcount, // dst ptr
srcdest, // dst device
tempbufs[rank] + 0 * size/2 * sendcount, // src ptr
rank, // src device
hsize * sendcount*bytes_per_elem // count
));
}
for (int rank = 0; rank < size; rank++) {
CUDACHECK( cudaSetDevice(rank) );
int offset = ((rank & value) == 0);
// unpack the data.
for (int j0 = 0; j0 < i; ++j0) {
CUDACHECK( cudaMemcpy(
recvbufs[rank] + (j0*(value << 1) + value*offset) * sendcount, // dst ptr
tempbufs[rank] + 1 * size/2 * sendcount, // src ptr
value*sendcount*bytes_per_elem, // count
cudaMemcpyDeviceToDevice // kind
) );
}
}
value = (value >> 1);
}
}
// Multi-GPU all-to-all benchmark driver.
// Usage: prog <n total elements> <p devices>; p is clamped to the number
// of visible CUDA devices. Allocates per-device send/recv/temp buffers of
// ln = n/p doubles, enables peer access where available, runs the bucket
// all-to-all, and reports per-device elapsed time via CUDA events.
// NOTE(review): argv[1]/argv[2] are read without checking argc — running
// with no arguments dereferences argv out of bounds; p==0 would also
// divide by zero. Consider validating before atoi().
int main(int argc, char **argv) {
int n = atoi(argv[1]);
int p = atoi(argv[2]);
// min of p and device count
int device_count = 0;
CUDACHECK( cudaGetDeviceCount(&device_count) );
p = p > device_count ? device_count : p;
int ln = n / p;   // elements per device
int sn = ln / p;  // elements per (device, destination-rank) chunk
// allocate shared memory buffers to be used for communicating between
// GPUs. Host mem buffers too??
// should I not be using malloc managed?
// "The pointer is valid on the CPU and on all GPUs in the system that support
// managed memory. All accesses to this pointer must obey the Unified Memory
// programming model."
//The application can also guide the driver about memory usage patterns via
// cudaMemAdvise. The application can also explicitly migrate memory to a
// desired processor's memory via cudaMemPrefetchAsync.
double **send_buffers;
double **recv_buffers;
double **temp_buffers;
double **host_buffers;
CUDACHECK( cudaMallocHost(&send_buffers, p * sizeof(double *)) );
CUDACHECK( cudaMallocHost(&recv_buffers, p * sizeof(double *)) );
CUDACHECK( cudaMallocHost(&temp_buffers, p * sizeof(double *)) );
CUDACHECK( cudaMallocHost(&host_buffers, p * sizeof(double *)) );
// per-device allocations + best-effort peer-access enabling
for (int device = 0; device < p; device++) {
CUDACHECK( cudaSetDevice(device) );
for (int peer = 0; peer < p; peer++) {
int canAccessPeer = 0;
cudaDeviceCanAccessPeer(&canAccessPeer, device, peer);
if (canAccessPeer) {
cudaDeviceEnablePeerAccess(peer, 0);
printf("%d can access %d\n", device, peer);
}
}
CUDACHECK( cudaMalloc(send_buffers + device, ln * sizeof(double)) );
CUDACHECK( cudaMalloc(recv_buffers + device, ln * sizeof(double)) );
CUDACHECK( cudaMalloc(temp_buffers + device, ln * sizeof(double)) );
CUDACHECK( cudaMallocHost(host_buffers + device, ln * sizeof(double)) );
}
/*
// using managed memory or set device and allocate buffers for each?
double *send_buffer, *recv_buffer, *temp_buffer;
// ln if allocating per device, n if allocating for all devices.
CUDACHECK( cudaMallocManaged(&send_buffer, n * sizeof(double)) );
CUDACHECK( cudaMallocManaged(&recv_buffer, n * sizeof(double)) );
CUDACHECK( cudaMallocManaged(&temp_buffer, n * sizeof(double)) );
*/
// if we're using peer communication, make sure all the devices we're using
// can actually perform communicate with the nodes it needs to.
// initialize data: element i on device d gets (d+1).0 + (i+1)*0.01 so the
// origin of each chunk is recognizable after the exchange.
for (int device = 0; device < p; device++) {
CUDACHECK( cudaSetDevice(device) );
// initialize host buffer to be transferred to device
for(int i = 0; i != ln; ++i) {
host_buffers[device][i] = (device+1) * 1.0 + (i+1)*0.01;
}
// copy from host to device
CUDACHECK( cudaMemcpy(send_buffers[device], host_buffers[device], ln * sizeof(double), cudaMemcpyHostToDevice) );
CUDACHECK( cudaMemset(recv_buffers[device], 0, ln * sizeof(double)) );
CUDACHECK( cudaMemset(temp_buffers[device], 0, ln * sizeof(double)) );
}
#if DEBUG
for (int device = 0; device < p; device++) {
printf("p%d\t\t", device+1);
}
printf("\n");
for(int i = 0; i != ln; ++i) {
for (int device = 0; device < p; device++) {
printf("%f\t", host_buffers[device][i]);
}
printf("\n");
}
printf("\n");
#endif
// device events for timing
cudaEvent_t *start = (cudaEvent_t *) malloc(p*sizeof(cudaEvent_t));
cudaEvent_t *stop = (cudaEvent_t *) malloc(p*sizeof(cudaEvent_t));
for (int device = 0; device < p; device++) {
cudaSetDevice(device);
CUDACHECK( cudaEventCreate(start + device) );
CUDACHECK( cudaEventCreate(stop + device) );
}
CUDACHECK( cudaGetLastError() );
// Start
for (int device = 0; device < p; device++) {
cudaSetDevice(device);
CUDACHECK( cudaEventRecord(start[device]) );
}
// All to all
// in the future, replace p with MPI_COMM_WORLD-type configuration for comm.
MPI_All2All_bucket(
send_buffers, sn,
recv_buffers, sn,
temp_buffers,
p,
host_buffers, ln
);
// MPI_All2All_mst(
// send_buffers, sn,
// recv_buffers, sn,
// temp_buffers,
// p,
// host_buffers, ln
// );
// Stop
for (int device = 0; device < p; device++) {
cudaSetDevice(device);
CUDACHECK( cudaEventRecord(stop[device]) );
}
// sync each device, then read out per-device elapsed time
for (int device = 0; device < p; device++) {
cudaSetDevice(device);
cudaDeviceSynchronize();
float time_ms;
CUDACHECK( cudaEventElapsedTime(&time_ms, start[device], stop[device]) );
printf("p%d: %f ms\n", device + 1, time_ms);
}
#if DEBUG
for (int device = 0; device < p; device++) {
printf("p%d\t\t", device+1);
CUDACHECK( cudaSetDevice(device) );
CUDACHECK( cudaMemcpy(host_buffers[device], recv_buffers[device], ln * sizeof(double), cudaMemcpyDeviceToHost) );
}
printf("\n");
for(int i = 0; i != ln; ++i) {
for (int device = 0; device < p; device++) {
printf("%f\t", host_buffers[device][i]);
}
printf("\n");
}
#endif
// Free buffers
// NOTE(review): events and the start/stop/pointer-table allocations are
// never destroyed/freed — harmless at exit, but worth cleaning up.
for (int device = 0; device < p; device++) {
CUDACHECK( cudaFreeHost(host_buffers[device]) );
CUDACHECK( cudaFree(send_buffers[device]) );
CUDACHECK( cudaFree(recv_buffers[device]) );
CUDACHECK( cudaFree(temp_buffers[device]) );
}
}
|
3,427 | /*
* File: cuda_helper.cu
* Assignment: 5
* Students: Teun Mathijssen, David Puroja
* Student email: teun.mathijssen@student.uva.nl, david.puroja@student.uva.nl
* Studentnumber: 11320788, 10469036
*
* Description: File containing functions used to allocate, copy and free
* device memory and to check is a call is succesful.
*/
#include <iostream>
using namespace std;
/* Utility function, use to do error checking.
* Use this function like this:
* checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
* And to check the result of a kernel invocation:
* checkCudaCall(cudaGetLastError());
*/
/* Terminates the program with a diagnostic if a CUDA call failed.
 * Usage: checkCudaCall(cudaMalloc(...)); checkCudaCall(cudaGetLastError()); */
void checkCudaCall(cudaError_t result){
    if (result == cudaSuccess) {
        return;
    }
    cerr << "cuda error: " << cudaGetErrorString(result) << endl;
    exit(1);
}
/* Allocates `size` bytes of device memory and returns the device pointer.
 * Exits the program (via checkCudaCall or the NULL check) on failure. */
void* allocateDeviceMemory(unsigned int size) {
    void* pointer = NULL;
    checkCudaCall(cudaMalloc((void **) &pointer, size));
    if (pointer != NULL) {
        return pointer;
    }
    cout << "could not allocate memory on the GPU." << endl;
    exit(1);
}
/* Copies `size` bytes from one device buffer to another (aborts on error). */
void memcpyDeviceToDevice(void* target, void* source, unsigned int size) {
    checkCudaCall(cudaMemcpy(target, source, size, cudaMemcpyDeviceToDevice));
}
/* Copies `size` bytes from a host buffer to a device buffer (aborts on error). */
void memcpyHostToDevice(void* target, void* source, unsigned int size) {
    checkCudaCall(cudaMemcpy(target, source, size, cudaMemcpyHostToDevice));
}
/* Copies `size` bytes from a device buffer back to a host buffer (aborts on error). */
void memcpyDeviceToHost(void* target, void* source, unsigned int size) {
    checkCudaCall(cudaMemcpy(target, source, size, cudaMemcpyDeviceToHost));
}
/* Releases device memory previously obtained via allocateDeviceMemory
 * (aborts on error). */
void freeDeviceMemory(void* pointer) {
    checkCudaCall(cudaFree(pointer));
}
|
3,428 | /////////////////////////////////////////////////////////
// Auther: Aditya Mitkari
// Date: 4/5/19
//
// Description: Serial code for RFI mitigation
// Input: file / array of frequecy values for given DM
// Ouput: Frequecy values free of RFI
/////////////////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
//#include "headers/params.h"
void rfi(int nsamp, int nchans, unsigned short **input_buffer)
{
int file_reducer = 1;
float sigma_cut = 2.0f;
float *stage = ( float* )malloc( ( size_t ) nsamp * ( size_t )nchans * sizeof( float ) );
int cn = 0;
for( int c = 0; c < nchans; c++ )
{
for( int t = 0; t < nsamp; t++ )
{
stage[ c * ( size_t )nsamp + t ] = ( float ) ( *input_buffer )[ c + ( size_t )nchans * t ];
}
}
// ~~~ RFI Correct ~~~ //
double orig_mean = 0.0;
double orig_var=0.0;
// Find the mean and SD of the input data (we'll use this to rescale the data at the end of the process.
for( int c = 0; c < nchans; c++ )
{
for( int t = 0; t < ( nsamp ); t++)
orig_mean+=stage[ c * ( size_t )nsamp + t ];
}
orig_mean /= ( nsamp * nchans );
for( int c = 0; c < nchans; c++ )
{
for( int t = 0; t < ( nsamp ); t++ )
{
orig_var += ( stage[ c * ( size_t )nsamp + t ] - orig_mean ) * ( stage[ c * ( size_t )nsamp + t ] - orig_mean );
}
}
orig_var /= ( nsamp * nchans );
orig_var = sqrt( orig_var );
//printf( "orig_mean %f\n", orig_mean );
//printf( "orig_var %f\n", orig_var );
// Random Vectors
float *random_chan_one = ( float* )malloc( nsamp * sizeof( float ) );
float *random_chan_two = ( float* )malloc( nsamp * sizeof( float ) );
for( int t = 0; t < nsamp; t++ )
{
float x1, x2, w, y1, y2;
do {
x1 = 2.0 * ( ( float )rand() / ( float )RAND_MAX ) - 1.0;
x2 = 2.0 * ( ( float )rand() / ( float )RAND_MAX ) - 1.0;
w = x1 * x1 + x2 * x2;
} while( w >= 1.0 );
w = sqrt( ( -2.0 * log( w ) ) / w);
y1 = x1 * w;
y2 = x2 * w;
random_chan_one[ t ] = y1;
random_chan_two[ t ] = y2;
}
float *random_spectra_one = ( float* )malloc( nchans * sizeof( float ) );
float *random_spectra_two = ( float* )malloc( nchans * sizeof( float ) );
for(int c = 0; c < nchans; c++)
{
float x1, x2, w, y1, y2;
do {
x1 = 2.0 * ( ( float )rand() / ( float )RAND_MAX ) - 1.0;
x2 = 2.0 * ( ( float )rand() / ( float )RAND_MAX ) - 1.0;
w = x1 * x1 + x2 * x2;
} while( w >= 1.0 );
w = sqrt( ( -2.0 * log( w ) ) / w );
y1 = x1 * w;
y2 = x2 * w;
random_spectra_one[ c ] = y1;
random_spectra_two[ c ] = y2;
}
// Allocate working arrays
int *chan_mask = ( int* )malloc( nchans * sizeof( int ) );
for( int c = 0; c < nchans; c++ ) chan_mask[ c ] = 1;
int *spectra_mask = ( int* )malloc( nsamp * sizeof( int ) );
for( int t = 0; t < nsamp; t++ ) spectra_mask[ t ] = 1;
double *chan_mean = ( double* )malloc( nchans * sizeof( double ) );
for( int c = 0; c < nchans; c++ ) chan_mean[ c ] = 0.0;
double *chan_var = ( double* )malloc( nsamp * sizeof( double ) );
for( int c = 0; c < nchans; c++ ) chan_var[ c ] = 0.0;
double *spectra_mean = ( double* )malloc( nsamp * sizeof( double ) );
for( int t = 0; t < nsamp; t++ ) spectra_mean[ t ] = 0.0;
double *spectra_var = ( double* )malloc( nsamp * sizeof( double ) );
for( int t = 0; t < nsamp; t++ ) spectra_var[ t ] = 0.0;
// Find the BLN and try to flatten the input data per channel (remove non-stationary component).
for( int c = 0; c < nchans; c++ )
{
int counter = 0;
for( int t = 0; t < nsamp; t++ ) spectra_mask[ t ] = 1;
int finish = 0;
int rounds = 1;
double old_mean = 0.0;
double old_var = 0.0;
while( finish == 0 )
{
counter = 0;
chan_mean[ c ] = 0.0;
for( int t = 0; t < ( nsamp ); t++ )
{
if( spectra_mask[ t ] == 1 )
{
chan_mean[ c ] += stage[ c * ( size_t )nsamp + t ];
counter++;
}
}
//printf( "\nchan_mean %lf\n", chan_mean[ c ] );
//printf( "\ncounter%u\n", counter );
if( counter == 0 )
{
//printf( "\nCounter zero, Channel %d", c );
chan_mask[ c ] = 0;
finish = 1;
break;
}
chan_mean[ c ] /= ( counter );
counter = 0;
chan_var[ c ] = 0.0;
for( int t = 0; t < ( nsamp ); t++)
{
if( spectra_mask[ t ] == 1 )
{
chan_var[ c ] += ( stage[ c * (size_t)nsamp + t ] - chan_mean[ c ] ) * ( stage[ c * (size_t)nsamp + t ] - chan_mean[ c ] );
counter++;
}
}
//printf( "\nchan_var %lf\n", chan_var[c] );
//printf( "\ncounter %u\n", counter );
chan_var[ c ] /= ( counter );
chan_var[ c ] = sqrt( chan_var[ c ] );
if( ( chan_var[ c ] ) * 1000000.0 < 0.1 )
{
////printf("\nVarience zero, Channel %d %d %lf %.16lf\n", c, rounds, chan_mean[c], chan_var[c] );
chan_mask[ c ] = 0;
finish = 1;
break;
}
for( int t = 0; t < ( nsamp ); t++ )
{
if( ( ( stage[ c * ( size_t )nsamp + t ] - chan_mean[ c ] ) / chan_var[ c ] ) > sigma_cut || ( ( stage[ c * ( size_t )nsamp + t ] - chan_mean[ c ] ) / chan_var[ c ] ) < -sigma_cut )
{
spectra_mask[ t ] = 0;
}
else
{
spectra_mask[ t ] = 1;
}
}
if( fabs( chan_mean[ c ] - old_mean ) < 0.001 && fabs( chan_var[ c ] - old_var ) < 0.0001 && rounds > 1)
{
////printf("\n%d\t%d\t%.16lf\t%.16lf\t%.16lf\t%.16lf", c, rounds, (chan_mean[c]-old_mean), (chan_var[c]-old_var), chan_mean[c], chan_var[c]);
finish = 1;
}
old_mean = chan_mean[ c ];
old_var = chan_var[ c ];
rounds++;
}
//printf( "\nChan mean, var: %lf %lf\n", chan_mean[ c ], chan_var[ c ] );
if( chan_mask[ c ] != 0 )
{
for( int t = 0; t < ( nsamp ); t++ )
{
stage[ c * ( size_t )nsamp + t ] = ( stage[ c * ( size_t )nsamp + t ] - ( float )chan_mean[ c ] ) / ( float )chan_var[ c ];
}
}
else
{
int perm_one = ( int )( ( ( float )rand() / ( float )RAND_MAX ) * nsamp );
for( int t = 0; t < nsamp; t++ )
{
stage[ c * ( size_t )nsamp + t ] = random_chan_one[ ( t + perm_one ) % nsamp ];
}
chan_mean[ c ] = 0.0;
chan_var[ c ] = 1.0;
chan_mask[ c ] = 1;
}
}
// Find the BLN and try to flatten the input data per spectra (remove non-stationary component).
unsigned int cnt = 0;
for( int t = 0; t < nsamp; t++ )
{
int counter = 0;
for( int c = 0; c < nchans; c++ )
chan_mask[ c ]=1;
int finish = 0;
int rounds = 1;
double old_mean = 0.0;
double old_var = 0.0;
while( finish == 0 )
{
cnt += 1;
counter = 0;
spectra_mean[ t ] = 0.0;
for( int c = 0; c < nchans; c++ )
{
if( chan_mask[ c ] == 1 )
{
spectra_mean[ t ] += stage[ c * ( size_t )nsamp + t ];
counter++;
}
}
//printf( "\nSpectra mean %lf\n", spectra_mean[ t ] );
//printf( "counter %d\n", counter );
if( counter == 0 )
{
//printf( "\nCounter zero, Spectra %d", t );
spectra_mask[ t ] = 0;
finish = 1;
break;
}
spectra_mean[ t ] /= (counter);
counter = 0;
spectra_var[ t ] = 0.0;
for( int c = 0; c < nchans; c++ )
{
if( chan_mask[ c ] == 1 )
{
spectra_var[ t ] += ( stage[ c * ( size_t )nsamp + t ] - spectra_mean[ t ] ) * ( stage[ c * ( size_t )nsamp + t ] - spectra_mean[ t ] );
counter++;
}
}
//printf( "spectra_var %lf\n", spectra_var[ t ] );
//printf( "counter %u\n", counter );
spectra_var[ t ] /= (counter);
spectra_var[ t ] = sqrt( spectra_var[ t ] );
if( ( spectra_var[ t ] ) * 1000000.0 < 0.1 )
{
////printf("\nVarience zero, Spectra %d %d %lf %.16lf", t, rounds, spectra_mean[t], spectra_var[t] );
spectra_mask[ t ] = 0;
finish = 1;
break;
}
if( spectra_mask[ t ] != 0 )
{
for( int c = 0; c < nchans; c++ )
{
if( ( ( stage[ c * (size_t)nsamp + t ] - spectra_mean[ t ] ) / spectra_var[ t ] ) > sigma_cut || ( ( stage[ c * (size_t)nsamp + t ] - spectra_mean[ t ] ) / spectra_var[ t ] ) < -sigma_cut)
{
chan_mask[ c ] = 0;
}
else
{
chan_mask[ c ] = 1;
}
}
}
if( fabs( spectra_mean[ t ] - old_mean ) < 0.001 && fabs( spectra_var[ t ] - old_var ) < 0.0001 && rounds > 1)
{
////printf("\n%d\t%d\t%.16lf\t%.16lf\t%.16lf\t%.16lf", t, rounds, (spectra_mean[t] - old_mean), (spectra_var[t] - old_var), spectra_mean[t], spectra_var[t]);
finish = 1;
}
old_mean = spectra_mean[ t ];
old_var = spectra_var[ t ];
rounds++;
}
//return;
////printf("Spectra mean, var: %lf %d\n", spectra_mean[t], spectra_var[t] );
if( spectra_mask[ t ] != 0)
{
for( int c = 0; c < nchans; c++ )
{
stage[ c * (size_t)nsamp + t ] = ( stage[ c * (size_t)nsamp + t ] - (float)spectra_mean[ t ] ) / (float)spectra_var[ t ];
}
}
else
{
int perm_one = (int)( ( (float)rand() / (float)RAND_MAX ) * nchans);
for( int c = 0; c < nchans; c++ )
{
stage[ c * (size_t)nsamp + t ] = random_spectra_one[ ( c + perm_one ) % nchans ];
}
spectra_mean[ t ] = 0.0;
spectra_var[ t ] = 1.0;
spectra_mask[ t ] = 1;
}
}
//printf( "cnt is %u\n", cnt );
double mean_rescale = 0.0;
double var_rescale = 0.0;
// Find the mean and SD of the mean and SD...
int finish = 0;
int rounds = 1;
int counter = 0;
double mean_of_mean = 0.0;
double var_of_mean = 0.0;
double mean_of_var = 0.0;
double var_of_var = 0.0;
double old_mean_of_mean = 0.0;
double old_var_of_mean = 0.0;
double old_mean_of_var = 0.0;
double old_var_of_var = 0.0;
for( int c = 0; c < nchans; c++ ) chan_mask[ c ] = 1;
while(finish == 0)
{
mean_of_mean = 0.0;
counter = 0;
for( int c = 0; c < nchans; c++ )
{
if( chan_mask[ c ] == 1 )
{
mean_of_mean += chan_mean[ c ];
counter++;
}
}
//printf("mm is %lf\n",mean_of_mean );
mean_of_mean /= counter;
var_of_mean = 0.0;
counter = 0;
for( int c = 0; c < nchans; c++ )
{
if( chan_mask[ c ] == 1 )
{
var_of_mean += ( chan_mean[ c ] - mean_of_mean ) * ( chan_mean[ c ] - mean_of_mean );
counter++;
}
}
//printf( "\nvar_of_mean %lf\n", var_of_var );
//printf( "\ncounter %u\n", counter );
var_of_mean /= ( counter );
var_of_mean = sqrt( var_of_mean );
mean_of_var = 0.0;
counter = 0;
for( int c = 0; c < nchans; c++ )
{
if( chan_mask[ c ] == 1 )
{
mean_of_var += chan_var[ c ];
counter++;
}
}
//printf("\nmean_of_var %lf\n",mean_of_var );
//printf("\ncounter %u\n",counter );
mean_of_var /= counter;
var_of_var = 0.0;
counter = 0;
for( int c = 0; c < nchans; c++ )
{
if( chan_mask[ c ] == 1 )
{
var_of_var += ( chan_var[ c ] - mean_of_var ) * ( chan_var[ c ] - mean_of_var);
counter++;
}
}
//printf("\nvar_of_var %lf\n",var_of_var );
//printf("\ncounter %u\n",counter);
var_of_var /= (counter);
var_of_var = sqrt( var_of_var );
for( int c = 0; c < nchans; c++ )
if( fabs( chan_mean[ c ] - mean_of_mean ) / var_of_mean > sigma_cut || fabs( chan_var[ c ] - mean_of_var ) / var_of_var > sigma_cut )
chan_mask[ c ] = 0;
if(fabs(mean_of_mean - old_mean_of_mean) < 0.001 &&
fabs(var_of_mean - old_var_of_mean ) < 0.001 &&
fabs(mean_of_var - old_mean_of_var ) < 0.001 &&
fabs(var_of_var - old_var_of_var ) < 0.001)
{
finish = 1;
}
old_mean_of_mean = mean_of_mean;
old_var_of_mean = var_of_mean;
old_mean_of_var = mean_of_var;
old_var_of_var = var_of_var;
rounds++;
}
//printf("\n0 %lf %lf", mean_of_mean, var_of_mean);
//printf("\n0 %lf %lf", mean_of_var, var_of_var);
mean_rescale = mean_of_mean;
var_rescale = mean_of_var;
float clipping_constant = 0.0;
for( int c = 0; c < nchans; c++ ) clipping_constant += chan_mask[ c ];
clipping_constant = ( nchans - clipping_constant ) / nchans;
//printf("\n clipping_constant is %f\n",clipping_constant );
clipping_constant = sqrt( -2.0 * log( clipping_constant * 2.506628275 ) );
//printf("This This %f\n",clipping_constant );
// Perform channel replacement
for( int c = 0; c < nchans; c++ )
{
if( fabs( ( chan_mean[ c ] - mean_of_mean ) / var_of_mean ) > clipping_constant && fabs( ( chan_var[ c ] - mean_of_var ) / var_of_var ) > clipping_constant )
{
////printf("\nReplacing Channel %d %lf %lf", c, chan_mean[c], chan_var[c]);
int perm_one = (int)( ( (float)rand() / (float)RAND_MAX ) * nsamp );
for( int t = 0; t < (nsamp); t++ )
{
stage[ ( c * (size_t)nsamp + t) ] = random_chan_two[ ( t + perm_one ) % nsamp ];
}
}
}
finish = 0;
rounds = 1;
counter = 0;
mean_of_mean = 0.0;
var_of_mean = 0.0;
mean_of_var = 0.0;
var_of_var = 0.0;
old_mean_of_mean = 0.0;
old_var_of_mean = 0.0;
old_mean_of_var = 0.0;
old_var_of_var = 0.0;
for( int t = 0; t < (nsamp); t++ ) spectra_mask[ t ] = 1;
while( finish == 0 )
{
mean_of_mean = 0.0;
counter = 0;
for( int t = 0; t < (nsamp); t++ )
{
if( spectra_mask[ t ] == 1 )
{
mean_of_mean += spectra_mean[ t ];
counter++;
}
}
mean_of_mean /= counter;
var_of_mean = 0.0;
counter = 0;
for( int t = 0; t < (nsamp); t++ )
{
if( spectra_mask[ t ] == 1 )
{
var_of_mean += ( spectra_mean[ t ] - mean_of_mean ) * ( spectra_mean[ t ]- mean_of_mean );
counter++;
}
}
var_of_mean /= (counter);
var_of_mean = sqrt( var_of_mean );
mean_of_var = 0.0;
counter = 0;
for( int t = 0; t < (nsamp); t++ )
{
if( spectra_mask[ t ] == 1 )
{
mean_of_var += spectra_var[ t ];
counter++;
}
}
mean_of_var /= counter;
var_of_var = 0.0;
counter = 0;
for( int t = 0; t < (nsamp); t++ )
{
if( spectra_mask[ t ] == 1 )
{
var_of_var += ( spectra_var[ t ] - mean_of_var ) * ( spectra_var[ t ] - mean_of_var );
counter++;
}
}
var_of_var /= (counter);
var_of_var = sqrt( var_of_var );
for( int t = 0; t < (nsamp); t++) if( fabs( spectra_mean[ t ] - mean_of_mean ) / var_of_mean > sigma_cut || fabs( spectra_var[ t ] - mean_of_var ) / var_of_var > sigma_cut ) spectra_mask[ t ] = 0;
if(fabs(mean_of_mean - old_mean_of_mean) < 0.001 &&
fabs(var_of_mean - old_var_of_mean ) < 0.001 &&
fabs(mean_of_var - old_mean_of_var ) < 0.001 &&
fabs(var_of_var - old_var_of_var ) < 0.001)
{
finish = 1;
}
old_mean_of_mean = mean_of_mean;
old_var_of_mean = var_of_mean;
old_mean_of_var = mean_of_var;
old_var_of_var = var_of_var;
rounds++;
}
//printf("\n0 %lf %lf", mean_of_mean, var_of_mean);
//printf("\n0 %lf %lf", mean_of_var, var_of_var);
clipping_constant = 0.0;
for( int t = 0; t < nsamp; t++ ) clipping_constant += spectra_mask[ t ];
clipping_constant = ( nsamp - clipping_constant ) / nsamp;
clipping_constant = sqrt( -2.0 * log( clipping_constant * 2.506628275 ) );
// Perform spectral replacement
for( int t = 0; t < (nsamp); t++ )
{
if( fabs( ( spectra_mean[ t ] - mean_of_mean ) / var_of_mean ) > clipping_constant && fabs( ( spectra_var[ t ] - mean_of_var ) / var_of_var ) > clipping_constant )
{
////printf("\nReplacing Spectral %d %lf %lf", t, spectra_mean[t], spectra_var[t]);
int perm_one = (int)( ( (float)rand() / (float)RAND_MAX) * nchans );
for( int c = 0; c < nchans; c++ )
{
stage[ c * (size_t)nsamp + t ] = random_spectra_two[ ( c + perm_one ) % nchans ];
}
}
}
for( int c = 0; c < nchans; c++ )
{
for( int t = 0; t < (nsamp); t++ )
{
//(*input_buffer)[c + (size_t)nchans * t] = (unsigned char) ((stage[c * (size_t)nsamp + t]*orig_var)+orig_mean);
(*input_buffer)[ c + (size_t)nchans * t ] = (unsigned char) ( ( stage[ c * (size_t)nsamp + t ] * var_rescale ) + mean_rescale );
}
}
FILE *fp_mask = fopen ("masked_chans.txt", "w+");
for( int c = 0; c < nchans; c++ )
{
for( int t = 0; t < (nsamp) / file_reducer; t++ )
{
fprintf(fp_mask, "%d ", (unsigned char)((stage[c * (size_t)nsamp + t]*orig_var)+orig_mean));
fprintf( fp_mask, "%d ", (unsigned char)( ( stage[ c * (size_t)nsamp + t] * var_rescale ) + mean_rescale ) );
}
fprintf(fp_mask, "\n");
}
fclose(fp_mask);
//printf("\n%lf %lf", mean_rescale / orig_mean, var_rescale / orig_var);
free(chan_mask);
free(spectra_mask);
free(chan_mean);
free(chan_var);
free(spectra_mean);
free(spectra_var);
free(stage);
}
void readInputBuffer(unsigned short *input_buffer, int nsamp, int nchans, char fname[100])
{
    // Read nsamp*nchans whitespace-separated unsigned short values from the
    // text file `fname` into input_buffer. Layout is whatever the producer
    // wrote; this routine only fills the flat buffer in file order.
    FILE *fp_inputBuffer = fopen( fname, "r" );
    if( fp_inputBuffer == NULL )
    {
        fprintf( stderr, "Error opening file %s\n", fname );
        exit(1);  // was exit(0): failure must not report success to the shell
    }
    // size_t count so nsamp*nchans cannot overflow int arithmetic
    size_t total = (size_t)nsamp * (size_t)nchans;
    for( size_t i = 0; i < total; i++ )
    {
        // Check every conversion: a short or malformed file previously left
        // the tail of the buffer uninitialized without any diagnostic.
        if( fscanf(fp_inputBuffer, "%hu", &input_buffer[ i ] ) != 1 )
        {
            fprintf( stderr, "Short/invalid read in %s: got %zu of %zu values\n",
                     fname, i, total );
            fclose( fp_inputBuffer );
            exit(1);
        }
    }
    fclose( fp_inputBuffer );
}
int main()
{
    // Driver: load a time/frequency data block from disk and run RFI cleaning.
    int nsamp = 491522;  // number of time samples (10977282 in an earlier run)
    int nchans = 4096;   // number of frequency channels
    // size_t arithmetic: nsamp*nchans is ~2.0e9 and sits right at the edge of
    // int range, so compute the byte count without int intermediate products.
    size_t in_buffer_bytes = (size_t)nsamp * (size_t)nchans * sizeof( unsigned short );
    unsigned short *in_buffer = ( unsigned short* )malloc( in_buffer_bytes );
    if( in_buffer == NULL )
    {
        fprintf( stderr, "Failed to allocate %zu bytes for the input buffer\n", in_buffer_bytes );
        return 1;
    }
    unsigned short *input_buffer = in_buffer;
    char fname[100] = "input_buffer.txt";
    readInputBuffer(in_buffer, nsamp, nchans, fname);
    rfi(nsamp, nchans, &input_buffer);
    // Release the original allocation (the original leaked it).
    // NOTE(review): rfi() receives &input_buffer and may in principle
    // repoint it -- confirm against rfi's contract; we free the original.
    free( in_buffer );
    return 0;
}
|
3,429 | #include "includes.h"
__global__ void copySimilarity(float* similarities, int active_patches, int patches, int* activeMask, int target, int source)
{
    // For every currently-active patch, copy its similarity entry from row
    // `source` to row `target` of the (rows x patches) similarities array.
    // One thread per active patch; tail threads do nothing.
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < active_patches)
    {
        const int patch = activeMask[tid];
        const int dst = target * patches + patch;
        const int src = source * patches + patch;
        similarities[dst] = similarities[src];
    }
}
3,430 | /*
Jaitirth Jacob - 13CO125 Vidit Bhargava - 13CO151
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__global__ void rotateArray(int *c, int numThreads)
{
    // Rotates the array left by one position in place: c[i] = c[(i+1) % n].
    // Correct ONLY for a single-block launch (as used in main below):
    // every thread first reads its neighbour's value, then the block-wide
    // barrier guarantees all reads finish before any thread overwrites its
    // own slot. With multiple blocks __syncthreads() would not order the
    // reads/writes across blocks and the result would race.
    int nextIndex = (threadIdx.x + 1)%numThreads;
    int val = c[nextIndex];
    __syncthreads();
    c[threadIdx.x] = val;
}
#define N 1024
int main(void)
{
    int size = N * sizeof(int);

    // Device buffer rotated in place by the kernel.
    int *d_c;
    cudaMalloc((void **)&d_c, size);

    // Host buffers: original input and the rotated result.
    int *c = (int *)malloc(size);
    int *res = (int *)malloc(size);

    // Fill the input with small random values.
    srand(time(NULL));
    for (int i = 0; i < N; ++i)
        c[i] = rand() % 20;

    // Upload, rotate on the GPU (single block so the kernel's
    // __syncthreads() covers all participating threads), download.
    cudaMemcpy(d_c, c, size, cudaMemcpyHostToDevice);
    rotateArray<<<1, N>>>(d_c, N);
    cudaMemcpy(res, d_c, size, cudaMemcpyDeviceToHost);

    printf("First thirty elements are as follows:\n");
    printf("Original\tNew\n");
    for (int i = 0; i < 30; ++i)
        printf("%d\t\t%d\n", c[i], res[i]);

    // Cleanup
    free(c);
    free(res);
    cudaFree(d_c);
    return 0;
}
|
3,431 | #include "includes.h"
__global__ void multiplyTanh(float* out, float* in1, float* in2, int size){
    // Elementwise product: out[i] = in1[i] * in2[i] for i < size.
    // (Despite the name, no tanh is applied here -- this kernel is only the
    // multiply stage; presumably the tanh lives elsewhere in the pipeline.)
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx >= size)
        return;
    out[idx] = in1[idx] * in2[idx];
}
3,432 | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Enumerate all CUDA devices and print a summary of each device's
// properties, one attribute per line.
int main(int argc, char** argv) {
    int ct,dev;
    cudaError_t code;
    struct cudaDeviceProp prop;
    cudaGetDeviceCount(&ct);
    // cudaGetDeviceCount itself can fail (e.g. no driver); surface that error.
    code = cudaGetLastError();
    if(code) printf("%s\n", cudaGetErrorString(code));
    if(ct == 0) {
        printf("Cuda device not found.\n");
        exit(0);
    }
    printf("Found %i Cuda device(s).\n",ct);
    for (dev = 0; dev < ct; ++dev) {
        printf("Cuda device %i\n", dev);
        cudaGetDeviceProperties(&prop,dev);
        // Sizes are cast to narrower types for printing; values beyond the
        // printed type's range would be truncated (cosmetic only).
        printf("\tname : %s\n", prop.name);
        printf("\ttotalGlobablMem: %lu\n", (unsigned long)prop.totalGlobalMem);
        printf("\tsharedMemPerBlock: %i\n", (int)prop.sharedMemPerBlock);
        printf("\tregsPerBlock: %i\n", prop.regsPerBlock);
        printf("\twarpSize: %i\n", prop.warpSize);
        printf("\tmemPitch: %i\n", (int)prop.memPitch);
        printf("\tmaxThreadsPerBlock: %i\n", prop.maxThreadsPerBlock);
        printf("\tmaxThreadsDim: %i, %i, %i\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("\tmaxGridSize: %i, %i, %i\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("\tclockRate: %i\n", prop.clockRate);
        printf("\ttotalConstMem: %i\n", (int)prop.totalConstMem);
        printf("\tmajor: %i\n", prop.major);
        printf("\tminor: %i\n", prop.minor);
        printf("\ttextureAlignment: %i\n", (int)prop.textureAlignment);
        printf("\tdeviceOverlap: %i\n", prop.deviceOverlap);
        printf("\tmultiProcessorCount: %i\n", prop.multiProcessorCount);
    }
}
|
3,433 | #include "includes.h"
__global__ void initialize_rho(float* rho, int size_c, int nc) {
    // Fill the (nc x size_c) rho array with the constant 0.5f.
    // 2-D launch: x indexes within a channel, y selects the channel.
    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    const int y = blockIdx.y*blockDim.y + threadIdx.y;
    if (x >= size_c || y >= nc)
        return;
    rho[y*size_c + x] = 0.5f;
}
3,434 | #include "includes.h"
// Row-major flat index into an H x W image. H is unused but kept so the
// signature matches its call sites.
__device__ size_t GIDX(size_t row, size_t col, int H, int W) {
    return col + row * (size_t)W;
}
__global__ void kernel_sub(float* d_f1ptr, float* d_f2ptr, float* d_dt, int H, int W) {
    // Elementwise difference d_dt = d_f2 - d_f1 over an H x W image.
    // One thread per pixel via a 2-D launch; out-of-image threads exit.
    const size_t r = threadIdx.y + blockDim.y * blockIdx.y;
    const size_t c = threadIdx.x + blockDim.x * blockIdx.x;
    if (r < H && c < W) {
        const size_t i = GIDX(r, c, H, W);
        d_dt[i] = d_f2ptr[i] - d_f1ptr[i];
    }
}
3,435 | #define CUDA_BLOCK_X 128
#define CUDA_BLOCK_Y 1
#define CUDA_BLOCK_Z 1
// Auto-generated kernel (see the generated launch code in main). Each thread
// conditionally copies one element of `a` from an odd-offset position to the
// adjacent even-offset one.
__global__ void _auto_kernel_0(int a[100][2][4])
{
    int thread_x_id;thread_x_id = blockIdx.x * blockDim.x + threadIdx.x;
    int thread_y_id;thread_y_id = blockIdx.y * blockDim.y + threadIdx.y;
    int thread_z_id;thread_z_id = blockIdx.z * blockDim.z + threadIdx.z;
    // NOTE(review): the generated bounds look wrong for int a[100][2][4]:
    // thread_x_id up to 100 makes the first index reach 198 (>= 100),
    // thread_y_id up to 2 makes the second index reach 2 (>= 2), and
    // thread_z_id up to 15 makes the third index reach 15 (>= 4).
    // These accesses are out of bounds -- verify against the generator.
    if (thread_x_id && (thread_y_id && thread_z_id))
    if (thread_x_id <= 100 && (thread_y_id <= 2 && thread_z_id <= 15)) {
    a[2 * thread_x_id + -2][2 * thread_y_id + -2][-1 * thread_z_id + 16] = a[2 * thread_x_id + -1][2 * thread_y_id + -1][-1 * thread_z_id + 16];
    }
}
// Auto-generated driver: copies `a` to the device, launches _auto_kernel_0
// over a grid sized from the array extents, and copies the result back.
int main()
{
    int a[100][2][4];
    // i/j/k are emitted by the generator but only i is ever assigned;
    // none of them affect the kernel call.
    int i;
    int j;
    int k;
    i = 0;
    {
    {
    /* Auto-generated code for call to _auto_kernel_0 */
    typedef int _narray_a[2][4];
    _narray_a *d_a;
    cudaMalloc((void **) &d_a, sizeof(int ) * 100 * 2 * 4);
    // NOTE(review): `a` is copied to the device uninitialized -- the kernel
    // shuffles indeterminate values. Presumably the generator's input
    // program initialized it; verify upstream.
    cudaMemcpy(d_a, a, sizeof(int ) * 100 * 2 * 4, cudaMemcpyHostToDevice);
    int CUDA_GRID_X;
    CUDA_GRID_X = (100 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X;   // ceil-div per axis
    int CUDA_GRID_Y;
    CUDA_GRID_Y = (2 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y;
    int CUDA_GRID_Z;
    CUDA_GRID_Z = (4 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z;
    const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z);
    const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z);
    _auto_kernel_0<<<CUDA_gridSize,CUDA_blockSize>>>(d_a);
    cudaMemcpy(a, d_a, sizeof(int ) * 100 * 2 * 4, cudaMemcpyDeviceToHost);
    }
    }
    // NOTE(review): returns 2 unconditionally -- nonzero exit normally
    // signals failure; presumably a generator artifact.
    return 2;
}
|
3,436 | #include "includes.h"
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
    // Elementwise vector add: C[i] = A[i] + B[i] for i < N.
    //
    // The original staged the sum through a 256-float __shared__ buffer,
    // writing d[threadIdx.x%16] and reading d[threadIdx.x%8] with no
    // __syncthreads() in between. That is a data race (16 threads of every
    // 256 collide on each slot) and reads back a different slot than was
    // written, so C received garbage. The shared staging contributed
    // nothing, so it is removed and the sum is written directly.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {
        C[i] = A[i] + B[i];
    }
}
3,437 | #include <cuda.h>
#include <cuda_runtime_api.h>
__global__ void cuda_sum_kernel(float *a, float *b, float *c, size_t size)
{
    // Elementwise add c = a + b; one thread per element, tail guarded.
    const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) {
        c[i] = a[i] + b[i];
    }
}
extern "C" {
// Host wrapper with C linkage: copies a and b to the device, launches the
// add kernel in 256-thread blocks (grid rounded up via ceil), and copies
// the result back. cudaMemcpy on the default stream blocks until the
// kernel has finished, so no explicit synchronize is needed.
void cuda_sum(float *a, float *b, float *c, size_t size)
{
    const size_t bytes = size * sizeof(float);
    float *d_a, *d_b, *d_c;

    cudaMalloc((void **)&d_a, bytes);
    cudaMalloc((void **)&d_b, bytes);
    cudaMalloc((void **)&d_c, bytes);

    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);

    cuda_sum_kernel <<< ceil(size / 256.0), 256 >>> (d_a, d_b, d_c, size);

    cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
}
3,438 | //--------------------------------------------------
// Autor: Ricardo Farias
// Data : 29 Out 2011
// Goal : Increment a variable in the graphics card
//--------------------------------------------------
/***************************************************************************************************
Includes
***************************************************************************************************/
#include <cuda.h>
#include <iostream>
#include <iomanip>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <pthread.h>
bool volatile lock = true;
// Each launched thread atomically adds 1 to *a, so after the kernel the
// value equals the total number of threads in the grid.
__global__ void somaUm( int *a ) {
    atomicAdd( a, 1 );
}
// Worker thread 1: runs first (f2 spins on `lock` until this thread clears
// it). Increments a device counter with a 1x5 launch on device 1, prints
// the result, then releases f2 by setting lock = false.
void *f1( void *x ) {
    int i;
    i = *(int *)x;              // thread label passed from main
    printf("\tf1: %d\n",i);
    int h_a = 0;
    int deviceCount = 0;
    cudaGetDeviceCount( &deviceCount );
    // This function call returns 0 if there are no CUDA capable devices.
    if( deviceCount == 0 ) {
        printf("There is no device supporting CUDA\n");
        exit( 1 );
    }
    // NOTE(review): hard-codes device 1; fails silently on single-GPU boxes
    // (the cudaSetDevice error is never checked) -- confirm intent.
    cudaSetDevice(1);
    int *d_a; // Pointer to host & device arrays
    cudaMalloc( (void **) &d_a, sizeof( int ) ) ;
    // Copy array to device
    cudaMemcpy( d_a, &h_a, sizeof(int), cudaMemcpyHostToDevice ) ;
    printf( "Valor de a antes = %d\n", h_a );
    //------------------------------------------------
    lock = true;                 // (already true at startup; redundant here)
    somaUm<<< 1, 5 >>>( d_a );   // 5 threads -> counter becomes 5
    // blocking copy: also waits for the kernel to finish
    cudaMemcpy( &h_a, d_a, sizeof(int), cudaMemcpyDeviceToHost ) ;
    printf( "\tValor de a depois da chamada do filho f1 = %d\n", h_a );
    lock = false;                // release f2's busy-wait
    pthread_exit(0);
}
// Worker thread 2: busy-waits until f1 clears `lock`, then increments its
// own device counter with a 5x5 launch and prints the result (25).
void *f2( void *x ) {
    int i;
    i = *(int *)x;              // thread label passed from main
    // NOTE(review): `lock` is a plain bool spun on from another thread;
    // `volatile` makes this work in practice but it is not a portable
    // synchronization primitive -- a condition variable would be.
    while( lock );
    printf("\tf2: %d\n",i);
    int h_a = 0;
    int deviceCount = 0;
    cudaGetDeviceCount( &deviceCount );
    // This function call returns 0 if there are no CUDA capable devices.
    if( deviceCount == 0 ) {
        printf("There is no device supporting CUDA\n");
        exit( 1 );
    }
    int *d_a; // Pointer to host & device arrays
    cudaMalloc( (void **) &d_a, sizeof( int ) ) ;
    // Copy array to device
    cudaMemcpy( d_a, &h_a, sizeof(int), cudaMemcpyHostToDevice ) ;
    printf( "Valor de a antes = %d\n", h_a );
    //------------------------------------------------
    lock = true;                 // no third thread waits on this; harmless
    somaUm<<< 5, 5 >>>( d_a );   // 25 threads -> counter becomes 25
    cudaMemcpy( &h_a, d_a, sizeof(int), cudaMemcpyDeviceToHost ) ;
    printf( "\tValor de a depois da chamada do filho f2 = %d\n", h_a );
    pthread_exit(0);
}
int main() {
    // Launch both workers; f2 busy-waits on the global `lock` until f1 has
    // printed its kernel result, serialising the two GPU increments.
    pthread_t f2_thread, f1_thread;
    int i1 = 1;
    int i2 = 2;
    pthread_create( &f1_thread, NULL, f1, &i1 );
    pthread_create( &f2_thread, NULL, f2, &i2 );
    pthread_join( f1_thread, NULL );
    pthread_join( f2_thread, NULL );
    // Return 0: the original returned 1, which signals failure to the shell.
    // (The original's in-function redeclarations of f1/f2 were redundant and
    // are removed.)
    return 0;
}
|
3,439 | #include <stdio.h>
int main()
{
    /*
     * Query the currently active GPU and fill in the values reported by the
     * printf below. (The original left every variable uninitialized -- the
     * stated exercise -- which is undefined behaviour when printed.)
     */
    int deviceId;
    int computeCapabilityMajor;
    int computeCapabilityMinor;
    int multiProcessorCount;
    int warpSize;

    cudaGetDevice(&deviceId);
    cudaDeviceGetAttribute(&computeCapabilityMajor, cudaDevAttrComputeCapabilityMajor, deviceId);
    cudaDeviceGetAttribute(&computeCapabilityMinor, cudaDevAttrComputeCapabilityMinor, deviceId);
    cudaDeviceGetAttribute(&multiProcessorCount, cudaDevAttrMultiProcessorCount, deviceId);
    cudaDeviceGetAttribute(&warpSize, cudaDevAttrWarpSize, deviceId);

    /*
     * Output string unchanged from the exercise template.
     */
    printf("Device ID: %d\nNumber of SMs: %d\nCompute Capability Major: %d\nCompute Capability Minor: %d\nWarp Size: %d\n", deviceId, multiProcessorCount, computeCapabilityMajor, computeCapabilityMinor, warpSize);
}
|
3,440 | // From CUDA for Engineers
// Listing 8.3: dist_1d_thrust
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <iostream>
#define N 64
using namespace thrust::placeholders; // _1
// Unary functor usable on host or device: returns sqrt(x). Passed to
// thrust::transform below for the final square-root step.
struct SqrtOf{
    __host__ __device__
    float operator()(float x) {
        return sqrt(x);
    }
};
// Computes, for N evenly spaced points x in [0,1], the distance |x - 0.5|
// entirely with Thrust transforms, then prints each (x, dist) pair.
int main()
{
    const float ref = 0.5f;
    thrust::device_vector<float> d_x(N);
    thrust::device_vector<float> d_dist(N);
    // d_x = 0, 1, ..., N-1
    thrust::sequence(d_x.begin(), d_x.end());
    // Normalize into [0,1]. _1 is a Thrust placeholder: the expression
    // becomes a device functor applied elementwise.
    thrust::transform(d_x.begin(), d_x.end(), d_x.begin(), _1/(N-1));
    // Squared distance from the reference point...
    thrust::transform(d_x.begin(), d_x.end(), d_dist.begin(), (_1 - ref)*(_1 - ref));
    // ...then take the square root (placeholders cannot call sqrt, hence
    // the explicit functor).
    thrust::transform(d_dist.begin(), d_dist.end(), d_dist.begin(), SqrtOf());
    // Copy both vectors back to the host for printing.
    thrust::host_vector<float> h_x = d_x;
    thrust::host_vector<float> h_dist = d_dist;
    for (int i = 0; i < N; i++) {
        printf("x=%3.3f, dist=%3.3f\n", h_x[i], h_dist[i]);
    }
    std::cout << "dist_1d_thrust\n";
    return 0;
}
|
3,441 | #include "includes.h"
// Builds the right-hand side for the implicit solve, in place:
//   cHalf += -(2/3)*(cCurr - cOld) + cNonLinRHS
// and then advances history (cOld = cCurr). 2-D launch over an (ny x nx)
// grid flattened row-major with row stride nx.
__global__ static void findRHS(double* cOld, double* cCurr, double* cHalf, double* cNonLinRHS, int nx)
{
    // Matrix index
    int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
    int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
    // Set index being computed
    int index = globalIdy * nx + globalIdx;
    // NOTE(review): there is no bounds guard, so the launch grid must match
    // the array extents exactly (ny is not even passed in). Extra threads
    // would read/write out of bounds -- confirm the launch configuration.
    // Set the RHS for inversion
    cHalf[index] += - (2.0 / 3.0) * (cCurr[index] - cOld[index]) + cNonLinRHS[index];
    // Set cOld to cCurr
    cOld[index] = cCurr[index];
}
3,442 | #include "includes.h"
extern "C" {
#ifndef NUMBER
#define NUMBER float
#endif
}
__global__ void vector_set (const int n, const NUMBER val, NUMBER* x, const int offset_x, const int stride_x) {
    // Strided fill: write `val` into n elements of x, starting at offset_x
    // and stepping stride_x elements apart. One thread per element.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    x[offset_x + i * stride_x] = val;
}
3,443 | #include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <stdint.h>
#include <assert.h>
#include <time.h>
#include <math.h>
/*
Author: Andrew DiPrinzio
Course: EN605.417.FA
*/
static const uint32_t DEFAULT_NUM_THREADS = 1024;
static const uint32_t DEFAULT_BLOCK_SIZE = 16;
// Print command-line help. Kept in sync with parse_arguments' option string.
static void usage(){
    printf("Usage: ./assignment3 [-t <num_threads>] [-b <block_size>] [-h]\n");
    printf("\t-t: Specify the number of threads. <num_threads> must be greater than 0. Optional (default %u)\n", DEFAULT_NUM_THREADS);
    printf("\t-b: Specify the size of each block. <block_size> must be greater than 0. Optional (default %u)\n", DEFAULT_BLOCK_SIZE);
}
// Structure that holds program arguments specifying number of threads/blocks
// to use.
// Parsed command-line configuration.
typedef struct {
    uint32_t num_threads;   // total number of elements/threads (-t)
    uint32_t block_size;    // CUDA threads per block (-b)
} Arguments;
// Parse the command line arguments using getopt and return an Argument structure
// GetOpt requies the POSIX C Library
// Parse -t/-b/-h with getopt (requires the POSIX C library) and return an
// Arguments struct. usage() promises both values are > 0, so anything else
// (including atoi's 0 return on non-numeric input) prints the help text and
// exits -- the original accepted 0/garbage, which later divides by zero in
// the launch-geometry math.
static Arguments parse_arguments(const int argc, char ** argv){
    // Argument format string for getopt
    static const char * _ARG_STR = "ht:b:";
    // Initialize arguments to their default values
    Arguments args;
    args.num_threads = DEFAULT_NUM_THREADS;
    args.block_size = DEFAULT_BLOCK_SIZE;
    // Parse any command line options
    int c;
    int value;
    while ((c = getopt(argc, argv, _ARG_STR)) != -1) {
        switch (c) {
            case 't':
                value = atoi(optarg);
                if (value <= 0) {
                    usage();
                    exit(-1);
                }
                args.num_threads = value;
                break;
            case 'b':
                value = atoi(optarg);
                if (value <= 0) {
                    usage();
                    exit(-1);
                }
                args.block_size = value;
                break;
            case 'h':
                // 'help': print usage, then exit
                // note the deliberate fall through into default's exit
                usage();
            default:
                exit(-1);
        }
    }
    return args;
}
//Kernel that adds two vectors
//Kernel that adds two vectors
// In-place elementwise add: a[i] += b[i], one thread per element.
// NOTE(review): there is no bounds guard, so blocks*threads must not exceed
// the allocated array length -- the launch site is responsible for this.
__global__
void add_ab(int *a, const int *b)
{
    const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    a[thread_idx] += b[thread_idx];
}
// Helper function to generate a random number within a defined range
// Uniform-ish random integer in [min, max] drawn from rand(), using the
// range-partitioning form that avoids the low-bit bias of rand() % k.
// Requires max > min (min == max overflows the bucket computation).
int random(int min, int max){
    const int bucket = RAND_MAX / (max - min + 1) + 1;
    return min + rand() / bucket;
}
// Generate two random int vectors of length args.num_threads, add them on
// the GPU, and print every sum.
void run_vector_add(Arguments args)
{
    printf("Running random vector add with %u threads and a block size of %u\n", args.num_threads, args.block_size);
    int array_size = args.num_threads;
    const unsigned int array_size_in_bytes = array_size * sizeof(int);

    /* Randomly generate input vectors and dynamically allocate their memory */
    int * a = (int*)malloc(array_size_in_bytes);
    int * b = (int*)malloc(array_size_in_bytes);
    if (a == NULL || b == NULL) {
        fprintf(stderr, "Host allocation failed\n");
        exit(-1);
    }
    int i;
    for (i = 0; i < array_size; i++) {
        a[i] = random(0,100);
        b[i] = random(0,100);
    }

    /* Launch geometry: round the block count UP so every element is covered.
     * The original used floor division (array_size / block_size), which left
     * the tail of the array un-summed whenever block_size did not divide
     * array_size. Because add_ab has no bounds guard, the device buffers are
     * padded to a whole number of blocks so the surplus threads still touch
     * valid (zeroed) memory. */
    const unsigned int num_blocks = (array_size + args.block_size - 1) / args.block_size;
    const unsigned int num_threads_per_blk = args.block_size;
    const unsigned int padded_bytes = num_blocks * num_threads_per_blk * sizeof(int);

    /* Declare pointers for GPU based params */
    int *a_d;
    int *b_d;
    cudaMalloc((void**)&a_d, padded_bytes);
    cudaMalloc((void**)&b_d, padded_bytes);
    cudaMemset(a_d, 0, padded_bytes);   /* well-defined values in the padding */
    cudaMemset(b_d, 0, padded_bytes);
    cudaMemcpy( a_d, a, array_size_in_bytes, cudaMemcpyHostToDevice );
    cudaMemcpy( b_d, b, array_size_in_bytes, cudaMemcpyHostToDevice );

    /* Execute our kernel */
    add_ab<<<num_blocks, num_threads_per_blk>>>(a_d, b_d);

    /* Copy the result back; the blocking memcpy also waits for the kernel */
    cudaMemcpy(a, a_d, array_size_in_bytes, cudaMemcpyDeviceToHost );
    cudaFree(a_d);
    cudaFree(b_d);

    /* Iterate through the result array and print */
    for(unsigned int i = 0; i < array_size; i++)
    {
        printf("Sum #%d: %d\n",i,a[i]);
    }

    /* The original leaked the host arrays. */
    free(a);
    free(b);
}
// Entry point: parse the CLI options, then run the GPU vector-add demo.
int main(int argc, char ** argv)
{
    run_vector_add(parse_arguments(argc, argv));
    return EXIT_SUCCESS;
}
3,444 | #include <iostream>
#include <vector>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <curand.h>
using namespace std;
constexpr unsigned int ITERATIONS = 1 << 24;
constexpr unsigned int ITERATIONS_KERNEL = 1 << 16;
constexpr unsigned int TOTAL_KERNELS = ITERATIONS / ITERATIONS_KERNEL;
// One thread per sample: results[idx] = 1 if the point falls inside the
// unit quarter-circle, else 0. (This per-point variant is unused by the
// current main, which calls the looped overload below.)
__global__ void monte_carlo_pi(const float2 *points, char *results)
{
    // Calculate index
    unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    // Get the point to work on
    float2 point = points[idx];
    // Distance from the origin (no built-in length for float2)
    float l = sqrtf((point.x * point.x) + (point.y * point.y));
    // 1.0f, not 1.0: the original's double literal forced a silent
    // float->double promotion for the comparison on every thread.
    if (l <= 1.0f)
        results[idx] = 1;
    else
        results[idx] = 0;
}
// Each thread scans `iterations` consecutive points and writes one count of
// in-circle hits to results[idx].
__global__ void monte_carlo_pi(unsigned int iterations, float2 *points, int *results)
{
    // Calculate index
    unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    // Contiguous slice of the point buffer owned by this thread
    unsigned int start = idx * iterations;
    unsigned int end = start + iterations;
    // Accumulate in a register: the original incremented results[idx] in
    // global memory once per iteration (a read-modify-write of DRAM/L2 per
    // point). A single store at the end is equivalent and much cheaper.
    int count = 0;
    for (unsigned int i = start; i < end; ++i)
    {
        // Get the point to work on
        float2 point = points[i];
        // Distance from the origin
        float l = sqrtf((point.x * point.x) + (point.y * point.y));
        if (l <= 1.0f)
            ++count;
    }
    results[idx] = count;
}
// Estimate pi by Monte Carlo: generate 2^24 quasi-random 2-D points on the
// GPU, count how many land inside the unit circle (looped kernel, one count
// per thread), and combine the counts on the host.
int main(int argc, char **argv)
{
    // Allocate host memory for results
    // For first approach
    // vector<char> results(ITERATIONS);
    // For second approach
    vector<int> results(TOTAL_KERNELS);
    // Allocate device memory for random data
    float2 *point_buffer;
    cudaMalloc((void**)&point_buffer, sizeof(float2) * ITERATIONS);
    // Allocate device memory for results data
    // For first approach
    // char *result_buffer;
    // cudaMalloc((void**)&result_buffer, sizeof(char) * ITERATIONS);
    // For second approach
    int *result_buffer;
    cudaMalloc((void**)&result_buffer, sizeof(int) * TOTAL_KERNELS);
    // Create random values on the GPU
    // Create generator: 2-dimensional Sobol quasi-random sequence, so the
    // interleaved floats form well-distributed (x, y) pairs.
    curandGenerator_t rnd;
    curandCreateGenerator(&rnd, CURAND_RNG_QUASI_SOBOL32);
    curandSetQuasiRandomGeneratorDimensions(rnd, 2);
    curandSetGeneratorOrdering(rnd, CURAND_ORDERING_QUASI_DEFAULT);
    // Generate random numbers - point_buffer is an allocated device buffer
    // (2 floats per point, hence 2 * ITERATIONS values)
    curandGenerateUniform(rnd, (float*)point_buffer, 2 * ITERATIONS);
    // Destroy generator
    curandDestroyGenerator(rnd);
    // Execute kernel: one thread per TOTAL_KERNELS slice, 128 threads/block
    monte_carlo_pi<<<TOTAL_KERNELS / 128, 128>>>(ITERATIONS_KERNEL, point_buffer, result_buffer);
    // Wait for kernel to complete
    cudaDeviceSynchronize();
    // Read output buffer back to host
    cudaMemcpy(&results[0], result_buffer, sizeof(int) * TOTAL_KERNELS, cudaMemcpyDeviceToHost);
    // Sum the per-thread hit counts
    int in_circle = 0;
    for (auto &v : results)
        in_circle += v;
    // Points are uniform on the unit square, so pi ~= 4 * hits / total
    float pi = (4.0f * static_cast<float>(in_circle)) / static_cast<float>(ITERATIONS);
    cout << "pi = " << pi << endl;
    cudaFree(result_buffer);
    cudaFree(point_buffer);
    return 0;
}
3,445 | // System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// NOTE(review): despite the cuBLAS-style SGEMM name and parameter list, this
// body is a stub/microbenchmark: it only stages one value per thread through
// shared memory into C. None of the GEMM parameters (lda/ldb/ldc, M, N, K,
// alpha, beta, flag) are used.
extern "C" __global__ void
maxwell_sgemm_64x64_raggedMn_nt(float *B, float *A, float *C, int ldb, int lda, int ldc, int N, int M, int K, float *alpha, float *beta, float alpha_, float beta_, int flag)
{
    __shared__ float smem[2048];
    int tid = threadIdx.x;
    // NOTE(review): `tid > 32` puts threads 0..32 (33 of them) on B and the
    // rest on A -- if a warp split was intended this is off by one
    // (tid >= 32); confirm intent. Also assumes blockDim.x <= 2048 so the
    // smem/A/B/C accesses stay in bounds.
    float val = (tid > 32) ? A[tid] : B[tid];
    smem[tid] = val;
    // Barrier is unnecessary for correctness here (each thread reads back
    // only its own slot) but harmless.
    __syncthreads();
    C[tid] = smem[tid];
}
|
3,446 | #include "includes.h"
// Gathers the top-N region proposals selected by `mask` (indices into the
// per-batch sorted candidate lists i/j/k/b) out of the raw anchor tensor
// `inputs`, writing 4 box values (x,y,w,h) and 4 anchor coordinates per
// proposal. One thread per proposal, one z-block per batch element.
__global__ void cudaSGatherRP_kernel( unsigned int inputSizeX, unsigned int inputSizeY, unsigned int nbAnchors, unsigned int batchSize, const float* inputs, const int* i, const int* j, const int* k, const int* b, const int* mask, float* outputs, int* anchors, unsigned int topN, const unsigned int nbProposals)
{
    const int batchPos = blockIdx.z;
    const int sortOffset = batchPos*topN;           // start of this batch's sorted list
    // NOTE(review): `threadIdx.x & 0x1f` keeps only the warp lane, so for
    // blockDim.x > 32 distinct threads collapse onto the same index and
    // proposals are skipped/duplicated. Presumably blockDim.x == 32 at the
    // launch site, or this should be plain threadIdx.x -- verify.
    const int index = (threadIdx.x & 0x1f) + blockIdx.x*blockDim.x;
    const int totalIndex = index + sortOffset;      // into the per-batch sorted arrays
    const int batchIndex = index + batchPos*nbProposals;  // into the packed outputs
    if(index < nbProposals)
    {
        // Each coordinate lives in its own channel plane of the input tensor:
        // flat index = i + j*X + (k + plane*nbAnchors)*X*Y + b*nbAnchors*X*Y*6
        // with plane = 1 (x), 2 (y), 3 (w), 4 (h).
        unsigned int xIdx = i[ mask[totalIndex] + sortOffset ]
        + j[ mask[totalIndex] + sortOffset ]*inputSizeX
        + (k[ mask[totalIndex] + sortOffset ] + nbAnchors)*inputSizeX*inputSizeY
        + b[ mask[totalIndex] + sortOffset ]*nbAnchors*inputSizeX*inputSizeY*6;
        unsigned int yIdx = i[ mask[totalIndex] + sortOffset ]
        + j[ mask[totalIndex] + sortOffset ]*inputSizeX
        + (k[ mask[totalIndex] + sortOffset ] + 2*nbAnchors)*inputSizeX*inputSizeY
        + b[ mask[totalIndex] + sortOffset ]*nbAnchors*inputSizeX*inputSizeY*6;
        unsigned int wIdx = i[ mask[totalIndex] + sortOffset ]
        + j[ mask[totalIndex] + sortOffset ]*inputSizeX
        + (k[ mask[totalIndex] + sortOffset ] + 3*nbAnchors)*inputSizeX*inputSizeY
        + b[ mask[totalIndex] + sortOffset ]*nbAnchors*inputSizeX*inputSizeY*6;
        unsigned int hIdx = i[ mask[totalIndex] + sortOffset ]
        + j[ mask[totalIndex] + sortOffset ]*inputSizeX
        + (k[ mask[totalIndex] + sortOffset ] + 4*nbAnchors)*inputSizeX*inputSizeY
        + b[ mask[totalIndex] + sortOffset ]*nbAnchors*inputSizeX*inputSizeY*6;
        // NOTE(review): anchors are indexed with mask[totalIndex] alone,
        // without the +sortOffset used for the box lookups above -- for
        // batchPos > 0 these read from batch 0's candidate list. Confirm
        // whether the offset was intended here too.
        anchors[0 + (batchIndex)*4] = i[mask[totalIndex]];
        anchors[1 + (batchIndex)*4] = j[mask[totalIndex]];
        anchors[2 + (batchIndex)*4] = k[mask[totalIndex]];
        anchors[3 + (batchIndex)*4] = b[mask[totalIndex]];
        outputs[0 + (batchIndex)*4] = inputs[xIdx];
        outputs[1 + (batchIndex)*4] = inputs[yIdx];
        outputs[2 + (batchIndex)*4] = inputs[wIdx];
        outputs[3 + (batchIndex)*4] = inputs[hIdx];
    }
}
3,447 | /* Command to compile on Windows:
nvcc .\lab5_2_1.cu -ccbin "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.29.30133\bin\Hostx64\x64"
*/
#include <stdio.h>
__global__ void hello_GPU(void) {
    // Block 0 prints only from threads 0-3; every other block prints from
    // all of its threads.
    const bool suppressed = (blockIdx.x == 0) && (threadIdx.x > 3);
    if (!suppressed) {
        printf("Hello from GPU%i[%i]!\n", blockIdx.x + 1, threadIdx.x);
    }
}
int main(void) {
    printf("Hello from CPU!\n");
    // Two blocks of six threads each; block 0 suppresses threads 4-5.
    hello_GPU<<<2, 6>>>();
    // Block until the device printf buffer has been flushed.
    cudaDeviceSynchronize();
    return 0;
}
3,448 | //#include "StdAfx.h"
#include <iostream>
#include <stdio.h>
#include <cuda_runtime_api.h>
#define MAX_FILE_NAME_CHARS 40
#define MAX_OUTFILE_NAME_CHARS 45
#define FRAMES_PER_ITER 256
#define PROMETHEUS_TESLA_C2075 1
using namespace std;
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort with file/line context when a checked CUDA API call failed.
// Compiled to a no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
}
// Check for a pending (asynchronous) CUDA error, e.g. after a kernel
// launch, and abort with file/line context if one is found.
// Compiled to a no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    cudaError err = cudaGetLastError();
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
    // More careful checking. However, this will affect performance.
    // Comment away if needed.
    err = cudaDeviceSynchronize();
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
}
// Per-thread absolute frame difference: each thread processes 128 bytes,
// computing |frame[n+1] - frame[n]| byte-wise. OneBlockSize is the distance
// to the corresponding byte of the next frame.
// NOTE(review): the offset assumes one frame == blockDim.x * 128 bytes, i.e.
// one block per frame -- confirm against the (unfinished) launch site.
// NOTE(review): `char` is signed here, so differences outside [-128,127]
// wrap before the sign check; for 8-bit pixel data an unsigned widening
// would be more robust.
__global__ void cudafunc(char *in_file, char *out_file)
{
    unsigned long long StartIdx = ((blockIdx.x * blockDim.x) + threadIdx.x) * 128;
    unsigned long long EndIdx = ((blockIdx.x * blockDim.x) + threadIdx.x + 1) * 128;
    unsigned long long OneBlockSize = blockDim.x * 128;
    unsigned long long i;
    char temp;
    for(i=StartIdx; i<EndIdx; i++)
    { temp = in_file[i + OneBlockSize] - in_file[i];
        if(temp < 0)
            out_file[i] = (-1 * temp);   // absolute value of the signed difference
        else out_file[i] = temp;
    }
}
// Holds the state for one YUV 4:2:0 video being frame-differenced:
// input/output file handles, geometry, and the host-side frame buffers.
class Vid
{
public:
    FILE *InFile;                              // opened input video
    FILE *OutFile;                             // opened "<name>_diff.yuv" output
    char FileName[MAX_FILE_NAME_CHARS];        // input path
    char OutFileName[MAX_OUTFILE_NAME_CHARS];  // derived output path
    int Wdth, Hght;                            // frame dimensions in pixels
    int TotFrame;                              // frame count derived from file size
    unsigned long long Totalbytes;             // input file size in bytes
    float PlayTime;                            // unused in the visible code
    char *VideoFrameData;                      // host staging buffer (input frames)
    char *OutFrameData;                        // host staging buffer (diff frames)
    int BytesPerFrame;                         // w*h*3/2 for YUV 4:2:0
    int FramesInThisIter;                      // frames processed per GPU batch
    bool init(char *);                         // record names, zero counters
    bool open_file(); //opens both input and output
    bool Prt_vid();                            // dump state for debugging
    bool ReadFrames();                         // stub (always returns 1)
};
// Record the input file name and derive "<name>_diff.yuv" for the output,
// then zero the geometry counters. Exits (code 20) when a name cannot fit.
bool Vid::init(char *Fname)
{
    size_t name_len = strlen(Fname);
    // strcpy also writes a NUL terminator, so the name must be strictly
    // shorter than the buffer. The original used '<', letting a name of
    // exactly MAX_FILE_NAME_CHARS overflow FileName by one byte. The name
    // must also carry a 4-char extension to strip (e.g. ".yuv"/".y4m").
    if (name_len >= MAX_FILE_NAME_CHARS || name_len < 5)
    {
        printf("File Name is bigger than space allocated\n");
        exit(20);
    }
    // Output name = base (name_len - 4) + "_diff.yuv" (9) + NUL.
    if (name_len - 4 + 10 > MAX_OUTFILE_NAME_CHARS)
    {
        printf("File Name is bigger than space allocated\n");
        exit(20);
    }
    memset(FileName, 0, MAX_FILE_NAME_CHARS);
    strcpy(FileName, Fname);
    memset(OutFileName, 0, MAX_OUTFILE_NAME_CHARS);
    memcpy(OutFileName, FileName, name_len - 4);   // drop the 4-char extension
    strcat(OutFileName, "_diff.yuv");
    Wdth = Hght = TotFrame = 0;
    Totalbytes = 0;
    return 1;
}
// Open the input video for reading and the derived output for writing, then
// compute the frame geometry from the file size. Requires Wdth/Hght to be
// set first (main assigns them before calling this).
bool Vid::open_file()
{
    InFile = fopen(FileName, "rb");
    if(NULL == InFile)
    {
        printf("Possibly %s doesnt exist, please check, exiting", FileName);
        exit(21);
    }
    OutFile = fopen(OutFileName, "wb");
    if(NULL == OutFile)
    {
        // The original format string had no conversion specifier for the
        // file-name argument it passed (and passed the wrong name anyway);
        // report the file that actually failed to open.
        printf("Problem in creating output file %s, exiting", OutFileName);
        exit(22);
    }
    // Frame count = file size / bytes per YUV 4:2:0 frame (w*h*3/2).
    if(!fseek(InFile, 0, SEEK_END))
    {   Totalbytes = ftell(InFile);
        fseek(InFile, 0, SEEK_SET);
        TotFrame = (int)(Totalbytes / ((Wdth * Hght * 3) / 2));
    }
    BytesPerFrame = (Wdth * Hght * 3)/2;
    return 1;
}
bool Vid::Prt_vid()
{
// Dump every field of the clip descriptor to stdout for debugging.
std::cout<<" InFile : "<<InFile<<std::endl;
std::cout<<" OutFile : "<<OutFile<<std::endl;
std::cout<<" FileName : "<<FileName<<std::endl;
std::cout<<" OutFileName : "<<OutFileName<<std::endl;
std::cout<<" Wdth : "<<Wdth<<" Hght : "<<Hght<<std::endl;
std::cout<<" Totalbytes : "<<Totalbytes<<std::endl;
std::cout<<" TotFrame : "<<TotFrame<<std::endl;
std::cout<<" BytesPerFrame : "<<BytesPerFrame<<std::endl;
std::cout<<" Sizeof(char) : "<<sizeof(char)<<std::endl;
return 1;
}
// Stub: frame reading currently happens inline in main(); always "succeeds".
bool Vid::ReadFrames()
{
return 1;
}
int main(int argc, char* argv[])
{
//assume video is .y4m file with headers and "FRAME" to separate frame data
//of YUV 4:2:0 format. Computes per-byte absolute differences between
//consecutive frames on the GPU and writes them to "<name>_diff.yuv".
Vid Video;
if(4 != argc)
{ printf("More params needed, format is %s <Filename> <Width> <Height>", argv[0]);
return 1; // bug fix: previously fell through and dereferenced argv[1]
}
Video.init(argv[1]);
Video.Wdth = (int) atoi(argv[2]);
Video.Hght = (int) atoi(argv[3]);
Video.open_file();
Video.Prt_vid();
int FramesPerIter = 256;
// NOTE(review): buffers are sized from the local FramesPerIter but the loop
// batches FRAMES_PER_ITER frames -- these must agree (confirm the macro is 256)
// or the fread below overruns VideoFrameData.
Video.VideoFrameData = new char [FramesPerIter * Video.BytesPerFrame];
Video.OutFrameData = new char[(FramesPerIter - 1) * Video.BytesPerFrame];
// select device #1, Tesla C2075
if( cudaSetDevice(PROMETHEUS_TESLA_C2075) != cudaSuccess ) exit( 1 );
char *c_InVid;
char *c_OutVid;
CudaSafeCall(cudaMalloc((void **)&c_InVid,(FramesPerIter * Video.BytesPerFrame * sizeof(char))));
CudaSafeCall(cudaMalloc((void **)&c_OutVid,((FramesPerIter - 1) * Video.BytesPerFrame * sizeof(char))));
memset(Video.OutFrameData, 100, ((FramesPerIter - 1) * Video.BytesPerFrame * sizeof(char)));
int FrameRemaining = Video.TotFrame;
while(FrameRemaining > 0)
{
int temp = FrameRemaining - FRAMES_PER_ITER;
if(temp > 0)
{
Video.FramesInThisIter = FRAMES_PER_ITER;
// +1 because the last frame of this batch is re-read as the first
// frame of the next one (see the fseek below).
FrameRemaining = FrameRemaining - FRAMES_PER_ITER + 1;
}
else if(1 == FrameRemaining)
{ break;
//last frame remaining, can't find it's difference
}
else
{ //take the remaining no. of frames
Video.FramesInThisIter = FrameRemaining;
FrameRemaining = 0;
}
cout<<" FramesInThisIter "<<Video.FramesInThisIter<<endl;
// Rewind one frame so consecutive batches overlap by one frame (skip on
// the very first batch -- crude position heuristic).
if(ftell(Video.InFile) > (10 * Video.BytesPerFrame))
{ fseek(Video.InFile, -Video.BytesPerFrame, SEEK_CUR);
FrameRemaining += 1;
}
// Bug fix: the fread result was ignored; a short read would have sent
// stale buffer contents to the GPU.
size_t got = fread(Video.VideoFrameData, sizeof(char), (Video.FramesInThisIter * Video.BytesPerFrame), Video.InFile);
if(got != (size_t)(Video.FramesInThisIter * Video.BytesPerFrame))
{ printf("Short read from input file, stopping\n");
break;
}
//send it to CUDA
unsigned long long TtlBytesToSend = Video.FramesInThisIter * Video.BytesPerFrame * sizeof(char);
cudaMemcpy(c_InVid, Video.VideoFrameData, TtlBytesToSend, cudaMemcpyHostToDevice);
// One block per frame pair; one thread per 128-byte chunk of a frame.
int blocks = Video.FramesInThisIter - 1;
int threads = Video.BytesPerFrame / 128;
cout<<" blocks "<<blocks<<endl;
cout<<" threads "<<threads<<endl;
cudafunc<<<blocks,threads>>>(c_InVid, c_OutVid);
cout<<" After call done "<<endl;
CudaCheckError();
cout<<" CudaCheckeror done"<<endl;
//CUDA does calculation; get it back and append to the output file
cudaMemcpy(Video.OutFrameData, c_OutVid, ((Video.FramesInThisIter - 1)* Video.BytesPerFrame * sizeof(char)), cudaMemcpyDeviceToHost);
fwrite(Video.OutFrameData, sizeof(char), ((Video.FramesInThisIter - 1)* Video.BytesPerFrame * sizeof(char)), Video.OutFile);
}
fflush(Video.OutFile);
fclose(Video.OutFile);
fclose(Video.InFile);
cudaFree(c_InVid);
cudaFree(c_OutVid);
// Bug fix: these buffers were allocated with new[] but released with
// free(), which is undefined behavior.
delete[] Video.VideoFrameData;
delete[] Video.OutFrameData;
return 1;
}
|
3,449 | #include <thrust/system/omp/vector.h>
#include <thrust/system/tbb/vector.h>
#include <thrust/iterator/retag.h>
#include <cstdio>
// Functor invoked once per vector element; the element value is unused.
struct omp_hello
{
void operator()(int /*unused*/)
{
printf("Hello, world from OpenMP!\n");
}
};
// Functor invoked once per vector element; the element value is unused.
struct tbb_hello
{
void operator()(int /*unused*/)
{
printf("Hello, world from TBB!\n");
}
};
int main()
{
// Two host-side vectors, each bound to a specific Thrust backend.
thrust::omp::vector<int> omp_vec(2, 7);
thrust::tbb::vector<int> tbb_vec(2, 13);
// Retag the OpenMP vector's iterators so the algorithm dispatches to TBB...
thrust::for_each(thrust::reinterpret_tag<thrust::tbb::tag>(omp_vec.begin()),
thrust::reinterpret_tag<thrust::tbb::tag>(omp_vec.end()),
tbb_hello());
// ...and vice versa: run the OpenMP functor over the TBB vector's data.
thrust::for_each(thrust::reinterpret_tag<thrust::omp::tag>(tbb_vec.begin()),
thrust::reinterpret_tag<thrust::omp::tag>(tbb_vec.end()),
omp_hello());
}
|
3,450 | #include <iostream>
#include <math.h>
__global__
// Dense layer: y = W*x + b, one output row j per thread.
// W is m x n stored row-major (row j occupies W[j*n .. j*n + n - 1]).
void Dense(int n, int m, float *W, float *b, float *x, float* y)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
// Bug fix: guard the grid tail -- launches need not divide m evenly
// (the previously unused parameter m now bounds the write).
if (j >= m) return;
int jn = j*n;
float r = b[j];
for (int i = 0; i < n; i++)
r = r+W[jn+i]*x[i];
y[j] = r;
}
int main(void)
{
int N = 1024; // input width
int M = 2048; // output width (one thread per output)
int blockSize = 128;
int numBlocks = 2048/blockSize; // exact fit: numBlocks*blockSize == M
float *x, *y, *b, *W;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, M*sizeof(float));
cudaMallocManaged(&b, M*sizeof(float));
cudaMallocManaged(&W, M*N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f*i;
}
// Bias chosen so each output is exactly 3.5: sum_i(W*x) = N(N-1)/2,
// and b = -N(N-1)/2 + 3.5 cancels it.
for (int j = 0; j < M; j++) {
b[j] = -0.5f*N*(N-1)+3.5f;
}
for (int j=0;j<M;j++)
for(int i=0;i<N;i++) {
W[j*N+i]=1;
}
// 1000 repeated launches on the default stream execute back-to-back in
// order; only the final result is checked below.
for(int t=0;t<1000;t++)
Dense<<<numBlocks, blockSize>>>(N, M, W, b, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.5f -- see the bias comment above)
float maxError = 0.0f;
for (int j = 0; j < M; j++)
maxError = fmax(maxError, fabs(y[j]-3.5f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(W);
cudaFree(b);
cudaFree(x);
cudaFree(y);
return 0;
}
|
3,451 | /*
Program name: HelloGPU_ThreadOrganization.cu
Author name: Dr. Nileshchandra Pikle
Email: nilesh.pikle@gmail.com
Contact Number: 7276834418
Webpage: https://piklenileshchandra.wixsite.com/personal
Purpose: To demonstrate
1. How to write CUDA program
2. Calling CUDA kernel
3. How to compile & run CUDA program
4. How to retrieve thread attributes such as thread Ids, blockIds and block dimension
Description:
* Given two functions helloCPU() and helloGPU()
helloCPU() function is executed on CPU and prints message
"Hello from the CPU."
* helloGPU() function is executed on GPU and prints message
"Hello from the GPU."
* __global__ keyword before a function indicates that function to be executed on GPU
* <<<numB, numT>>> this specifies the number of thread blocks (numB) and the
number of threads per block (numT) launched for the function/kernel helloGPU()
* threadIdx.x gives thread identification number BUT LOCAL TO THREAD BLOCK,
* blockIdx.x gives block identification number,
* blockDim.x gives number of threads in block
To compile nvcc -arch=sm_35 1_HelloGPU_ThreadOrganization.cu
To Run ./a.out
*/
#include <stdio.h>
// Host-side greeting, printed before any GPU work starts.
void helloCPU()
{
printf("Hello from the CPU.\n");
}
// Each launched thread reports its coordinates via device-side printf.
__global__ void helloGPU()
{
printf("threadID = %d blockId = %d block Dimension = %d \n",
threadIdx.x, blockIdx.x, blockDim.x);
}
int main()
{
// Host greeting first, then a 3-block x 3-thread GPU launch.
helloCPU();
helloGPU<<<3, 3>>>(); // <<<Num_Thread_Blocks, Num_Threads_Per_Block>>>
// Block until the device finishes so its printf output is flushed
// before the process exits.
cudaDeviceSynchronize();
return 0;
}
|
3,452 | #include <stdio.h>
#define N 160
#define THREADS 16
// Shared-memory tree reduction: each block writes its partial maximum to
// max[blockIdx.x]; the host finishes the reduction over the per-block maxima.
// Assumes gridDim.x * blockDim.x == N (exact fit, no tail guard).
__global__ void max_kernel(float *A, float * max){
int i = blockDim.x*blockIdx.x+threadIdx.x;
__shared__ float smax[THREADS];
smax[threadIdx.x] = A[i];
__syncthreads(); // bug fix: all loads must land before any comparison
for(unsigned int s = blockDim.x/2; s > 0; s>>=1){
if(threadIdx.x < s && smax[threadIdx.x] < smax[threadIdx.x+s]){
smax[threadIdx.x] = smax[threadIdx.x+s];
}
// Bug fix: the barrier sat inside the divergent branch (undefined
// behavior); every thread must reach it once per reduction step.
__syncthreads();
}
if(threadIdx.x == 0){
// Bug fix: every block previously wrote *max (element 0), leaving
// max[1..gridDim.x-1] uninitialized for the host-side pass.
max[blockIdx.x] = smax[0];
}
}
int main(){
// Fill A with pseudo-random values, compute the max on the CPU, then
// reduce per-block partial maxima on the GPU and finish the job here.
float A[N], max, *maxs;
float *A_d, *max_d;
int i;
dim3 dimBlock(THREADS);
dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x);
max = -10000.0;
for(i = 0; i < N;i++){
A[i] = rand()/(float)(1<<30);
printf("%5.2f ", A[i]);
if(A[i] > max)
max = A[i];
}
printf("\nCPU -- %5.2f\n", max);
cudaMalloc((void**) &A_d, sizeof(float)*N);
cudaMalloc((void**) &max_d, sizeof(float)*dimGrid.x); // one partial per block
cudaMemcpy(A_d, A, sizeof(float)*N, cudaMemcpyHostToDevice);
max_kernel <<<dimGrid, dimBlock>>> (A_d, max_d);
maxs = (float*)malloc(sizeof(float)*dimGrid.x);
// cudaMemcpy synchronizes with the kernel before copying back.
cudaMemcpy(maxs, max_d, sizeof(float)*dimGrid.x, cudaMemcpyDeviceToHost);
// Final reduction over the per-block partial maxima.
max = -10000.0;
for(i = 0;i<dimGrid.x;i++){
if(maxs[i] > max){
max = maxs[i];
}
}
// Bug fix: this line labeled the GPU result as "CPU--".
printf("GPU--%5.2f \n", max);
cudaFree(A_d);
cudaFree(max_d);
free(maxs);
}
|
3,453 | #include "foo.cuh"
#define CHECK(res) { if(res != cudaSuccess){printf("Error :%s:%d , ", __FILE__,__LINE__); \
printf("code : %d , reason : %s \n", res,cudaGetErrorString(res));exit(-1);}}
// Trivial kernel: every launched thread prints one line from the device.
__global__ void foo()
{
printf("CUDA!\n");
}
// Launches 25 threads of the hello kernel and blocks until they finish,
// aborting via the CHECK macro if the device reports an error.
void useCUDA()
{
foo<<<1,25>>>();
CHECK(cudaDeviceSynchronize());
}
|
3,454 | #include <iostream>
#include <cstdlib>
#include <cuda.h>
#include <map>
#include <fstream>
using namespace std;
#define delta 10
#define rows 1000
#define columns 1000
// Scan each column upward from the robot's row `y` and record the row index
// just above the first height jump greater than `delta`.
// Returns a heap array of `columns` entries owned by the caller (delete[]);
// entries stay -1 for columns where no barrier was found.
// NOTE(review): `x` is unused; kept for interface compatibility. This CPU
// reference appears uncalled in the visible code.
int* findBarrier(int x, int y, int * Map[columns]){
//y-coordinates of the obstacles, one per column
int *yCoordinates = new int [columns];
for(int c = 0; c < columns; c++){
// Bug fix: entries were left uninitialized for barrier-free columns.
yCoordinates[c] = -1;
//scan the region above the parallel the robot stands on
for(int r = y; r > 0; r--){
//height difference between two vertically adjacent points
int currDelta = Map[r][c] - Map[r-1][c];
//if the difference exceeds delta, remember the y-coordinate
if( ( currDelta >= 0 ? currDelta : -currDelta ) > delta){
yCoordinates[c] = r-1;
break;
}
}
}
return yCoordinates;
}
// GPU counterpart of findBarrier: each thread scans `step` consecutive
// columns of the flattened map, walking upward from row `y` until the
// height difference between adjacent rows exceeds 10 (the hard-coded delta).
// NOTE(review): the host flattens as dataH[columns*i + j] while this kernel
// indexes data[i + j*row]; the two agree only because rows == columns in
// the visible configuration -- confirm before changing either constant.
__global__ void SomeKernel(int* res, int* data, int col, int row,int y, int step)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
//compute the identifier of the current thread
int currDelta = 0;
for (int i=step*threadId; (i<(threadId+1)*step) && (i < col); i++) //columns are split across threads
{
for (int j = y; j > 0; j--) //walk the rows of this column
{
currDelta = data[i + j*row] - data[i + (j-1)*row];
//if the current difference exceeds delta, remember the y-coordinate
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > 10){
res[i] = j-1;
break;
}
}
}
}
//int argc, char* argv[]
int main(int argc, char* argv[]){
// Benchmark driver: for a sweep of thread counts, time the GPU barrier
// search over a freshly generated random height map and write
// "<threads> <kernel-ms>" pairs to test0.txt.
map<int,float> Results;
for (int numbOfBlock = 1; numbOfBlock <= 1; numbOfBlock++ )
{
for(int numbOfThread = 1; numbOfThread <= columns; numbOfThread+=10){
//height range for the random generator
const int r_left = -5, r_right = 5;
//the robot stands on the bottom row of the map
int y = columns - 1;
//height map
int **Map = new int* [rows];
int* resH = (int*)malloc(rows*columns * sizeof(int));
for (int i=0; i<columns; i++)
resH[i] = 0;
//fill the map with random heights (first column fixed to 20)
for(int i = 0; i < rows; i++){
Map[i] = new int [columns];
for(int j = 0; j < columns; j++){
if(j!=0)
Map[i][j] = rand()%(r_left - r_right) + r_left;
else
Map[i][j] = 20;
}
}
//flatten the 2-D height map into a 1-D array for the device
int* dataH = (int*)malloc(columns * rows * sizeof(int));
for (int i=0; i<columns; i++)
for (int j=0; j<rows; j++)
dataH[columns*i + j] = Map[i][j];
cudaEvent_t start, stopCopyTo, stopWork, stopCopyFrom;
cudaEventCreate(&start);
cudaEventCreate(&stopCopyTo);
cudaEventCreate(&stopWork);
cudaEventCreate(&stopCopyFrom);
int* dataDevice;
int* resDevice;
//allocate device memory for the flattened map and the result vector
cudaMalloc((void**)&dataDevice, (rows * columns) * sizeof(int));
cudaMalloc((void**)&resDevice, (columns) * sizeof(int));
//copy input data to the GPU for processing
cudaEventRecord(start);
cudaMemcpy(dataDevice, dataH, (rows * columns) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(resDevice, resH, (columns)*sizeof(int), cudaMemcpyHostToDevice);
dim3 threads = dim3(numbOfThread);
dim3 blocks = dim3(numbOfBlock);
cudaEventRecord(stopCopyTo);
SomeKernel<<<blocks, threads>>>( resDevice,
dataDevice,
columns,
rows,
y,
(rows * columns)/(numbOfBlock*numbOfThread));
cudaEventRecord(stopWork);
cudaMemcpy(dataH, dataDevice, (rows * columns) * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(resH, resDevice, (columns) * sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stopCopyFrom);
// Bug fix: ensure the final event has completed before reading timings.
cudaEventSynchronize(stopCopyFrom);
float t1,t2,t3;
cudaEventElapsedTime(&t1, start, stopCopyTo);
cudaEventElapsedTime(&t2, stopCopyTo, stopWork);
cudaEventElapsedTime(&t3, stopWork, stopCopyFrom);
Results.insert(pair<int,float>(numbOfBlock*numbOfThread,t2));
// Bug fix: release everything allocated this iteration -- the old code
// leaked host buffers, device buffers, events, and the row-pointer
// array on every pass of the sweep loop.
for(int i = 0; i < rows; i++){
delete[] Map[i];
}
delete[] Map;
free(dataH);
free(resH);
cudaFree(dataDevice);
cudaFree(resDevice);
cudaEventDestroy(start);
cudaEventDestroy(stopCopyTo);
cudaEventDestroy(stopWork);
cudaEventDestroy(stopCopyFrom);
}
}
map<int,float>::iterator it;
ofstream fout("test0.txt");
for (it = Results.begin(); it != Results.end(); ++it)//write the results out
{
fout << it->first << ' ' << it->second << endl;
}
fout.close();
return 0;
}
|
3,455 | #include <cassert>
#include <cstdlib>
#include <iostream>
#include <chrono>
using namespace std;
#define MASK_LENGTH 7
__constant__ int mask[MASK_LENGTH];
__global__ void convolution_1d(int *array, int *result, int n);
void verify_result(int *array, int *mask, int *result, int n);
auto get_time() { return chrono::high_resolution_clock::now(); }
int main()
{
// 1-D convolution demo: pad the input by MASK_LENGTH/2 on each side,
// convolve on the GPU using constant memory for the mask and shared
// memory for the tile, then verify against a CPU reference.
int n = 1000 << 16;
int bytes_n = n * sizeof(int);
size_t bytes_m = MASK_LENGTH * sizeof(int);
int r = MASK_LENGTH / 2; // halo radius on each side
int n_p = n + r * 2; // padded input length
size_t bytes_p = n_p * sizeof(int);
// CPU
int *h_array = new int[n_p];
int *h_mask = new int[MASK_LENGTH];
int *h_result = new int[n];
// zero the halo cells, random data elsewhere
for (int i = 0; i < n_p; i++)
if ((i < r) || (i >= (n + r)))
h_array[i] = 0;
else
h_array[i] = rand() % 100;
for (int i = 0; i < MASK_LENGTH; i++)
h_mask[i] = rand() % 10;
// GPU
int *d_array, *d_result;
cudaMalloc(&d_array, bytes_p);
cudaMalloc(&d_result, bytes_n);
// CPU --> GPU (mask goes to __constant__ memory)
cudaMemcpy(d_array, h_array, bytes_p, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(mask, h_mask, bytes_m);
int THREADS = 256; // n is a multiple of 256, so the grid fits exactly
int GRID = (n + THREADS - 1) / THREADS;
size_t SHMEM = (THREADS + r * 2) * sizeof(int); // tile + halo
auto start = get_time();
convolution_1d<<<GRID, THREADS, SHMEM>>>(d_array, d_result, n);
// GPU --> CPU
// NOTE: the timed interval includes this D2H copy, which is also what
// synchronizes with the kernel before `finish` is sampled.
cudaMemcpy(h_result, d_result, bytes_n, cudaMemcpyDeviceToHost);
auto finish = get_time();
auto duration =
chrono::duration_cast<std::chrono::milliseconds>(finish - start);
cout << "temps écoulé en kernel = " << duration.count() << " ms\n";
verify_result(h_array, h_mask, h_result, n);
cout << "terminé avec succès"<<endl;
cudaFree(d_array);
cudaFree(d_result);
delete[] h_array;
delete[] h_result;
delete[] h_mask;
return 0;
}
// Shared-memory 1-D convolution. `array` is the PADDED input (length
// n + 2*(MASK_LENGTH/2)); the mask lives in __constant__ memory.
// Each block stages blockDim.x + 2r elements: every thread loads one, and
// the first 2r threads additionally load the right-hand halo.
// Assumes gridDim.x * blockDim.x == n (no tail guard on tid).
__global__ void convolution_1d(int *array, int *result, int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int s_array[];
int r = MASK_LENGTH / 2;
int d = 2 * r; // total halo width
int n_padded = blockDim.x + d; // shared tile length
int offset = threadIdx.x + blockDim.x; // second (halo) slot for this thread
int g_offset = blockDim.x * blockIdx.x + offset; // its global source index
s_array[threadIdx.x] = array[tid];
if (offset < n_padded) // only threads with threadIdx.x < d load halo
s_array[offset] = array[g_offset];
__syncthreads(); // all tile loads must finish before any thread reads
int temp = 0;
for (int j = 0; j < MASK_LENGTH; j++)
temp += s_array[threadIdx.x + j] * mask[j];
result[tid] = temp;
}
// Recompute the convolution serially on the CPU and abort (assert) at the
// first element that disagrees with the GPU result. `array` must be the
// padded input so array[i + j] is always in bounds.
void verify_result(int *array, int *mask, int *result, int n)
{
    for (int i = 0; i < n; i++)
    {
        int expected = 0;
        for (int j = 0; j < MASK_LENGTH; j++)
            expected += array[i + j] * mask[j];
        assert(expected == result[i]);
    }
}
3,456 | /*
* Copyright 2011-2015 NVIDIA Corporation. All rights reserved
*
* Sample app to demonstrate use of CUPTI library to obtain metric values
* using callbacks for CUDA runtime APIs
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// #include <cupti.h>
#include <math_constants.h>
// #include "../../lcutil.h"
#include <cuda_profiler_api.h>
#define CUDA_SAFE_CALL( call) { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
#define DRIVER_API_CALL(apiFuncCall) \
do { \
CUresult _status = apiFuncCall; \
if (_status != CUDA_SUCCESS) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \
__FILE__, __LINE__, #apiFuncCall, _status); \
exit(-1); \
} \
} while (0)
#define RUNTIME_API_CALL(apiFuncCall) \
do { \
cudaError_t _status = apiFuncCall; \
if (_status != cudaSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status));\
exit(-1); \
} \
} while (0)
#define ALIGN_SIZE (8)
#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer))
// #define COMP_ITERATIONS (512)
#define THREADS (1024)
#define BLOCKS (3276)
#define N (10)
#define REGBLOCK_SIZE (4)
// #define UNROLL_ITERATIONS (32)
#define deviceNum (0)
// #define OFFSET
#define INNER_REPS 512
#define UNROLLS 32
// __constant__ __device__ int off [16] = {0,4,8,12,9,13,1,5,2,6,10,14,11,15,3,7}; //512 threads
// __constant__ __device__ int off [16] = {0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3}; //512 threads
// __constant__ __device__ int off [16] = {0,2,4,6,8,10,12,14,11,9,15,13,3,1,7,5}; //256 threads
// Global-memory bandwidth microbenchmark kernel: each thread performs
// INNER_REPS * N dependent load/store round-trips on its own element of
// cdin/cdout. The body is deliberately written out as 32 identical
// load/store pairs (one UNROLLS group) so the generated code is a long
// straight-line sequence of memory operations; do not "simplify" it --
// the repetition IS the benchmark.
template <class T> __global__ void benchmark (T* cdin, T* cdout){
// const int total = THREADS*BLOCKS+THREADS;
const int ite = blockIdx.x * THREADS + threadIdx.x; // this thread's element
T r0;
// printf("%d - %d\n", blockIdx.x,off[blockIdx.x]);
// T r1,r2,r3;
// r0=cdin[ite];
for (int k=0; k<N;k++){
#pragma unroll 512
for(int j=0; j<INNER_REPS; j+=UNROLLS){
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
r0 = cdin[ite];
cdout[ite]=r0;
}
}
cdout[ite]=r0; // final store keeps r0 live across the loops
}
// Sorts column `col` of x (ascending, in place) and returns the median:
// the middle element for odd n, the mean of the two middle elements for
// even n. NOTE: mutates the caller's array, same as the original.
double median(int n, double x[][4],int col) {
  for(int a = 0; a < n-1; a++) {
    for(int b = a+1; b < n; b++) {
      if(x[b][col] < x[a][col]) {
        // swap the out-of-order pair
        double swp = x[a][col];
        x[a][col] = x[b][col];
        x[b][col] = swp;
      }
    }
  }
  return (n % 2 == 0)
      ? (x[n/2][col] + x[n/2 - 1][col]) / 2.0
      : x[n/2][col];
}
// Creates both timing events and immediately stamps *startEv on the default
// stream; the caller records *stopEv when the timed region ends.
void initializeEvents(cudaEvent_t *startEv, cudaEvent_t *stopEv){
	CUDA_SAFE_CALL( cudaEventCreate(startEv) );
	CUDA_SAFE_CALL( cudaEventCreate(stopEv) );
	CUDA_SAFE_CALL( cudaEventRecord(*startEv, 0) );
}
// Records `stop`, waits for it, and returns the elapsed time in ms between
// the two events; both events are destroyed before returning.
float finalizeEvents(cudaEvent_t start, cudaEvent_t stop){
	CUDA_SAFE_CALL( cudaGetLastError() ); // surface any pending launch error
	CUDA_SAFE_CALL( cudaEventRecord(stop, 0) );
	CUDA_SAFE_CALL( cudaEventSynchronize(stop) ); // block until stop completes
	float kernel_time;
	CUDA_SAFE_CALL( cudaEventElapsedTime(&kernel_time, start, stop) );
	CUDA_SAFE_CALL( cudaEventDestroy(start) );
	CUDA_SAFE_CALL( cudaEventDestroy(stop) );
	return kernel_time;
}
// Runs one timed pass of the benchmark kernel and reports both the kernel
// time (ms) and the effective bandwidth (GB/s). `type` selects the element
// width used for the bandwidth math: 0 -> 4 bytes (float), else 8 (double).
// NOTE(review): only the float instantiation is launched; the double branch
// is commented out, so type!=0 mis-scales the bandwidth -- confirm intent.
void runbench(int type, double* kernel_time, double* bandw,double* cdin,double* cdout){
	cudaEvent_t start, stop;
	initializeEvents(&start, &stop);
	dim3 dimBlock(THREADS, 1, 1);
	dim3 dimGrid(BLOCKS, 1, 1);
// if (type==0){
	benchmark<float><<< dimGrid, dimBlock >>>((float*)cdin,(float*)cdout);
// }else{
// benchmark<double><<< dimGrid, dimBlock >>>(cdin,cdout, inner_reps, unrolls);
// }
	// one load + one store per inner iteration, per thread
	long long shared_access = 2*(long long)(INNER_REPS)*N*THREADS*BLOCKS;
	cudaDeviceSynchronize();
	double time = finalizeEvents(start, stop);
	double result;
	if (type==0)
		result = ((double)shared_access)*4/(double)time*1000./(double)(1024*1024*1024);
	else
		result = ((double)shared_access)*8/(double)time*1000./(double)(1024*1024*1024);
	*kernel_time = time;
	*bandw=result;
}
int main(int argc, char *argv[]){
	// Bandwidth-benchmark driver: runs the float kernel `outer_reps` times
	// (argv[1], default 1) and prints each measured kernel time.
	CUdevice device = 0;
	int deviceCount;
	char deviceName[32];
	int outer_reps;
	if (argc>1){
		outer_reps = atoi(argv[1]);
	}else{
		outer_reps = 1;
	}
	cudaDeviceProp deviceProp;
	printf("Usage: %s [device_num] [metric_name]\n", argv[0]);
	cudaSetDevice(deviceNum);
	double time[outer_reps][2],value[outer_reps][4];
	int size = THREADS*BLOCKS*sizeof(double); // buffer size in BYTES
	size_t freeCUDAMem, totalCUDAMem;
	cudaMemGetInfo(&freeCUDAMem, &totalCUDAMem);
	printf("Total GPU memory %lu, free %lu\n", totalCUDAMem, freeCUDAMem);
	// Bug fix: `size` is already in bytes; the old code multiplied by
	// sizeof(double) a second time and reported 8x the real buffer size
	// (and passed a size_t to %d).
	printf("Buffer size: %dMB\n", (int)(size/(1024*1024)));
	//Initialize Global Memory
	double *cdin;
	double *cdout;
	CUDA_SAFE_CALL(cudaMalloc((void**)&cdin, size));
	CUDA_SAFE_CALL(cudaMalloc((void**)&cdout, size));
	// Fill input with a nonzero byte pattern, clear the output buffer.
	CUDA_SAFE_CALL(cudaMemset(cdin, 1, size));
	CUDA_SAFE_CALL(cudaMemset(cdout, 0, size));
	// Wait for the memsets; cudaDeviceSynchronize replaces the long-
	// deprecated cudaThreadSynchronize.
	CUDA_SAFE_CALL(cudaDeviceSynchronize());
	DRIVER_API_CALL(cuDeviceGetCount(&deviceCount));
	if (deviceCount == 0) {
		printf("There is no device supporting CUDA.\n");
		return -2;
	}
	printf("CUDA Device Number: %d\n", deviceNum);
	DRIVER_API_CALL(cuDeviceGet(&device, deviceNum));
	CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, device));
	DRIVER_API_CALL(cuDeviceGetName(deviceName, 32, device));
	for (int i=0;i<outer_reps;i++){
		// NOTE(review): results always land in slot [0][0]; per-iteration
		// storage (slot [i]) looks intended but is preserved as-is.
		runbench(0,&time[0][0],&value[0][0],cdin,cdout);
		printf("Registered time: %f ms\n",time[0][0]);
	}
	CUDA_SAFE_CALL(cudaFree(cdin));
	CUDA_SAFE_CALL(cudaFree(cdout));
	CUDA_SAFE_CALL( cudaDeviceReset());
	return 0;
}
|
3,457 | #include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
#define TPB 256
#define PARTICLES 10000000
#define ITTERATIONS 10
// A point mass with 3-D position and velocity (units are arbitrary).
struct Particle
{
float3 position;
float3 velocity;
};
// Component-wise float3 addition, usable from device code.
__device__ float3 operator+(const float3& p1, const float3& p2)
{
    float3 s;
    s.x = p1.x + p2.x;
    s.y = p1.y + p2.y;
    s.z = p1.z + p2.z;
    return s;
}
// Scales every component of a float3 by an integer factor.
__device__ float3 operator*(const float3& p1, const int& p2)
{
    return make_float3(p2 * p1.x, p2 * p1.y, p2 * p1.z);
}
// Exact component-wise inequality: 1 when any component differs, else 0.
__host__ int operator!=(const float3& p1, const float3& p2)
{
    return (p1.x != p2.x) || (p1.y != p2.y) || (p1.z != p2.z);
}
// One timestep of the particle update, one particle per thread: bump the
// velocity by a constant delta, then advance the position by velocity*dt.
__global__ void update_gpu(Particle* particles, int dt)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Bug fix: the launch rounds the grid up to a multiple of TPB
    // ((PARTICLES + TPB - 1) / TPB blocks), so the tail threads previously
    // read and wrote past the end of the particle array.
    if (i >= PARTICLES) return;
    float3 delta_velocity = make_float3(1, 2, 3);
    particles[i].velocity = particles[i].velocity + delta_velocity;
    particles[i].position = particles[i].position + particles[i].velocity * dt;
}
// Serial host reference of update_gpu: same velocity bump and position
// integration, applied to all n particles in order.
void update_cpu(int n, Particle* particles, int dt)
{
    for (int i = 0; i < n; i++)
    {
        Particle* p = &particles[i];
        p->velocity.x += 1;
        p->velocity.y += 2;
        p->velocity.z += 3;
        p->position.x += p->velocity.x * dt;
        p->position.y += p->velocity.y * dt;
        p->position.z += p->velocity.z * dt;
    }
}
// Returns 1 when all n particles match exactly (position and velocity),
// 0 as soon as any mismatch is found.
int particle_compare(int n, Particle* cpu, Particle* gpu)
{
    int identical = 1;
    for (int i = 0; i < n && identical; i++)
        if (cpu[i].position != gpu[i].position || cpu[i].velocity != gpu[i].velocity)
            identical = 0;
    return identical;
}
int main()
{
// Times ITTERATIONS rounds of the particle update on GPU (including the
// H2D/D2H copies each round) and on CPU, using pinned host memory so the
// transfers run at full PCIe bandwidth.
double time_diff = 0.0;
clock_t start, end;
//Particle *particles = (Particle*)malloc(PARTICLES * sizeof(Particle));
Particle* particles;
// pinned (page-locked) host allocation
cudaHostAlloc(&particles, PARTICLES * sizeof(Particle), cudaHostAllocDefault);
for (int i = 0; i < PARTICLES; i++)
{
particles[i].position = make_float3(rand() % 100, rand() % 100, rand() % 100);
particles[i].velocity = make_float3(rand() % 100, rand() % 100, rand() % 100);
}
Particle* particles_gpu;
//Particle* results_particles = (Particle*)malloc(PARTICLES * sizeof(Particle));
Particle* results_particles;
cudaHostAlloc(&results_particles, PARTICLES * sizeof(Particle), cudaHostAllocDefault);
cudaMalloc(&particles_gpu, PARTICLES * sizeof(Particle));
//cudaMallocHost(&particles_gpu, PARTICLES * sizeof(Particle),cudaHostAllocDefault);
start = clock();
// Each iteration re-uploads the ORIGINAL particles, so the GPU applies
// exactly one step per round (the CPU loop below accumulates steps --
// hence the comparison at the bottom is commented out).
for (int i = 0; i < ITTERATIONS; i++)
{
cudaMemcpy(particles_gpu, particles, PARTICLES * sizeof(Particle), cudaMemcpyHostToDevice);
update_gpu <<< (PARTICLES + TPB - 1) / TPB, TPB >>> (particles_gpu, 1);
cudaMemcpy(results_particles, particles_gpu, PARTICLES * sizeof(Particle), cudaMemcpyDeviceToHost);
}
cudaDeviceSynchronize();
end = clock();
time_diff = (double)(end - start) / CLOCKS_PER_SEC;
printf("GPU execution time: %f seconds\n", time_diff);
start = clock();
for (int i = 0; i < ITTERATIONS; i++)
{
update_cpu(PARTICLES, particles, 1);
}
end = clock();
time_diff = (double)(end - start) / CLOCKS_PER_SEC;
printf("CPU execution time: %f seconds\n", time_diff);
/*if (particle_compare(PARTICLES, particles, results_particles))
printf("Comparison Successful\n");
else
printf("Error\n");*/
cudaFree(particles_gpu);
//free(particles);
//free(results_particles);
// pinned memory must be released with cudaFreeHost, not free()
cudaFreeHost(particles);
cudaFreeHost(results_particles);
return 0;
}
3,458 | #include <stdio.h>
// Squares one element per thread. The caller launches a single block with
// one thread per element, so threadIdx.x alone is the element index.
__global__ void square_1d_vector(float * d_out , float * d_in)
{
int idx = threadIdx.x;
d_out[idx] = d_in[idx] * d_in[idx];
}
// Fills a 32-element array with 0..31, squares it on the GPU with one
// block of one thread per element, and prints both arrays.
void call_1d_parallel_computing(void)
{
    const int ARRAY_SIZE = 32;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    // host-side buffers
    float hostIn[ARRAY_SIZE];
    float hostOut[ARRAY_SIZE];
    printf("Original Array: \n");
    for(int i = 0; i < ARRAY_SIZE;i++)
    {
        hostIn[i] = float(i);
        printf("%d " , i);
    }
    printf("\n");
    // device-side buffers
    float *devIn = NULL;
    float *devOut = NULL;
    cudaMalloc((void **) &devIn, ARRAY_BYTES);
    cudaMalloc((void **) &devOut, ARRAY_BYTES);
    cudaMemcpy(devIn, hostIn, ARRAY_BYTES, cudaMemcpyHostToDevice);
    square_1d_vector<<< 1, ARRAY_SIZE >>>(devOut, devIn);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(hostOut, devOut, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    printf("Square Array : \n");
    for(int i = 0;i < ARRAY_SIZE; i++)
    {
        printf("%d ",int(hostOut[i]));
    }
    printf("\n");
    cudaFree(devIn);
    cudaFree(devOut);
}
// Prints the program banner once at startup.
void print_header(void)
{
    printf(" ============================================== \n");
    printf(" ===== PARALLEL PROGRAMMING (1D - VECTOR) ===== \n");
    printf(" ============================================== \n");
    printf("\n");
}
int main(int argc, char** argv)
{
    // Banner, then run the squaring demo; arguments are ignored.
    print_header();
    printf("Computing square of numbers in some array ... \n");
    call_1d_parallel_computing();
    return 0;
}
|
3,459 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <curand.h>
#include <curand_kernel.h>
#define min( a,b ) ( (a) > (b) ? (b) : (a) )
__global__
// Fills the m_A x n_A device matrix A_pg (column-major, leading dimension
// ldim_A) with standard-normal samples, one element per thread.
// Each thread seeds its own LOCAL copy of `state` with sequence id = its
// flat element index, so the generated values depend only on (rand_seed,
// element index) -- not on the launch configuration.
static void Normal_random_number_kern( int m_A, int n_A,
				double * A_pg, int ldim_A,
				curandState_t state,
				unsigned long long rand_seed ) {
// fill matrix A with random numbers of standard normal distribution
int ij = blockIdx.x * blockDim.x + threadIdx.x;
int i = ij - m_A * ( ij / m_A ); // floor implicitly taken in div
int j = ij / m_A; // floor implicitly taken
//seed RNG (note: initializes this thread's by-value copy of `state`)
curand_init( rand_seed, ij, 0, & state );
if ( ij < m_A * n_A ) {
A_pg[ i + j * ldim_A ] = curand_normal( & state );
}
}
// Host wrapper: fills the m_A x n_A device array A_pg (leading dimension
// ldim_A) with standard-normal numbers, then bumps *rs_pt so the next call
// produces a different matrix.
static void Normal_random_matrix( int m_A, int n_A,
				double * A_pg, int ldim_A,
				curandState_t state,
				unsigned long long * rs_pt ) {
  // Perf fix: the old launch used one THREAD per BLOCK (m_A*n_A blocks).
  // The kernel derives each element's curand sequence id from its flat
  // index and bounds-checks ij < m_A*n_A, so a conventional 256-thread
  // block shape yields identical values far more efficiently.
  int num_el = m_A * n_A;
  int threads = 256;
  int blocks = ( num_el + threads - 1 ) / threads;
  Normal_random_number_kern<<< blocks, threads >>>( m_A, n_A, A_pg, ldim_A, state, * rs_pt );
  * rs_pt += 1;
}
// Prints a column-major m_A x n_A matrix (leading dimension ldim_A) in a
// MATLAB-readable "name = [ ... ];" form.
void print_double_matrix( const char * name, int m_A, int n_A,
			double * buff_A, int ldim_A ) {
  printf( "%s = [\n", name );
  for( int row = 0; row < m_A; row++ ) {
    for( int col = 0; col < n_A; col++ ) {
      printf( "%le ", buff_A[ row + col * ldim_A ] );
    }
    printf( "\n" );
  }
  printf( "];\n" );
}
int main() {
// Demo: fill a 5x5 device matrix with standard-normal numbers twice,
// bumping the seed in between so the two fills differ.
// declare, initialize variables
int m_A, n_A, ldim_A;
m_A = 5; n_A = 5; ldim_A = m_A;
const char * A_name = "A";
double * A_pc, * A_pg; // host ("_pc") and device ("_pg") buffers
// NOTE(review): `state` is never initialized here and is passed by value;
// the kernel curand_init()s its own local copy, so this host object is
// effectively just placeholder storage.
curandState_t state;
unsigned long long rand_seed = 7;
unsigned long long * rs_pt = & rand_seed;
// allocate array on host (cpu)
A_pc = ( double * ) malloc( m_A * n_A * sizeof( double ) );
// allocate array on device (gpu)
cudaMalloc( & A_pg, m_A * n_A * sizeof( double ) );
// call function; fill gpu array with random standard normal numbers
Normal_random_matrix( m_A, n_A, A_pg, ldim_A, state, rs_pt );
// copy result to host (blocking copy also synchronizes with the kernel)
cudaMemcpy( A_pc, A_pg, m_A * n_A * sizeof( double ), cudaMemcpyDeviceToHost );
// print out result
//print_double_matrix( A_name, m_A, n_A, A_pc, ldim_A );
// repeat to test whether numbers generated are different
// call function; fill gpu array with random standard normal numbers
Normal_random_matrix( m_A, n_A, A_pg, ldim_A, state, rs_pt );
// copy result to host
cudaMemcpy( A_pc, A_pg, m_A * n_A * sizeof( double ), cudaMemcpyDeviceToHost );
// print out result
//print_double_matrix( A_name, m_A, n_A, A_pc, ldim_A );
// free memory
free( A_pc );
cudaFree( A_pg );
return 0;
}
|
3,460 | #include <iostream>
#include <algorithm>
#include <chrono>
// Element-wise z = x + y using a block-stride loop: each thread covers
// indices threadIdx.x, threadIdx.x + blockDim.x, ... so the kernel is
// correct for any single-block launch size (including <<<1, 1>>>).
__global__ void add(float *x, float *y, float *z, int size)
{
    for (int i = threadIdx.x; i < size; i += blockDim.x)
    {
        z[i] = x[i] + y[i];
    }
}
cudaError_t cuda_add(float *x, float *y, float *z, int size);
int main()
{
// Adds two 1M-element vectors on the GPU via cuda_add() and checks that
// every result equals 1.0f + 2.0f = 3.0f.
const int N = 1 << 20;
float *x = new float[N];
float *y = new float[N];
float *z = new float[N];
std::fill_n(x, N, 1.0f);
std::fill_n(y, N, 2.0f);
cudaError_t cudaStatus = cuda_add(x, y, z, N);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "add_cuda failed!");
return 1;
}
// largest deviation from the expected 3.0f across all elements
float max_err = 0.0f;
for (int i = 0; i < N; i++)
{
max_err = std::fmax(max_err, std::fabs(z[i]-3.0f));
}
std::cout << "Max error: " << max_err << std::endl;
delete[] x;
delete[] y;
delete[] z;
}
// Copies x and y to the device, runs the single-block `add` kernel (timed),
// and copies the sum back into z. Returns the first CUDA error encountered,
// or cudaSuccess. All device buffers are released on every path.
cudaError_t cuda_add(float *x, float *y, float *z, int size)
{
    float *dev_x = 0;
    float *dev_y = 0;
    float *dev_z = 0;
    cudaError_t cudaStatus;
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        return cudaStatus;
    }
    // Refactor: all error paths below funnel into one cleanup block;
    // cudaFree(NULL) is a documented no-op, so unconditional frees are safe.
    cudaStatus = cudaMalloc((void**)&dev_x, size * sizeof(float));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Cleanup;
    }
    cudaStatus = cudaMalloc((void**)&dev_y, size * sizeof(float));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Cleanup;
    }
    cudaStatus = cudaMalloc((void**)&dev_z, size * sizeof(float));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Cleanup;
    }
    cudaStatus = cudaMemcpy(dev_x, x, size * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Cleanup;
    }
    cudaStatus = cudaMemcpy(dev_y, y, size * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Cleanup;
    }
    // Inner scope keeps the chrono locals out of the Cleanup label's reach
    // (C++ forbids a goto that jumps over an initialization in scope).
    {
        auto start = std::chrono::high_resolution_clock::now();
        add<<<1, 1>>>(dev_x, dev_y, dev_z, size);
        cudaStatus = cudaGetLastError();
        if (cudaStatus != cudaSuccess)
        {
            fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
            goto Cleanup;
        }
        cudaStatus = cudaDeviceSynchronize();
        auto stop = std::chrono::high_resolution_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start);
        std::cout << duration.count() << " microseconds" << std::endl;
        if (cudaStatus != cudaSuccess)
        {
            fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
            goto Cleanup;
        }
    }
    cudaStatus = cudaMemcpy(z, dev_z, size * sizeof(float), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
    }
Cleanup:
    cudaFree(dev_x);
    cudaFree(dev_y);
    cudaFree(dev_z);
    return cudaStatus;
}
3,461 | #include "includes.h"
// Seeds the solver state from the input image: every channel of
// xbar/xcur/xn is copied from img, and the dual variables y1/y2 start at
// zero.  One thread per pixel (x, y); channels handled in a loop.
__global__ void init(float* xbar, float* xcur, float* xn, float* y1, float* y2, float* img, int w, int h, int nc) {
    const int px = threadIdx.x + blockDim.x * blockIdx.x;
    const int py = threadIdx.y + blockDim.y * blockIdx.y;
    if (px >= w || py >= h)
        return;                                  // guard the grid tail
    for (int c = 0; c < nc; ++c) {
        const int idx = px + w * (py + h * c);   // pixel index within channel c
        const float v = img[idx];
        xbar[idx] = v;
        xn[idx] = v;
        xcur[idx] = v;
        y1[idx] = 0.f;
        y2[idx] = 0.f;
    }
} |
3,462 | #include <cassert>
#include <iostream>
#include <math.h>
#include <cooperative_groups.h>
using namespace cooperative_groups;
// Reduces a thread group to a single element
// Tree reduction over a cooperative-groups thread group.  `temp` must be
// shared scratch with one int per thread of `g`.
// NOTE(review): the halving loop assumes g.size() is a power of two; other
// sizes would drop elements — confirm the launch configuration.
__device__ int reduce_sum(thread_group g, int *temp, int val){
int lane = g.thread_rank();
// Each thread adds its partial sum[i] to sum[lane+i]
for (int i = g.size() / 2; i > 0; i /= 2){
temp[lane] = val;
// wait for all threads to store
g.sync();
if (lane < i) {
val += temp[lane + i];
}
// wait for all threads to load
g.sync();
}
// note: only thread 0 will return full sum
return val;
}
// Creates partials sums from the original array
// Grid-stride partial sum over `input`, reading four ints at a time through
// an int4 view of the array.
// NOTE(review): assumes `input` is 16-byte aligned and n is a multiple of 4
// (true for n = 2^13 in this file); any remainder tail is silently skipped.
__device__ int thread_sum(int *input, int n){
int sum = 0;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < n / 4; i += blockDim.x * gridDim.x){
// Cast as int4
int4 in = ((int4*)input)[i];
sum += in.x + in.y + in.z + in.w;
}
return sum;
}
// Sums `input[0..n)` into *sum: per-thread partials, a block-level
// cooperative-groups reduction in dynamic shared memory (one int per thread
// of the block is sufficient), then one atomicAdd per block.
// NOTE(review): *sum is accumulated, not assigned — the caller must zero it
// before the launch.
__global__ void sum_reduction(int *sum, int *input, int n){
// Create partial sums from the array
int my_sum = thread_sum(input, n);
// Dynamic shared memory allocation
extern __shared__ int temp[];
// Identifier for a TB
auto g = this_thread_block();
// Reduce each TB
int block_sum = reduce_sum(g, temp, my_sum);
// Collect the partial result from each TB
if (g.thread_rank() == 0) {
atomicAdd(sum, block_sum);
}
}
// Fills v[0..n) with the constant 1 (a random fill is left commented out
// for experiments).
void initialize_vector(int *v, int n) {
    for (int idx = 0; idx != n; ++idx)
        v[idx] = 1; //rand() % 10;
}
// Sums 2^13 ones on the GPU via the cooperative-groups block reduction and
// checks the result on the host.
int main() {
    int n = 1 << 13;
    size_t bytes = n * sizeof(int);
    int *sum, *data;
    cudaMallocManaged(&sum, sizeof(int));
    cudaMallocManaged(&data, bytes);
    initialize_vector(data, n);
    // cudaMallocManaged does not guarantee zeroed memory, and the kernel
    // only accumulates into *sum — initialize it explicitly.
    *sum = 0;
    int TB_SIZE = 256;
    int GRID = (n + TB_SIZE - 1) / TB_SIZE;
    // reduce_sum needs one shared int per thread of a block, so TB_SIZE
    // ints suffice (the original requested n*sizeof(int) = 32 KB per block).
    sum_reduction<<<GRID, TB_SIZE, TB_SIZE * sizeof(int)>>>(sum, data, n);
    cudaDeviceSynchronize();
    assert(*sum == 8192);
    printf("Done\n");
    return 0;
}
|
3,463 | #include "includes.h"
// Finite-difference curvature of the weight/bias gradients for one fully
// connected layer.  One thread per current-layer neuron j (2-D grid of 1-D
// blocks); each thread walks all prevLayerSize incoming weights.  The
// weight indexing (start at j, step by thisLayerSize) implies weight(i, j)
// lives at i*thisLayerSize + j.  Dropped-out neurons are skipped.
__global__ void FullyConnectedCurvatureKernel( float *weightsGradPtr, float *biasGradPtr, float *shiftedWeightsPtr, float *shiftedBiasPtr, float *avgWeightGradPtr, float *avgBiasGradPtr, float *weightGradCurvePtr, float *biasGradCurvePtr, float *dropoutMaskPtr, int prevLayerSize, int thisLayerSize )
{
    // i: prev. layer neuron id
    // j: current layer neuron id
    float avgGrad;
    int i;
    int j = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid
        + blockDim.x * blockIdx.x //blocks preceeding current block
        + threadIdx.x;
    if (j < thisLayerSize)
    {
        if (!dropoutMaskPtr[j])
        {
            int index = j;
            for (i = 0; i < prevLayerSize; i++)
            {
                // weight finite difference curvature
                avgGrad = avgWeightGradPtr[index];
                if (avgGrad == 0)
                    avgGrad = 0.000001f; // don't divide by 0! (was `==`, a no-op comparison)
                weightGradCurvePtr[index] = fabsf(weightsGradPtr[index] - shiftedWeightsPtr[index]) / avgGrad;
                index += thisLayerSize;
            }
            // bias finite difference curvature
            avgGrad = avgBiasGradPtr[j];
            if (avgGrad == 0)
                avgGrad = 0.000001f; // don't divide by 0! (was `==`, a no-op comparison)
            // fabsf: explicit single-precision abs, avoids any risk of the
            // integer abs() overload being picked up.
            biasGradCurvePtr[j] = fabsf(biasGradPtr[j] - shiftedBiasPtr[j]) / avgGrad;
        }
    }
} |
3,464 | #pragma once
#include <iostream>
#include <numeric>
#include <curand.h>
#include <curand_kernel.h>
#include <ctime>
#include <chrono>
#include <iomanip>
#include <sstream>
#include <fstream>
typedef double(*FunctionCallback)(double);
namespace parallel {
// One hit-or-miss Monte Carlo worker per thread: draws gpu_size (X, Y)
// samples uniformly in [A,B] x [min_Y,max_Y] and stores the per-thread net
// hit count in array[gid] (+1 for a sample under a positive f(X), -1 for a
// sample above a negative f(X)).
// NOTE(review): `f` must be a *device* function pointer; passing the
// address of a host function here would fault on the GPU — verify how
// callers obtain it (e.g. via cudaMemcpyFromSymbol).
__global__ void monteCarloThread(unsigned long seed,
double A, double B,
double min_Y, double max_Y,
int* array, int threads_amount, int gpu_size,
FunctionCallback f)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < threads_amount) {
int addToScore = 0;
double X, randomValue, realValue;
curandState_t state;
// Same seed for all threads, per-thread subsequence = gid.
curand_init(seed, gid, 0, &state);
for(int j = 0; j < gpu_size; ++j){
X = curand_uniform_double(&state) * (B - A) + A;
randomValue = curand_uniform_double(&state) * (max_Y - min_Y) + min_Y;
realValue = f(X);
if ((randomValue > 0) && (randomValue <= realValue)) {
++addToScore;
}
else if ((randomValue < 0) && (randomValue >= realValue)) {
--addToScore;
}
}
array[gid] = addToScore;
}
}
// Host driver: estimates the integral of f over [A,B] by hit-or-miss Monte
// Carlo on the GPU — the summed hit ratio is scaled by the bounding-box
// area (B-A)*(max-min).  `n` is rounded up to a multiple of threads*blocks
// through calculationsPerThread.
// NOTE(review): no CUDA error checking around the allocation, launch, or
// copies; and see monteCarloThread about the function pointer `f` needing
// to be a device function pointer.
double monteCarlo(int n, double A, double B, double min, double max, FunctionCallback f){
unsigned long cuRand_seed = time(NULL);
int score = 0;
double result;
cudaDeviceProp iProp;
cudaGetDeviceProperties(&iProp, 0);
//int threads = 512;
//int blocks = 100;
// One thread block per SM, maximum threads per block.
int threads = iProp.maxThreadsPerBlock;
int blocks = iProp.multiProcessorCount;
//hostp pointers
int* gpu_results;
//device pointers
int* d_c;
int size = threads * blocks;
int sizeInBytes = size * sizeof(int);
gpu_results = (int*)malloc(sizeInBytes);
memset(gpu_results, 0, sizeInBytes);
cudaMalloc((int**)&d_c, sizeInBytes);
// Ceil-divide so every one of the n samples is covered.
int calculationsPerThread = (n + size -1) / size;
monteCarloThread<<<blocks, threads>>> (cuRand_seed, A, B, min, max, d_c, size, calculationsPerThread, f);
cudaDeviceSynchronize();
cudaMemcpy(gpu_results, d_c, sizeInBytes, cudaMemcpyDeviceToHost);
// Net hit count over all threads, then scale by the sampling box area.
score = std::accumulate(gpu_results, gpu_results+size, 0);
result = (score / ((double)size*calculationsPerThread)) *
((B - A) * (max - min));
cudaFree(d_c);
free(gpu_results);
return result;
}
// Benchmarks the parallel Monte Carlo integrator m times on a problem of
// 2^n samples, appending each run's wall time (seconds) to
// "montePar_<m>_<n>.txt" and printing the mean afterwards.
void timeTestMonteCarloPar(int m, int n, double a, double b, double min, double max, FunctionCallback f){
    std::cout << std::setprecision(5);
    auto total = std::chrono::duration<double>::zero();
    std::stringstream filename;
    filename << "montePar_" << m << '_' << n << ".txt";
    n = 1 << n;   // benchmark size is 2^n samples
    std::ofstream file;
    file.open(filename.str());
    if (file.good())
    {
        std::cout << "Testing parallel Monte Carlo... for size: " << n << std::endl;
        for (int i = 1; i <= m; ++i){
            auto start = std::chrono::high_resolution_clock::now();
            monteCarlo(n, a, b, min, max, f);
            auto end = std::chrono::high_resolution_clock::now();
            // Progress indicator, rewritten in place via carriage return.
            std::cout << "\r" << i * 100.0 / m << "% ";
            std::cout << std::flush;
            std::chrono::duration<double> elapsed = end - start;
            file << elapsed.count() << std::endl;
            total += elapsed;
        }
        file.close();
    }
    std::cout << std::endl;
    std::cout << "Parallel Monte Carlo average time: " << total.count()/m << std::endl;
}
} |
3,465 |
//#include <helper_cuda.h>
//#include <algorithm>
#include <time.h>
#include <limits.h>
//#define RADIX 4294967296
//#define RADIX 2147483658
#define RADIX 65536
//#define numElements 1048576
#define numElements 30000
#define numIterations 10
#define BLOCKSIZE 128
// countlength/threadsperblock
// Builds an inclusive-shifted prefix sum of d_count into d_prefix
// (d_prefix[i+1] = d_count[0] + ... + d_count[i]) with an O(i) rescan per
// thread.
// NOTE(review): __syncthreads() only orders threads *within* a block; with
// more than one block the zeroing of d_prefix races with the atomicAdds
// issued by other blocks — confirm the launch uses a single block.
// NOTE(review): index == prefixLength-1 writes d_prefix[prefixLength], so
// the allocation must hold prefixLength+1 ints — verify at the call site.
// Likewise the rescan reads d_count[0..index] without an index<countLength
// guard.
void __global__ d_doPrefix(int* __restrict__ d_count, int countLength, int* __restrict__ d_prefix, int prefixLength)
{
// printf("do prefix = %d \n", threadIdx.x);
int sum = 0;
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < prefixLength)
{
d_prefix[index] = 0;
}
__syncthreads();
for(int i=index; i>=0; i--)
{
sum += d_count[i];
}
if(index < prefixLength)
atomicAdd(d_prefix +index+1, sum);
//printf("finished doPrefix \n");
}
// Histogram of the current radix digit: each input value is shifted right
// by `offset` and masked with countLength-1 (countLength must be a power
// of two), then the matching bucket is atomically incremented.
// NOTE(review): the zeroing of d_count and the counting happen in the same
// kernel separated only by a block-scoped __syncthreads(); with multiple
// blocks a fast block can count before a slow block zeroes — confirm a
// single-block launch or pre-zeroed d_count.
void __global__ d_doCount(int* __restrict__ d_unsorted, int unsortedLength, int* __restrict__ d_count, int countLength, int offset)
{
//printf("do count \n");
int index = threadIdx.x + blockIdx.x * blockDim.x;
//printf("index = %d \n", index);
if(index <countLength)
{
d_count[index] = 0;
}
__syncthreads();
if(index < unsortedLength)
{
int numToSort = d_unsorted[index];
numToSort = numToSort >> offset;
numToSort = (countLength-1)&(numToSort);
//printf("num = %d \n", numToSort);
atomicAdd(d_count + numToSort, 1);
}
//printf("finished count \n");
}
/*
* d_doReorder:
* leftover from an attempt to find a parallel reorder strategy
* did not get this working
*/
/*
void __global__ d_doReorder(int* __restrict__ d_unsorted, int unsortedLength, int* __restrict__ d_sorted, int sortedLength, int* __restrict__ d_prefix, int prefixLength, int offset)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if( index <unsortedLength)
{
int currentNum;
int newIndex;
int prefix;
//printf(" doReorder index %d \n", index);
// shifting and masking
currentNum = d_unsorted[index];
currentNum = currentNum >> offset;
currentNum = (prefixLength -1) & currentNum;
if (currentNum < prefixLength)
prefix = d_prefix[currentNum];
//else
//prefix = sortedLength;
newIndex = index % prefix;
//printf("prefix check: prefix = %d masked number = %d, real number = %d, index = %d, newIndex = %d \n", prefix, currentNum, d_unsorted[index], index, newIndex);
d_sorted[newIndex] = d_unsorted[index];
//d_unsorted = d_sorted;
}
}
*/
/*
* d_lazyReorder:
* sequential reordering done on the GPU,
*/
// Stable reorder step of the radix sort, done sequentially on the GPU:
// thread 0 scatters every element into d_sorted at the running prefix
// position (mutating d_prefix as it goes), then all threads of the block
// copy d_sorted back into d_unsorted in loopMax-sized chunks.
// NOTE(review): `if(threadIdx.x < 1)` selects thread 0 of *every* block;
// with gridDim.x > 1 each block would repeat the scatter and race —
// presumably launched with a single block; confirm.
void __global__ d_lazyReorder(int* __restrict__ d_unsorted, int unsortedLength, int* __restrict__ d_sorted, int sortedLength, int* __restrict__ d_prefix, int prefixLength, int offset, int threadCount)
{
//printf("lazy sort prefixlength %d, offset %d \n", prefixLength, offset);
//int index = threadIdx.x + blockIdx.x * blockDim.x;
// Elements each thread copies back in the parallel phase (ceil-divide).
int loopMax = ceil((float)unsortedLength/(float)threadCount);
int currentNum;
int newIndex;
if(threadIdx.x < 1)
{
for (int i =0; i<unsortedLength; i++)
{
// shifting and masking
currentNum = d_unsorted[i];
currentNum = currentNum >> offset;
currentNum = (prefixLength -1) & currentNum;
newIndex = d_prefix[currentNum];
d_prefix[currentNum]++;
d_sorted[newIndex] = d_unsorted[i];
//d_unsorted = d_sorted;
}
}
__syncthreads();
for (int i =0; i<loopMax; i++)
{
int index = threadIdx.x*loopMax + i;
if( index < sortedLength)
d_unsorted[index] = d_sorted[index];
}
}
/*
* d_lazyReorderorig:
* sequential reordering done on the GPU,
*/
// Original fully sequential reorder: every launched thread executes the
// complete scatter-and-copy-back, so this kernel is only correct for a
// <<<1,1>>> launch — any additional thread would race on d_prefix and the
// output arrays.  Kept as the reference the parallelized variant above was
// derived from.
void __global__ d_lazyReorderorig(int* __restrict__ d_unsorted, int unsortedLength, int* __restrict__ d_sorted, int sortedLength, int* __restrict__ d_prefix, int prefixLength, int offset)
{
//printf("lazy sort prefixlength %d, offset %d \n", prefixLength, offset);
int currentNum;
int newIndex;
for (int i =0; i<unsortedLength; i++)
{
// shifting and masking
currentNum = d_unsorted[i];
currentNum = currentNum >> offset;
currentNum = (prefixLength -1) & currentNum;
newIndex = d_prefix[currentNum];
d_prefix[currentNum]++;
d_sorted[newIndex] = d_unsorted[i];
//d_unsorted = d_sorted;
}
for (int i =0; i<unsortedLength; i++)
{
d_unsorted[i] = d_sorted[i];
}
}
|
3,466 | __global__ void double_itself(int* A) {
    // Double each element in place.  Only threadIdx.x is used, so a
    // single-block launch with one thread per element is assumed.
    const int i = threadIdx.x;
    A[i] *= 2;
}
|
3,467 | #include "includes.h"
// Accumulates the n elements of a into *b, one atomicAdd per thread.
// *b is only accumulated, never assigned — zero it before the launch.
__global__ void reduce(int *a, int *b, int n) {
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: the original read a[i] for every launched
    // thread, going out of bounds whenever the grid overshoots n.
    if (i < n)
        atomicAdd(b, a[i]);
} |
3,468 | #include <math.h>
// Returns 1/idx! in single precision (1.0f for idx <= 1).
__device__ float fact_fun(int idx){
    float factorial = 1.0f;
    for (int k = 2; k <= idx; ++k)   // multiplying by 1 is skipped; same product
        factorial *= k;
    return 1.0f / factorial;
}
// Fills c[0..499] with the Taylor terms 1/k! of e.  Thread idx writes both
// c[idx] and the mirrored slot c[duljina-idx-1], so a launch with half as
// many threads as terms covers the whole array.
__global__ void e_sum(float *c){
    const int duljina = 500;  // series length
    const int idx = threadIdx.x;
    c[idx] = fact_fun(idx);
    // Fix: slot duljina-idx-1 must hold 1/(duljina-idx-1)!; the original
    // passed duljina-idx and stored 1/(k+1)! one position too early.
    c[duljina-idx-1] = fact_fun(duljina-idx-1);
}
|
3,469 | ////////////////////////////////////////////////////////////////////////////////
//
// FILE: n_sample_moving_avg.cu
// DESCRIPTION: runs N Sample Moving Average Filtering algorithm on gpu
// AUTHOR: Dan Fabian
// DATE: 2/16/2020
#include <iostream>
#include <random>
#include <chrono>
using std::cout; using std::endl; using std::cin;
using namespace std::chrono;
// all constants
const int NUM_OF_VALS = 10000, N = 256, NUM_OF_AVG = NUM_OF_VALS - N + 1;
// kernel func
// N-sample moving average of vals into avg.  Each thread owns a contiguous
// run of avgCalcPerThread outputs: the first is built by direct summation
// (avg is expected to arrive zero-filled — the host copies zeros in), the
// rest incrementally in O(1) from their left neighbour.
__global__ void movingAvg(int *vals, float *avg)
{
    // number of average calculations a single thread performs
    int avgCalcPerThread = ceilf(float(NUM_OF_AVG) / float(blockDim.x * gridDim.x));
    // thread index
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // get first avg val for thread
    int avgIdx = idx * avgCalcPerThread;
    if (avgIdx < NUM_OF_AVG)   // guard: trailing threads own no output slot
    {
        for (int i = 0; i < N; ++i)
            avg[avgIdx] += vals[avgIdx + i];
        // Fix: this division previously ran unguarded, writing avg[avgIdx]
        // out of bounds for threads whose range starts past NUM_OF_AVG.
        avg[avgIdx] /= N;
    }
    // calculate the rest of avg vals for thread
    int maxAvgIdx = avgCalcPerThread * (idx + 1);
    for (avgIdx = idx * avgCalcPerThread + 1;
         avgIdx < maxAvgIdx && avgIdx < NUM_OF_AVG;
         ++avgIdx)
        avg[avgIdx] = (avg[avgIdx - 1] * N + vals[avgIdx + N - 1] - vals[avgIdx - 1]) / N;
}
// Host driver: random input, interactive launch dimensions, GPU moving
// average, result copied back (the blocking cudaMemcpy also serves as the
// post-kernel synchronization point).
int main()
{
// create arrays of vals
int vals[NUM_OF_VALS], *vals_d;
float avg[NUM_OF_AVG], *avg_d;
// create rng
unsigned int seed = system_clock::now().time_since_epoch().count();
std::default_random_engine generator(seed);
std::uniform_int_distribution<int> dist(0, 5);
// init vals; avg is zero-filled because the kernel accumulates into it
for (int i = 0; i < NUM_OF_VALS; ++i)
{
vals[i] = dist(generator);
if (i < NUM_OF_AVG)
avg[i] = 0;
}
// memory sizes to allocate
int valMem = sizeof(int) * NUM_OF_VALS, avgMem = sizeof(float) * NUM_OF_AVG;
// allocate memory on device
cudaMalloc((void**)&vals_d, valMem);
cudaMalloc((void**)&avg_d, avgMem);
// copy vals and avg to device
cudaMemcpy(vals_d, vals, valMem, cudaMemcpyHostToDevice);
cudaMemcpy(avg_d, avg, avgMem, cudaMemcpyHostToDevice);
// ask user for grid and block dims
cout << "Enter Grid X Dim: ";
int gridDim; cin >> gridDim;
cout << "Enter Block X Dim: ";
int blockDim; cin >> blockDim;
// call func
movingAvg<<<gridDim, blockDim>>>(vals_d, avg_d);
// copy device memory back to host
cudaMemcpy(avg, avg_d, avgMem, cudaMemcpyDeviceToHost);
/*
// print vals
for (int i = 0; i < NUM_OF_VALS; ++i)
cout << vals[i] << ' ';
cout << endl;
// print averages
for (int i = 0; i < NUM_OF_AVG; ++i)
cout << avg[i] << ' ';
cout << endl;
*/
// free all device memory
cudaFree(vals_d); cudaFree(avg_d);
} |
3,470 | #include "includes.h"
// Dropout forward pass, double precision: y[i] is zeroed where the
// pre-drawn mask value falls below `dropout`, otherwise x[i] is kept and
// rescaled by `scale`.  Grid-stride loop, so any launch size covers n.
__global__ void _drop64(int n, double *x, double *y, double *xmask, double dropout, double scale) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += stride) {
        y[i] = (xmask[i] < dropout) ? 0 : x[i] * scale;
    }
} |
3,471 | #include "includes.h"
//double* x, * devx, * val, * gra, * r, * graMax;
//double* hes_value;
////int size;
//int* pos_x, * pos_y;
//int* csr;
double* x;
//thrust::pair<int, int> *device_pos;
//typedef double (*fp)(double);
//typedef void (*val_fp)(double*, double*, int);
//typedef void (*valsum_fp)(double*, double*,int);
//typedef void (*gra_fp)(double*, double*, int);
//typedef void (*gramin_fp)(double*, double*,int);
//typedef void (*hes_fp)( double*, thrust::pair<int, int>*, double*, int);
//typedef void (*print_fp)(double*, int);
int numSMs;
// Fills a diagonal sparse triple of size N: entry i is
// (pos_x[i], pos_y[i], value[i]) = (i, i, 2*cos(2*devx[i])).
// Only threadIdx.x is used, so a single block of at least N+1 threads is
// assumed.
// NOTE(review): the index == N branch writes pos_x[N] (a terminator,
// presumably for a CSR-style structure) — pos_x must be allocated with
// N+1 slots; verify at the call site.
__global__ void create_tuple(double* devx, int* pos_x, int* pos_y, double* value, int N) {
int index = threadIdx.x;
if (index < N) {
pos_x[index] = index;
pos_y[index] = index;
value[index] = 2 * cosf(2 * devx[index]);
}
else if(index == N){
pos_x[index] = N;
}
} |
3,472 | #include "includes.h"
//Library Definition
//Constant Definition
#define PI 3.141592654
#define blocksize 32
#define Repetitions 8192
//Print matrix into standard output
void print(double * M,int cols,int rows);
void dot(double * a,double * b, double & c, int cols);
void Create_New_Matrix(double * M,double * New,int * vec, int p0, int pp,int nn);
/*
DEVICE FUNCTIONS
*/
//Matrix transposition (Rows and Cols of M)
// Zeroes column i of the nn x nn matrix A while leaving the pivot row
// (x == i) untouched — presumably one elimination step of a Gauss-Jordan
// style inversion; confirm against the host driver.  `I` is unused here
// but kept for signature parity with the companion kernels.
__global__ void set_zero(double *A, double *I, int nn, int i)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= nn || col >= nn)
        return;
    if (row != i && col == i)
        A[row*nn + col] = 0;
} |
3,473 | #include<bits/stdc++.h>
using namespace std;
typedef unsigned long long ull;
typedef pair< ull , ull> uull;
const ull MAX = 1;
uull LnRnBlocks[17]; // from l0r0 to l16r16
uull CnDnBlocks[17]; //from c0d0 to c16d16
ull keysBlocks[16]; //from key[1] = k0 to key[16] = k15
ull allCipherDES[1000000];
const ull Rotations[16] = {
1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1
};
const int PC1[56] = {
57, 49, 41, 33, 25, 17, 9,
1, 58, 50, 42, 34, 26, 18,
10, 2, 59, 51, 43, 35, 27,
19, 11, 3, 60, 52, 44, 36,
63, 55, 47, 39, 31, 23, 15,
7, 62, 54, 46, 38, 30, 22,
14, 6, 61, 53, 45, 37, 29,
21, 13, 5, 28, 20, 12, 4
};
const int PC2[48] = {
14, 17, 11, 24, 1, 5,
3, 28, 15, 6, 21, 10,
23, 19, 12, 4, 26, 8,
16, 7, 27, 20, 13, 2,
41, 52, 31, 37, 47, 55,
30, 40, 51, 45, 33, 48,
44, 49, 39, 56, 34, 53,
46, 42, 50, 36, 29, 32
};
const int IniPer[64] = {
58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6,
64, 56, 48, 40, 32, 24, 16, 8,
57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7
};
const int reverseIniPer[64] ={
40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25
};
const int Expansion[48] ={
32, 1, 2, 3, 4, 5,
4, 5, 6, 7, 8, 9,
8, 9, 10, 11, 12, 13,
12, 13, 14, 15, 16, 17,
16, 17, 18, 19, 20, 21,
20, 21, 22, 23, 24, 25,
24, 25, 26, 27, 28, 29,
28, 29, 30, 31, 32, 1
};
const int Pbox[32] ={
16, 7, 20, 21,
29, 12, 28, 17,
1, 15, 23, 26,
5, 18, 31, 10,
2, 8, 24, 14,
32, 27, 3, 9,
19, 13, 30, 6,
22, 11, 4, 25
};
const int Sbox[8][4][16] = {
{
{14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7},
{ 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8},
{ 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0},
{15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13},
},
{
{15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10},
{ 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5},
{ 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15},
{13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9},
},
{
{10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8},
{13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1},
{13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7},
{ 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12},
},
{
{ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15},
{13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9},
{10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4},
{ 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14},
},
{
{ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9},
{14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6},
{ 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14},
{11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3},
},
{
{12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11},
{10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8},
{ 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6},
{ 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13},
},
{
{ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1},
{13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6},
{ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2},
{ 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12},
},
{
{13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7},
{ 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2},
{ 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8},
{ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11},
},
};
const ull iniKey[8] = {
0x13,0x34,0x57,0x79,0x9B,0xBC,0xDF,0xF1};
const ull message[8] = {
0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF};
// Builds the 56-bit permuted key K+ from the 64-bit initial key via the
// PC-1 table.  Launched as <<<1,56>>>: thread i decides output bit (55-i).
// requires SM35+ for the 64-bit atomicOr on shared memory.
__global__ void createKeyPlus( ull *cIniKey, int *cPC1, ull *result){
    __shared__ ull keyPlus;
    if (threadIdx.x == 0)
        keyPlus = 0L;          // single writer for the initialization
    __syncthreads();
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if( cIniKey[ cPC1[i]/8 ] & (1 << ( ( 64-cPC1[i]) % 8 ) ) ){
        // Fix: the original plain `keyPlus |= ...` was a racy non-atomic
        // read-modify-write on shared memory and could drop bits set by
        // other threads of the block.
        atomicOr(&keyPlus, 1ULL << (55-i));
    }
    // keyPlus = keyPlus + 1L;
    __syncthreads();
    result[0] = keyPlus;
    // printf("++ %lu\n", keyPlus);
}
// Host wrapper: uploads the initial key and the PC-1 table, runs the
// createKeyPlus kernel with 56 threads, and returns the 56-bit K+ value.
ull generateKeyPlus(){
    //host copies iniKey and PC1 arrays
    ull *result;
    ull keyPlus, *d_iniKey, *d_result;
    int *d_PC1;
    int sizeIniKey = 8 * sizeof(ull);
    int sizePC1 = 56 * sizeof( int );
    int sizeKeyPlus = 2 * sizeof( ull );
    //alloc space for device copies
    cudaMalloc( (void **)&d_iniKey, sizeIniKey );
    cudaMalloc( (void **)&d_PC1, sizePC1 );
    cudaMalloc( (void **)&d_result, sizeKeyPlus);
    //set up input values
    keyPlus = 0L;
    result = (ull *)malloc(sizeKeyPlus);
    //copy inputs to device
    cudaMemcpy(d_iniKey,iniKey,sizeIniKey,cudaMemcpyHostToDevice);
    cudaMemcpy(d_PC1,PC1,sizePC1,cudaMemcpyHostToDevice);
    cudaMemcpy(d_result,result,sizeKeyPlus,cudaMemcpyHostToDevice);
    //launch kernel on GPU
    createKeyPlus<<<1,56>>>(d_iniKey,d_PC1, d_result );
    //copy result back (blocking memcpy doubles as the synchronization)
    cudaMemcpy(result,d_result,sizeKeyPlus,cudaMemcpyDeviceToHost);
    keyPlus = result[0];
    free(result);
    // Fix: free the buffer that was actually allocated.  The original
    // freed the never-allocated, uninitialized d_keyPlus and leaked
    // d_result on every call.
    cudaFree(d_iniKey); cudaFree(d_PC1); cudaFree(d_result);
    printf("After KEY %llu\n",keyPlus);
    return keyPlus;
}
// Splits the 56-bit K+ into its halves: C0 = high 28 bits, D0 = low 28 bits.
uull splitKeyPlus(ull keyPlus){
    ull c0 = 0L, d0 = 0L;
    for (int bit = 0; bit < 28; ++bit){
        if (keyPlus & (1LL << bit))          d0 |= (1LL << bit);
        if (keyPlus & (1LL << (bit + 28L)))  c0 |= (1LL << bit);
    }
    return make_pair(c0, d0);
}
// Splits the 64-bit IP output into L0 (high 32 bits) and R0 (low 32 bits).
uull splitIniPer(ull codeIniPer){
    ull l0 = 0L, r0 = 0L;
    for (int bit = 0; bit < 32; ++bit){
        if (codeIniPer & (1LL << bit))          r0 |= (1LL << bit);
        if (codeIniPer & (1LL << (bit + 32L)))  l0 |= (1LL << bit);
    }
    return make_pair(l0, r0);
}
// Derives the 16 rotated (Cn, Dn) 28-bit half pairs from the seed pair,
// rotating each half left by the per-round amount in Rotations[].
// getOnCn/getOnDn capture the one or two bits that fall off the top so
// they can be re-inserted at the bottom; the `Rotations==1 ? j : 1-j`
// selector reverses their order for the two-bit case.
void generateCnDnBlocks( uull seedKey){
CnDnBlocks[0] = seedKey;
ull cn ,dn, newCn, newDn;
ull getOnCn, getOnDn;
for(int i=1;i<=16;i++){
getOnCn = 0L;
getOnDn = 0L;
cn = CnDnBlocks[i-1].first ;
dn = CnDnBlocks[i-1].second;
for(ull j=0;j< Rotations[ i-1 ];j++){
if( cn & (1 << (27-j) ) ) getOnCn|= 1LL << (Rotations[ i-1 ]==1 ? j: 1-j) ;
if( dn & (1 << (27-j) ) ) getOnDn|= 1LL << (Rotations[ i-1 ]==1 ? j: 1-j);
}
newCn = cn << Rotations[ i-1 ];
newDn = dn << Rotations[ i-1 ];
// Clear the bits shifted past position 27 so the halves stay 28 bits wide.
for(ull j=0; j< Rotations[ i-1 ] ;j++){
newCn &= ~(1<< (28+j) );
newDn &= ~(1<< (28+j) );
}
newCn |= ( getOnCn );
newDn |= ( getOnDn );
CnDnBlocks[ i ] = make_pair( newCn, newDn);
}
}
// Rejoins the halves into one 56-bit value: Cn in the high 28 bits, Dn low.
ull joinCnDn(ull cn, ull dn){ return (cn<<28) | dn; }
// Produces the 16 round keys: each joined 56-bit CnDn is compressed to 48
// bits through the PC-2 table.  Output bit k of a round key mirrors CnDn
// bit (56 - PC2[47-k]); keys are stored as keysBlocks[0..15].
void generateKeysBlocks(){
ull cnDn, keyn;
ull k;
for(int i=1;i<=16;i++){
cnDn = joinCnDn( CnDnBlocks[i].first, CnDnBlocks[i].second );
keyn = 0L; k=0L;
for(int j=48-1;j>=0;j--){
if( cnDn & ( 1LL << (56-PC2[j])*1L ) ) {
keyn|= ( 1LL<< k );
}
k++;
}
keysBlocks[i-1] = keyn;
}
}
// Applies the initial permutation IP to the fixed 8-byte `message`,
// producing one 64-bit block (result bit j is taken from the table entry
// scanned at position 63-j).
// NOTE(review): the `(IniPer[i]/8) >= 8 ? 7 : ...` clamp only triggers for
// IniPer[i] == 64, mapping that bit into byte 7 together with the %8 bit
// index — verify this matches the byte/bit convention used in
// createKeyPlus.
ull generateIniPer(){
ull keyPlus=0L;
int j=0;
for(int i=64-1;i>=0;i--){
if( message[ (IniPer[i]/8) >=8 ? 7: (IniPer[i]/8) ] & (1LL << ( ( 64-IniPer[i]) % 8 ) ) ){
keyPlus|=( 1LL<< j*1L );
}
j++;
}
return keyPlus;
}
// Expands a 32-bit half-block Rn to 48 bits via the E table: output bit k
// (filled while j runs 47 down to 0) mirrors input bit (32 - Expansion[j]).
ull expandRn(ull Rn){
    ull expanded = 0L;
    ull outBit = 0L;
    for (int j = 47; j >= 0; --j, ++outBit){
        if (Rn & (1LL << (32 - Expansion[j])))
            expanded |= (1LL << outBit);
    }
    return expanded;
}
// Keyed mixing step: bitwise XOR of the expanded half-block and round key.
ull xorOperation(ull En, ull Kn){
    return En ^ Kn;
}
// Looks up S-box k for the 6-bit group Bn: the row is formed from bits 0
// and 5 of the group, the column from bits 1..4.
// Note: `1<<6-1` parses as `1 << 5` (shift binds looser than minus), i.e.
// the top bit of the 6-bit group — which is what the row needs here.
ull getSboxNumber(int Bn, int k){
int row=0,col=0;
if( Bn & 1<<0 ) row |= ( 1<<0);
if( Bn & 1<<6-1) row |=( 1<<1 );
for(int i=1;i<=4;i++){
if( Bn & 1<<i ) col |=(1<<(i-1));
}
return ( Sbox[k][row][col]*1LL);
}
// Splits the 48-bit value Bn into eight 6-bit groups, substitutes each
// through its S-box, and packs the eight 4-bit outputs into one 32-bit
// word (first group in the top nibble; `step` walks 28, 24, ..., 0).
// The scan runs i = 0..48 *inclusive*: the extra iteration at i == 48 only
// flushes the final 6-bit group — bit 48 of a 48-bit value is always 0.
ull generateSboxCombination(ull Bn){
int Bbox[8];
ull sbBox[8];
int number=0, k=7;
ull snBnChain=0L;
ull step=28L;
for(int i=0;i<=48;i++ ){
if( i%6==0 && i>=6){
Bbox[ k-- ] = number;
number = 0;
}
if( Bn & (1LL<<i*1L) ){
number |= (1LL<< ( (i%6)*1L ) );
}
}
for(int i=0;i<8;i++){
sbBox[i] = getSboxNumber( Bbox[i], i);
}
for(int i=0;i<8;i++){
snBnChain |= (sbBox[i]<< step);
step-=4;
}
return snBnChain;
}
// Applies the 32-bit P permutation to the S-box output: output bit k
// (filled while j runs 31 down to 0) mirrors input bit (32 - Pbox[j]).
ull generateFalgorithm(ull snBn){
    ull fn = 0L;
    ull outBit = 0L;
    for (int j = 31; j >= 0; --j, ++outBit){
        if (snBn & (1LL << (32 - Pbox[j])))
            fn |= (1LL << outBit);
    }
    return fn;
}
// Runs the 16 Feistel rounds starting from (L0, R0):
//   L_n = R_{n-1},  R_n = L_{n-1} XOR f(R_{n-1}, K_n)
// where f = P(S-boxes(E(R) XOR K)).  Results land in LnRnBlocks[1..16].
void generateLnRnBlocks(uull L0R0){
LnRnBlocks[0] = L0R0;
ull fn;
for(int time=1; time<=16;time++){
ull Ln = LnRnBlocks[ time-1 ].first;
ull Rn = LnRnBlocks[ time-1 ].second;
ull snBn =
generateSboxCombination( xorOperation( expandRn( Rn ),keysBlocks[ time-1 ] ) );
fn = generateFalgorithm(snBn);
uull LnRn = make_pair( Rn, (Ln ^ fn) );
LnRnBlocks[ time ] = LnRn;
}
}
// Concatenates the final halves in swapped order: R16 becomes the high 32
// bits and L16 the low 32 bits (the block fed to the final permutation).
ull reverseLnRn( uull LnRn){
    return (LnRn.second << 32L) | LnRn.first;
}
// Applies the final permutation IP^-1 to the swapped block R16L16,
// yielding the 64-bit ciphertext: output bit k (filled while j runs 63
// down to 0) mirrors input bit (64 - reverseIniPer[j]).
ull generateCipherMessage( ull RnLn ){
    ull cipher = 0L;
    ull outBit = 0L;
    for (int j = 63; j >= 0; --j, ++outBit){
        if (RnLn & (1LL << (64 - reverseIniPer[j])))
            cipher |= (1LL << outBit);
    }
    return cipher;
}
// Full DES encryption of the fixed `message` under the fixed `iniKey`:
// key schedule (PC-1 on the GPU, rotations, PC-2), initial permutation,
// 16 Feistel rounds, swap, and final permutation.  Returns the 64-bit
// ciphertext (expected value is listed in the trailing comments below).
ull cipherDES(){
uull keyHalves = splitKeyPlus( generateKeyPlus() );
generateCnDnBlocks( keyHalves );
generateKeysBlocks();
uull iniPerHalves = splitIniPer(generateIniPer() ); //got L0 and R0
generateLnRnBlocks( iniPerHalves );
ull revLnRn = reverseLnRn( LnRnBlocks[16] );
ull cipherMessage = generateCipherMessage( revLnRn );
// printf("cipher: %llu\n",cipherMessage);
// fflush(stdout);
// printf("Hex Cipher: %llX\n", cipherMessage);
// fflush(stdout);
return cipherMessage;
}
int main(){
for(int i=0;i<MAX;i++){
allCipherDES[ i ] = cipherDES();
}
printf("cipher: %llX\n", allCipherDES[0] );
return 0;
}
// L16 01000011010000100011001000110100 1128411700
// R16 00001010010011001101100110010101 172808597
// revL16R16 0000101001001100110110011001010101000011010000100011001000110100 742207273711055412
// cipher 1000010111101000000100110101010000001111000010101011010000000101 9648983453391827973 |
3,474 | /* https://devblogs.nvidia.com/even-easier-introduction-cuda/ */
#include <iostream>
#include <math.h>
// __global__: indica que a função add deverá ser executada na
// y[i] += x[i] for i in [0, n).  Designed for a single-block launch: each
// thread starts at its own index and strides by the block width, so any
// block size covers all n elements.
__global__
void add(int n, float *x, float *y){
    const int stride = blockDim.x;
    for (int i = threadIdx.x; i < n; i += stride)
        y[i] += x[i];
}
// Allocates two 1M-element vectors in unified (managed) memory — reachable
// from both host and device — adds them on the GPU with one 256-thread
// block, and reports the maximum deviation from the expected 3.0f.
int main(void){
    const int N = 1 << 20;
    float *x = nullptr, *y = nullptr;
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));
    for (int i = 0; i < N; ++i){
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    // Run the kernel on 1M elements with a single 256-thread block.
    add<<<1, 256>>>(N, x, y);
    // Wait for the GPU to finish before touching the data on the host.
    cudaDeviceSynchronize();
    float maxError = 0.0f;
    for (int i = 0; i < N; ++i)
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    std::cout << "Max error: " << maxError << std::endl;
    // Release the managed allocations.
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
3,475 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
// Comment out this line to enable debug mode
// #define NDEBUG
/* time stamp function in seconds */
__host__ double getTimeStamp()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return (double)tv.tv_usec / 1000000 + tv.tv_sec;
}
/* Initializes the (numRows+2) x numCols halo matrix X: the top and bottom
   halo rows are zeroed, and interior row i (stored at physical row i+1)
   gets X[i,j] = (i+j)/2.
   (__host__ dropped: it is the default for free functions under nvcc.) */
void initX(float *X, int numRows, int numCols)
{
    const int bottomRowBase = (numRows + 1) * numCols;
    for (int j = 0; j < numCols; j++)
    {
        X[j] = 0;                 /* top halo row */
        X[bottomRowBase + j] = 0; /* bottom halo row */
    }
    for (int i = 0; i < numRows; i++)
    {
        float *row = X + (i + 1) * numCols;
        for (int j = 0; j < numCols; j++)
            row[j] = (float)(i + j) / 2.0;
    }
}
/* Initializes the numRows x (numCols+2) halo matrix Y: the first two
   columns of every row are zeroed, and column j+2 gets Y[i,j] = 3.25*(i+j).
   (__host__ dropped: it is the default for free functions under nvcc.) */
void initY(float *Y, int numRows, int numCols)
{
    for (int i = 0; i < numRows; i++)
    {
        float *row = Y + i * (numCols + 2);
        row[0] = 0; /* left halo */
        row[1] = 0; /* left halo */
        for (int j = 0; j < numCols; j++)
            row[j + 2] = (float)3.25 * (i + j);
    }
}
#define H_ADJ_INDEX_X(i, j) ((i) + 1) * numCols + (j)
#define H_ADJ_INDEX_Y(i, j) (i) * (numCols + 2) + (j) + 2
#define H_INDEX(i, j) (i) * numCols + (j)
// CPU reference for the signal generator: for every (i, j),
//   Z[i,j] = X[i-1,j] + X[i,j] + X[i+1,j] - Y[i,j-2] - Y[i,j-1] - Y[i,j].
// X carries one halo row above and below and Y two halo columns on the
// left (see H_ADJ_INDEX_X / H_ADJ_INDEX_Y), so all accesses stay in
// bounds without branching.
__host__ void f_siggen_reference(float *X, float *Y, float *Z, int numRows, int numCols)
{
for (int i = 0; i < numRows; i++)
{
for (int j = 0; j < numCols; j++)
{
// Z[i,j] = X[i-1,j] + X[i,j] + X[i+1,j] – Y[i,j-2] – Y[i,j-1] – Y[i,j]
Z[H_INDEX(i, j)] =
X[H_ADJ_INDEX_X(i - 1, j)] +
X[H_ADJ_INDEX_X(i, j)] +
X[H_ADJ_INDEX_X(i + 1, j)] -
Y[H_ADJ_INDEX_Y(i, j - 2)] -
Y[H_ADJ_INDEX_Y(i, j - 1)] -
Y[H_ADJ_INDEX_Y(i, j)];
}
}
}
/* Element-wise comparison of expected (E) against actual (A) numRows x
   numCols matrices.  Prints the first mismatch and returns 0; returns 1
   when all elements are bitwise-equal.
   (__host__ dropped: it is the default for free functions under nvcc.) */
int checkZ(float *E, float *A, int numRows, int numCols)
{
    for (int i = 0; i < numRows; i++)
    {
        float *expRow = E + i * numCols;
        float *actRow = A + i * numCols;
        for (int j = 0; j < numCols; j++)
        {
            if (expRow[j] != actRow[j])
            {
                printf("(i=%d, j=%d) Expected=%f Actual=%f\n", i, j, expRow[j], actRow[j]);
                return 0;
            }
        }
    }
    return 1;
}
// GPU version of f_siggen_reference.  Each block stages a transposed tile
// of X (one halo column on each side after the transpose) and a tile of Y
// (two halo columns on the left) in dynamic shared memory, then each
// thread emits one Z element.  Dynamic shared memory layout: the first
// smemNumElemX floats are s_XT, followed by (blockDim.x+2)*blockDim.y
// floats for s_Y.
// NOTE(review): threads outside the domain return *before* the
// __syncthreads(), so for numRows/numCols that are not multiples of the
// block size the barrier is reached divergently — undefined behavior.
// Confirm the intended problem sizes are block-size multiples.
__global__ void f_siggen(float *X, float *Y, float *Z, int numRows, int numCols, int smemNumElemX)
{
extern __shared__ float s_data[];
s_XT = s_data; // blockDim.x * (blockDim.y + 2);
int s_XTWidth = (blockDim.y + 2);
// int s_XTHeight = blockDim.x;
float *s_Y = s_XT + smemNumElemX; // (blockDim.x + 2) * blockDim.y;
/* Global Coordinate */
int globalX = blockDim.x * blockIdx.x + threadIdx.x;
int globalY = blockDim.y * blockIdx.y + threadIdx.y;
int globalIdx = globalY * numCols + globalX;
int globalXIdx = (globalY + 1) * numCols + globalX;
int globalYIdx = globalY * (numCols + 2) + globalX + 2;
if (globalX >= numCols || globalY >= numRows)
return;
/* Set Up s_XT (transposed so the stencil's column reads become rows) */
int s_XTx = threadIdx.y + 1;
int s_XTy = threadIdx.x;
int s_XTIdx = s_XTy * s_XTWidth + s_XTx;
s_XT[s_XTIdx] = X[globalXIdx];
if (threadIdx.y == 0)
{
// Row above: always exists thanks to X's top halo row.
s_XT[s_XTIdx - 1] = X[globalXIdx - numCols];
}
if (threadIdx.y == blockDim.y - 1 || globalY == numRows - 1)
{
// Row below: always exists thanks to X's bottom halo row.
s_XT[s_XTIdx + 1] = X[globalXIdx + numCols];
}
/* Set Up s_Y (two halo columns loaded by the left edge of the block) */
int s_Yx = threadIdx.x + 2;
int s_Yy = threadIdx.y;
int s_YIdx = s_Yy * (blockDim.x + 2) + s_Yx;
s_Y[s_YIdx] = Y[globalYIdx];
if (threadIdx.x == 0)
{
s_Y[s_YIdx - 2] = Y[globalYIdx - 2];
s_Y[s_YIdx - 1] = Y[globalYIdx - 1];
}
/* Wait for All to Set Up s_XT and s_Y */
__syncthreads();
/* Write Output */
Z[globalIdx] = s_XT[s_XTIdx - 1] + s_XT[s_XTIdx] + s_XT[s_XTIdx + 1] - s_Y[s_YIdx - 2] - s_Y[s_YIdx - 1] - s_Y[s_YIdx];
}
// Driver: parses <numRows> <numCols>, computes the CPU reference, runs the
// shared-memory GPU kernel with 32x32 blocks, verifies bit-exact equality,
// and prints timing (total GPU, H2D, kernel, D2H) plus Z[5][5] on success.
// Note: `error = error || <cuda call>` folds cudaError_t into an int —
// valid because cudaSuccess == 0, but it discards which call failed.
int main(int argc, char *argv[])
{
int error = 0;
/* Get Dimension */
if (argc != 3)
{
printf("Error: The number of arguments is not exactly 2\n");
return 0;
}
int numRows = atoi(argv[1]);
int numCols = atoi(argv[2]);
size_t numElem = numRows * numCols;
size_t numBytes = numElem * sizeof(float);
int numRowsX = numRows + 2;
int numColsX = numCols;
size_t numElemX = numRowsX * numColsX;
size_t numBytesX = numElemX * sizeof(float);
int numRowsY = numRows;
int numColsY = numCols + 2;
size_t numElemY = numRowsY * numColsY;
size_t numBytesY = numElemY * sizeof(float);
#ifndef NDEBUG
printf("numRows=%d, numCols=%d, numElem=%ld, numBytes=%ld\n", numRows, numCols, numElem, numBytes);
printf("numRowsX=%d, numColsX=%d, numElemX=%ld, numBytesX=%ld\n", numRowsX, numColsX, numElemX, numBytesX);
printf("numRowsY=%d, numColsY=%d, numElemY=%ld, numBytesY=%ld\n", numRowsY, numColsY, numElemY, numBytesY);
#endif
/* Allocate Host Memory (pinned, for faster transfers) */
float *h_X = NULL;
float *h_Y = NULL;
float *h_hZ = (float *)malloc(numBytes);
float *h_dZ = NULL;
error = error || cudaHostAlloc((void **)&h_X, numBytesX, 0);
error = error || cudaHostAlloc((void **)&h_Y, numBytesY, 0);
error = error || cudaHostAlloc((void **)&h_dZ, numBytes, 0);
if (error)
{
printf("Error: cudaHostAlloc returns error\n");
return 0;
}
/* Initialize Host Memory */
initX(h_X, numRows, numCols);
initY(h_Y, numRows, numCols);
#ifndef NDEBUG
double timestampPreCpuKernel = getTimeStamp();
#endif
f_siggen_reference(h_X, h_Y, h_hZ, numRows, numCols);
#ifndef NDEBUG
double timestampPostCpuKernel = getTimeStamp();
printf("CPU=%.6fsec\n", timestampPostCpuKernel - timestampPreCpuKernel);
#endif
/* Allocate Device Memory */
float *d_X = NULL;
float *d_Y = NULL;
float *d_Z = NULL;
error = error || cudaMalloc((void **)&d_X, numBytesX);
error = error || cudaMalloc((void **)&d_Y, numBytesY);
error = error || cudaMalloc((void **)&d_Z, numBytes);
if (error)
{
printf("Error: cudaMalloc returns error\n");
return 0;
}
/* Copy Host Memory to Device Memory */
double timestampPreCpuGpuTransfer = getTimeStamp();
error = error || cudaMemcpy(d_X, h_X, numBytesX, cudaMemcpyHostToDevice);
error = error || cudaMemcpy(d_Y, h_Y, numBytesY, cudaMemcpyHostToDevice);
if (error)
{
printf("Error: cudaMemcpy returns error\n");
return 0;
}
/* Run Kernel (grid ceil-divides the domain by the 32x32 block) */
double timestampPreKernel = getTimeStamp();
dim3 d_blockDim;
d_blockDim.x = 32;
d_blockDim.y = 32;
dim3 d_gridDim;
d_gridDim.x = (numCols - 1) / d_blockDim.x + 1;
d_gridDim.y = (numRows - 1) / d_blockDim.y + 1;
int d_smemNumElemX = d_blockDim.x * (d_blockDim.y + 2);
int d_smemNumElemY = (d_blockDim.x + 2) * d_blockDim.y;
size_t d_smemNumBytes = (d_smemNumElemX + d_smemNumElemY) * sizeof(float);
f_siggen<<<d_gridDim, d_blockDim, d_smemNumBytes>>>(d_X, d_Y, d_Z, numRows, numCols, d_smemNumElemX);
cudaDeviceSynchronize();
/* Copy Device Memory to Host Memory */
double timestampPreGpuCpuTransfer = getTimeStamp();
error = error || cudaMemcpy(h_dZ, d_Z, numBytes, cudaMemcpyDeviceToHost);
if (error)
{
printf("Error: cudaMemcpy returns error\n");
return 0;
}
double timestampPostGpuCpuTransfer = getTimeStamp();
/* Free Device Memory */
cudaFree(d_Z);
d_Z = NULL;
cudaFree(d_Y);
d_Y = NULL;
cudaFree(d_X);
d_X = NULL;
/* Verify Device Result with Host Result (bit-exact comparison) */
error = error || !checkZ(h_hZ, h_dZ, numRows, numCols);
/* Output */
#ifndef NDEBUG
printf("d_gridDim=(%d, %d), d_blockDim=(%d, %d), d_smemNumBytes=%ld\n", d_gridDim.x, d_gridDim.y, d_blockDim.x, d_blockDim.y, d_smemNumBytes);
#endif
if (!error)
{
// #ifndef NDEBUG
// printf("<total_GPU_time> <CPU_GPU_transfer_time> <kernel_time> <GPU_CPU_transfer_time> <Z-value> <nl>\n");
// #endif
float totalGpuElapased = timestampPostGpuCpuTransfer - timestampPreCpuGpuTransfer;
float cpuGpuTransferElapsed = timestampPreKernel - timestampPreCpuGpuTransfer;
float kernelElapsed = timestampPreGpuCpuTransfer - timestampPreKernel;
float gpuCpuTransferElapsed = timestampPostGpuCpuTransfer - timestampPreGpuCpuTransfer;
int zValueI = 5;
int zValueJ = 5;
float zValue = h_dZ[H_INDEX(zValueI, zValueJ)];
printf("%.6f %.6f %.6f %.6f %.6f\n", totalGpuElapased, cpuGpuTransferElapsed, kernelElapsed, gpuCpuTransferElapsed, zValue);
}
else
{
printf("Error: GPU result does not with CPU result\n");
#ifndef NDEBUG
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
printf("(i=%d, j=%d), CPU=%.6f, GPU=%.6f, X=%.6f, Y=%.6f\n", i, j, h_hZ[H_INDEX(i, j)], h_dZ[H_INDEX(i, j)], h_X[H_ADJ_INDEX_X(i, j)], h_Y[H_ADJ_INDEX_Y(i, j)]);
}
}
#endif
}
/* Free Host Memory (pinned buffers via cudaFreeHost, h_hZ via free) */
cudaFreeHost(h_dZ);
h_dZ = NULL;
free(h_hZ);
h_hZ = NULL;
cudaFreeHost(h_Y);
h_Y = NULL;
cudaFreeHost(h_X);
h_X = NULL;
/* Clean Up Device Resource */
cudaDeviceReset();
} |
3,476 | #include "includes.h"
// Exercise stub: intended to apply a 2-D stencil over `in` and write the
// result to `out`.  The body is deliberately empty ("Fill kernel code!") --
// nothing is computed yet.
// NOTE(review): grid/block layout, stencil radius and array extents are
// unspecified here; they must be defined together with the implementation.
__global__ void stencil_2d(int *in, int *out)
{
/*
Fill kernel code!
*/
} |
3,477 | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Simple implementation of Mandelbrot set from Wikipedia
// http://en.wikipedia.org/wiki/Mandelbrot_set
// Note that this kernel is meant to be a simple, straight-forward
// implementation, and so may not represent optimized GPU code.
// Colours one RGBA pixel (4 floats, row-major) of a Width x Height image in
// `Data` by Mandelbrot escape time.  Image extent is derived from the launch
// shape (gridDim * blockDim).  Simple reference implementation, not optimised.
extern "C"
__device__
void mandelbrot(float* Data) {
    // Pixel coordinates of this thread and the overall image extent.
    const unsigned px = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned py = blockIdx.y * blockDim.y + threadIdx.y;
    const unsigned w  = gridDim.x * blockDim.x;
    const unsigned h  = gridDim.y * blockDim.y;

    // Map the pixel into the complex plane: x in (-2.5, 1), y in (-1, 1).
    float cx = (float)px / (float)w;
    cx *= 3.5f;
    cx -= 2.5f;
    float cy = (float)py / (float)h;
    cy *= 2.0f;
    cy -= 1.0f;

    // Escape-time iteration z <- z^2 + c, capped at maxIter.
    float zx = 0.0f;
    float zy = 0.0f;
    unsigned iter = 0;
    const unsigned maxIter = 1000;
    while (zx * zx + zy * zy < 4.0f && iter < maxIter) {
        const float zxNext = zx * zx - zy * zy + cx;
        zy = 2.0f * zx * zy + cy;
        zx = zxNext;
        iter++;
    }

    // Colour from the iteration count: green/blue ramps, opaque alpha.
    const float r = 0.0f;
    const float g = (float)(iter % 50) / 50.0f;
    const float b = (float)(iter % 25) / 25.0f;
    const float a = 1.0f;

    float* pixel = &Data[(py * w + px) * 4];
    pixel[0] = r;
    pixel[1] = g;
    pixel[2] = b;
    pixel[3] = a;
}
|
3,478 | // Squares *ptr in place with the fast hardware intrinsic __powf
// (lower precision than powf, but cheaper).
__global__ void intrinsic(float *ptr){
    const float v = *ptr;
    *ptr = __powf(v, 2.0f);
}
// Squares *ptr in place with the IEEE-accurate powf library call.
__global__ void standard(float *ptr){
    const float v = *ptr;
    *ptr = powf(v, 2.0f);
} |
3,479 | #include "includes.h"
// In-place normalisation x[i] <- (x[i] - mean[f]) / sqrt(variance[f] + eps),
// where f is the filter/channel the element belongs to in a
// (batch, filters, spatial) layout.  One thread per element, guarded by N.
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid < N)
    {
        const int f = (tid / spatial) % filters;   // channel of this element
        x[tid] = (x[tid] - mean[f]) / sqrtf(variance[f] + .00001f);
    }
} |
3,480 | __global__ void depth_conv_small(const float * __restrict__ bottom_data,float *top_data, const float *__restrict__ weights,
int channels,int kernel_single_size,int spatial_dim_in,int spatial_dim_out,
int spatial_dim_add_padding,int padding,int stride)
{
// Depthwise convolution for small feature maps: one thread block per
// (batch*channel) plane, the whole zero-padded input tile staged into
// dynamic shared memory (spatial_dim_add_padding^2 floats).  Work is
// distributed one warp per row, one lane per column.
// NOTE(review): the zero-fill loop strides by a hard-coded 128, so the
// kernel assumes blockDim.x == 128 (the commented launch uses 128).
// NOTE(review): weights_shared holds 9 taps and the dot product below is
// unrolled 3x3, so kernel_single_size is assumed to be 3 -- confirm.
int kernel_size = kernel_single_size*kernel_single_size;
extern __shared__ float bottom_data_shared[];  // padded input tile
__shared__ float weights_shared[9];            // this channel's 3x3 filter
const int warpid = threadIdx.x / 32;
const int warp_num = blockDim.x / 32;
const int laneid = threadIdx.x % 32;
const int offset = blockIdx.x * spatial_dim_in * spatial_dim_in;  // start of this plane
// Clear the whole padded tile so the border acts as zero padding.
for(int i = threadIdx.x;i<spatial_dim_add_padding*spatial_dim_add_padding;i+=128){
bottom_data_shared[i] = 0.f;
}
__syncthreads();
// Stage the un-padded input into the interior of the tile, one row per
// warp, one element per lane (assumes spatial_dim_in <= 32).
for( int i = warpid; i < spatial_dim_in; i += warp_num )
{
if( laneid < spatial_dim_in ){
bottom_data_shared[spatial_dim_add_padding + padding + spatial_dim_add_padding*i + laneid] = __ldg(bottom_data+offset + spatial_dim_in*i + laneid);
}
}
// The first kernel_size threads load this channel's filter taps.
int weights_index = (blockIdx.x%channels)*kernel_size;
if(threadIdx.x<kernel_size) weights_shared[threadIdx.x] = __ldg(weights+weights_index+threadIdx.x);
__syncthreads();
int top_index = blockIdx.x*spatial_dim_out*spatial_dim_out;
float sum = 0;
// Each warp computes output rows i; each lane one output column.
// NOTE(review): the row offset multiplies by both stride AND padding
// (stride*padding*i*spatial_dim_add_padding) -- this is only the expected
// stride*i row step when padding == 1; verify against the call sites.
for(int i = warpid;i < spatial_dim_out;i += warp_num)
{
int index = laneid*stride;
if( index <= spatial_dim_add_padding-kernel_single_size){
// Fully unrolled 3x3 dot product over the shared tile.
sum = bottom_data_shared[stride*padding*i*spatial_dim_add_padding+index] * weights_shared[0];
sum += bottom_data_shared[stride*padding*i*spatial_dim_add_padding+index+1] * weights_shared[1];
sum += bottom_data_shared[stride*padding*i*spatial_dim_add_padding+index+2] * weights_shared[2];
sum += bottom_data_shared[stride*padding*i*spatial_dim_add_padding+index+spatial_dim_add_padding] * weights_shared[3];
sum += bottom_data_shared[stride*padding*i*spatial_dim_add_padding+index+spatial_dim_add_padding+1] * weights_shared[4];
sum += bottom_data_shared[stride*padding*i*spatial_dim_add_padding+index+spatial_dim_add_padding+2] * weights_shared[5];
sum += bottom_data_shared[stride*padding*i*spatial_dim_add_padding+index+spatial_dim_add_padding*2] * weights_shared[6];
sum += bottom_data_shared[stride*padding*i*spatial_dim_add_padding+index+spatial_dim_add_padding*2+1] * weights_shared[7];
sum += bottom_data_shared[stride*padding*i*spatial_dim_add_padding+index+spatial_dim_add_padding*2+2] * weights_shared[8];
top_data[top_index + spatial_dim_out*i + laneid] = sum;
}
}
}
__global__ void depth_conv_big(const float *__restrict__ bottom_data_gpu, float *top_data_gpu, const float *__restrict__ weights_gpu,
int channels,int kernel_single_size,
int spatial_dim_in,int spatial_dim_out,int spatial_dim_add_padding,
int padding,int stride)
{
// Depthwise convolution for larger maps: one thread block per output row
// of one (batch*channel) plane (the commented launch uses
// gridDim.x = channels * out_w, blockDim = (32, 3)).  Each threadIdx.y
// stages one padded input row into shared memory and accumulates one 1x3
// slice; the three per-row partial products are combined in global memory
// via atomicAdd.
// Shared memory: spatial_dim_add_padding * kernel_single_size floats.
// NOTE(review): results are accumulated with atomicAdd, so top_data_gpu
// must be zero-initialised by the caller -- confirm upstream.
// NOTE(review): the 3-row staging and weights_shared[9] assume
// kernel_single_size == 3.
extern __shared__ float bottom_data_shared[];
__shared__ float weights_shared[9];
const int tidx = threadIdx.y * blockDim.x + threadIdx.x;
// Zero the padded row buffer so out-of-range rows/columns contribute 0.
for(int i = tidx; i<spatial_dim_add_padding*kernel_single_size; i += blockDim.x * blockDim.y) {
bottom_data_shared[i] = 0.f;
}
__syncthreads();
// Plane base offset and the input row this threadIdx.y is responsible for.
int tid = (blockIdx.x/spatial_dim_out)*spatial_dim_in*spatial_dim_in;
int height_index = (blockIdx.x % spatial_dim_out) * stride + threadIdx.y - padding; //-1
// Unsigned compare rejects both height_index < 0 and >= spatial_dim_in.
if((unsigned int)height_index < spatial_dim_in) {
for(int w = threadIdx.x; w < spatial_dim_in; w += blockDim.x)
{
bottom_data_shared[threadIdx.y * spatial_dim_add_padding + w + padding] = __ldg(bottom_data_gpu + tid + height_index * spatial_dim_in + w);
}
}
// One warp-row loads the 9 filter taps for this channel.
if( threadIdx.y == 0 && threadIdx.x < 9 )
{
int threadblock_index_per_batch = blockIdx.x % (channels * spatial_dim_out);
weights_shared[threadIdx.x] = __ldg(weights_gpu + ( threadblock_index_per_batch / spatial_dim_out ) * 9 + threadIdx.x);
}
__syncthreads();
// Each thread walks its output columns; threadIdx.y selects the filter row.
float sum = 0.f;
for(int i = threadIdx.x * stride; i <= spatial_dim_add_padding-kernel_single_size; i += stride * blockDim.x) {
sum = bottom_data_shared[threadIdx.y * spatial_dim_add_padding + i] * weights_shared[threadIdx.y * 3];
sum += bottom_data_shared[threadIdx.y * spatial_dim_add_padding + i + 1] * weights_shared[threadIdx.y * 3 + 1];
sum += bottom_data_shared[threadIdx.y * spatial_dim_add_padding + i + 2] * weights_shared[threadIdx.y * 3 + 2];
atomicAdd(top_data_gpu + (blockIdx.x/spatial_dim_out)*spatial_dim_out*spatial_dim_out + (blockIdx.x%spatial_dim_out)*spatial_dim_out + i / stride,sum);
}
}
/*
depth_conv_big<<<mobilenet_channels*out_w,dim3(32, 3),(w+2*pad)*kernelsize*sizeof(float)>>>
(bottom_data,top_data,weights_gpu,mobilenet_channels,size,w,out_w,(w+2*pad),pad,stride);
*/
/*
depth_conv_small<<<mobilenet_channels,128,(w+2*pad)*(w+2*pad)*sizeof(float)>>>
(bottom_data,top_data,weights_gpu,mobilenet_channels,size,w,out_w,(w+2*pad),pad,stride);
*/
|
3,481 | /*
Uses N blocks with N threads
SOR Stokes Flow with no slip b.c. on top/bottom and no flux b.c. on left/right written by Dmitriy Kats
Inputs: N is the number of grid points in each direction,
mu is the viscosity
Pdiff is the pressure drop in the x direction
omega is the SOR factor
toltau is the tolerance of the residual
Outputs: The final velocities and pressure
*/
#include <stdlib.h>
#include <stdio.h>
#include<math.h>
#include <time.h>
//Kernels to udpate u, v, and p
//The inputs also considers if it is a red or black point udpate
__global__ void update_u(double* U, double* Uresid, double* P, double* Presid, double* FAC1, double* OMEGA, int RedorBlack);
__global__ void update_v(double* V, double* Vresid, double* P, double* Presid, double* FAC1, double* OMEGA, int RedorBlack);
__global__ void update_p(double* U, double* V, double* P, double* Presid, double* FAC1, double* OMEGA, double* Pdiff, int RedorBlack);
__device__ static int dev_N;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Helper behind the gpuErrchk macro: on any CUDA error, print the error
// string with its source location and (unless abort==false) terminate the
// process with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Host driver for the red/black SOR Stokes solver.
// Usage: prog N mu pdiff omega toltau
// Iterates update_u/update_v/update_p until the max residual drops below
// toltau (or a 10000-iteration fail-safe), then writes U/V/P to .out files.
int main (int argc, char * argv[]){
// Validate the command line before reading argv[1..5].
if (argc < 6)
{
fprintf(stderr, "Usage: %s N mu pdiff omega toltau\n", argv[0]);
return 1;
}
// Choose the GPU card
cudaDeviceProp prop;
int dev;
memset(&prop, 0, sizeof(cudaDeviceProp));
prop.multiProcessorCount = 13;
cudaChooseDevice(&dev, &prop);
cudaSetDevice(dev);
// Create the CUDA events that will be used for timing the kernel function
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Click, the timer has started running
cudaEventRecord(start, 0);
int N;
double mu, pdiff, omega, toltau;
N=atoi(argv[1]);
mu=atof(argv[2]);
pdiff=atof(argv[3]);
omega=atof(argv[4]);
toltau=atof(argv[5]);
double dx=1.0/((double)N-1.0);
double fac1=dx/mu; //precompute the factor
double residABSMAX = 99.0;
int numberOfIterations=0;
double* dev_fac1;
double* dev_omega;
double* dev_pdiff;
double *dev_u, *dev_uresid;
double *dev_v, *dev_vresid;
double *dev_p, *dev_presid;
//allocate memory for the velocities and pressure
double *u = (double*)malloc(N*(N-1)*sizeof(double));
double *uresid = (double*)malloc(N*(N-1)*sizeof(double));
double *v = (double*)malloc((N-1)*N*sizeof(double));
double *vresid = (double*)malloc((N-1)*N*sizeof(double));
double *p = (double*)malloc((N+1)*(N-1)*sizeof(double));
double *presid = (double*)malloc((N+1)*(N-1)*sizeof(double));
//allocate Cuda memory
cudaMalloc((void**)&dev_fac1, sizeof(double));
cudaMalloc((void**)&dev_omega, sizeof(double));
cudaMalloc((void**)&dev_pdiff, sizeof(double));
cudaMalloc((void**)&dev_u, N*(N-1)*sizeof(double));
cudaMalloc((void**)&dev_uresid, N*(N-1)*sizeof(double));
cudaMalloc((void**)&dev_v, (N-1)*N*sizeof(double));
cudaMalloc((void**)&dev_vresid, (N-1)*N*sizeof(double));
cudaMalloc((void**)&dev_p, (N+1)*(N-1)*sizeof(double));
cudaMalloc((void**)&dev_presid, (N+1)*(N-1)*sizeof(double));
//Intialize to zero
int i, j;
for(i=0; i<N; i++)
{
for(j=0; j<N-1; j++)
{
u[i+j*N]=0.0;
uresid[i+j*N]=0.0;
}
}
for(i=0; i<N-1; i++)
{
for(j=0; j<N; j++)
{
v[i+j*(N-1)]=0.0;
vresid[i+j*(N-1)]=0.0;
}
}
for(i=0; i<N+1; i++)
{
for(j=0; j<N-1; j++)
{
p[i+j*(N+1)]=0.0;
presid[i+j*(N+1)]=0.0;
}
}
//Copy the values to the device
cudaMemcpy(dev_u, u, N*(N-1)*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_uresid, uresid, N*(N-1)*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_v, v, (N-1)*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_vresid, vresid, (N-1)*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_p, p, (N+1)*(N-1)*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_presid, presid, (N+1)*(N-1)*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(dev_N, &N, sizeof(int));
cudaMemcpy(dev_fac1, &fac1, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_omega, &omega, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_pdiff, &pdiff, sizeof(double), cudaMemcpyHostToDevice);
dim3 meshDim(N,N); //This one will be for the velocities
dim3 meshDim2(N+1,N); //This one will be for the pressure
while(residABSMAX>=toltau)
{
residABSMAX=0.1*toltau;
//Solve in the next six lines: red pass then black pass for each field
update_u<<<meshDim,1>>>(dev_u, dev_uresid, dev_p, dev_presid, dev_fac1, dev_omega, 0);
update_u<<<meshDim,1>>>(dev_u, dev_uresid, dev_p, dev_presid, dev_fac1, dev_omega, 1);
update_v<<<meshDim,1>>>(dev_v, dev_vresid, dev_p, dev_presid, dev_fac1, dev_omega, 0);
update_v<<<meshDim,1>>>(dev_v, dev_vresid, dev_p, dev_presid, dev_fac1, dev_omega, 1);
update_p<<<meshDim2,1>>>(dev_u, dev_v, dev_p, dev_presid, dev_fac1, dev_omega, dev_pdiff, 0);
update_p<<<meshDim2,1>>>(dev_u, dev_v, dev_p, dev_presid, dev_fac1, dev_omega, dev_pdiff, 1);
//This is slow but I ran out of time
//Copy the residuals to the host to find the max residual
cudaMemcpy(uresid, dev_uresid, N*(N-1)*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(vresid, dev_vresid, (N-1)*N*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(presid, dev_presid, (N+1)*(N-1)*sizeof(double), cudaMemcpyDeviceToHost);
for(i=0; i<N; i++)
{
for(j=0; j<N-1; j++)
{
if(fabs(uresid[i+j*N])>residABSMAX)
{
residABSMAX=fabs(uresid[i+j*N]);
}
}
}
for(i=0; i<N-1; i++)
{
for(j=0; j<N; j++)
{
if(fabs(vresid[i+j*(N-1)])>residABSMAX)
{
residABSMAX=fabs(vresid[i+j*(N-1)]);
}
}
}
for(i=0; i<N+1; i++)
{
for(j=0; j<N-1; j++)
{
if(fabs(presid[i+j*(N+1)])>residABSMAX)
{
residABSMAX=fabs(presid[i+j*(N+1)]);
}
}
}
//Check for errors
gpuErrchk(cudaPeekAtLastError() );
gpuErrchk(cudaDeviceSynchronize() );
numberOfIterations+=1;
if (numberOfIterations>10000)
{ //fail safe to save data and exit
cudaMemcpy(u, dev_u, N*(N-1)*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(v, dev_v, (N-1)*N*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(p, dev_p, (N+1)*(N-1)*sizeof(double), cudaMemcpyDeviceToHost);
printf("Reached fail safe. The max residual is %10e. The number of iterations is %i\n", residABSMAX, numberOfIterations);
FILE *fpu = fopen("StokesU.out", "wb");
fwrite(u, sizeof(double), N*(N-1), fpu);
fclose (fpu);
FILE *fpv = fopen("StokesV.out", "wb");
fwrite(v, sizeof(double), (N-1)*N, fpv);
fclose (fpv);
FILE *fpP = fopen("StokesP.out", "wb");
fwrite(p, sizeof(double), (N+1)*(N-1), fpP);
fclose (fpP);
// BUGFIX: also release the timing events on the fail-safe exit path.
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(dev_u);
cudaFree(dev_uresid);
cudaFree(dev_v);
cudaFree(dev_vresid);
cudaFree(dev_p);
cudaFree(dev_presid);
cudaFree(dev_fac1);
cudaFree(dev_omega);
cudaFree(dev_pdiff);
free(u);
free(uresid);
free(v);
free(vresid);
free(p);
free(presid);
return 0;
}
}
cudaMemcpy(u, dev_u, N*(N-1)*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(v, dev_v, (N-1)*N*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(p, dev_p, (N+1)*(N-1)*sizeof(double), cudaMemcpyDeviceToHost);
//export the data
FILE *fpu = fopen("StokesU.out", "wb");
fwrite(u, sizeof(double), N*(N-1), fpu);
fclose (fpu);
FILE *fpv = fopen("StokesV.out", "wb");
fwrite(v, sizeof(double), (N-1)*N, fpv);
fclose (fpv);
FILE *fpP = fopen("StokesP.out", "wb");
fwrite(p, sizeof(double), (N+1)*(N-1), fpP);
fclose (fpP);
//stop the timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// The elapsed time is computed by taking the difference between start and stop
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("N:%i omega:%f\n", N, omega);
printf("The max residual is %10e and the number of iterations is %i\n", residABSMAX, numberOfIterations);
printf("Time: %gms\n", elapsedTime);
//clean up timer
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(dev_u);
cudaFree(dev_uresid);
cudaFree(dev_v);
cudaFree(dev_vresid);
cudaFree(dev_p);
cudaFree(dev_presid);
cudaFree(dev_fac1);
cudaFree(dev_omega);
cudaFree(dev_pdiff); // BUGFIX: dev_pdiff was leaked on the normal exit path
free(u);
free(uresid);
free(v);
free(vresid);
free(p);
free(presid);
return 0;
}
// Red/black SOR update of the x-velocity U (N x (N-1), stored as
// u[i + j*N]).  Launched as an N x N grid of single-thread blocks:
// blockIdx identifies the point and only points whose parity
// (blockIdx.x+blockIdx.y)%2 matches RorB are updated this pass.
// *FAC1 = dx/mu couples the pressure gradient; *OMEGA is the SOR factor.
// Residuals are stored in Uresid so the host can test convergence.
// dev_N is the grid size, set from the host via cudaMemcpyToSymbol.
// NOTE(review): neighbour indices below use modular wrap-around, but
// every wrapped case is excluded by the boundary branches, so wrapped
// values are never actually read.
__global__ void update_u(double* U, double* Uresid, double* P, double* Presid, double* FAC1, double* OMEGA, int RorB)
{
int EvenOrOdd=(blockIdx.x+blockIdx.y)%2;
int u_ij00 = blockIdx.x + blockIdx.y * gridDim.x;
int u_ijp0 = (blockIdx.x + 1)%gridDim.x + blockIdx.y * gridDim.x; //down for u
int u_ijm0 = (blockIdx.x + gridDim.x - 1)%gridDim.x + blockIdx.y * gridDim.x; //up for u
int u_ij0p = blockIdx.x + ((blockIdx.y + 1)%gridDim.y) * gridDim.x; //east for u
int u_ij0m = blockIdx.x + ((blockIdx.y + gridDim.y - 1)%gridDim.y) * gridDim.x; //west for u
int p_ij00 = blockIdx.x + blockIdx.y * (gridDim.x+1);
int p_ijp0 = (blockIdx.x + 1)%(gridDim.x+1) + blockIdx.y * (gridDim.x+1); //down for p
//int p_ijm0 = (blockIdx.x + gridDim.x)%(gridDim.x+1) + blockIdx.y * (gridDim.x+1); //up for p
//int p_ij0p = blockIdx.x + ((blockIdx.y + 1)%gridDim.y) *(gridDim.x+1); //east for p
//int p_ij0m = blockIdx.x + ((blockIdx.y + gridDim.y - 1)%gridDim.y) * (gridDim.x+1); //west for p
//UPDATE INLET (x == 0 column; one-sided stencils at the corners)
if (blockIdx.y==0 && blockIdx.x==0 && EvenOrOdd==RorB)
{ //Corner point
Uresid[u_ij00]= (-U[u_ij00]+ U[u_ijp0])+(-3.0*U[u_ij00]+U[u_ij0p])-*FAC1*(P[p_ijp0]-P[p_ij00]);
U[u_ij00]=U[u_ij00]+*OMEGA*Uresid[u_ij00];
}
if (blockIdx.y>0 && blockIdx.y<(dev_N-2) && blockIdx.x==0 && EvenOrOdd==RorB)
{ //Middle points
Uresid[u_ij00]=(-U[u_ij00]+ U[u_ijp0])+(U[u_ij0m]-2.0*U[u_ij00]+U[u_ij0p])-*FAC1*(P[p_ijp0]-P[p_ij00]);
U[u_ij00]=U[u_ij00]+*OMEGA*Uresid[u_ij00];
}
if (blockIdx.y==(dev_N-2) && blockIdx.x==0 && EvenOrOdd==RorB)
{ //Corner point
Uresid[u_ij00]= (-U[u_ij00]+ U[u_ijp0])+(U[u_ij0m]-3.0*U[u_ij00])-*FAC1*(P[p_ijp0]-P[p_ij00]);
U[u_ij00]=U[u_ij00]+*OMEGA*Uresid[u_ij00];
}
//UPDATE BULK (interior columns; -3.0 coefficients encode the no-slip walls)
if (blockIdx.y==0 && blockIdx.x>0 && blockIdx.x<(dev_N-1)&& EvenOrOdd==RorB)
{ // boundary condition
Uresid[u_ij00]= (U[u_ijm0]-2.0*U[u_ij00]+ U[u_ijp0])+(-3.0*U[u_ij00]+U[u_ij0p])-*FAC1*(P[p_ijp0]-P[p_ij00]);
U[u_ij00]=U[u_ij00]+*OMEGA*Uresid[u_ij00];
}
if (blockIdx.y>0 && blockIdx.y<(dev_N-2) && blockIdx.x>0 && blockIdx.x<(dev_N-1)&& EvenOrOdd==RorB)
{ //interior
Uresid[u_ij00]= (U[u_ijm0]-2.0*U[u_ij00]+ U[u_ijp0])+(U[u_ij0m]-2.0*U[u_ij00]+U[u_ij0p])-*FAC1*(P[p_ijp0]-P[p_ij00]);
U[u_ij00]=U[u_ij00]+*OMEGA*Uresid[u_ij00];
}
if (blockIdx.y==(dev_N-2) && blockIdx.x>0 && blockIdx.x<(dev_N-1)&& EvenOrOdd==RorB)
{ //boundary condition
Uresid[u_ij00]= (U[u_ijm0]-2.0*U[u_ij00]+ U[u_ijp0])+(U[u_ij0m]-3.0*U[u_ij00])-*FAC1*(P[p_ijp0]-P[p_ij00]);
U[u_ij00]=U[u_ij00]+*OMEGA*Uresid[u_ij00];
}
//Update Outlet (x == N-1 column; one-sided stencil in x)
if (blockIdx.y==0 && blockIdx.x==(dev_N-1)&& EvenOrOdd==RorB)
{ //boundary condition
Uresid[u_ij00]= (U[u_ijm0]-U[u_ij00])+(-3.0*U[u_ij00]+U[u_ij0p])-*FAC1*(P[p_ijp0]-P[p_ij00]);
U[u_ij00]=U[u_ij00]+*OMEGA*Uresid[u_ij00];
}
if (blockIdx.y>0 && blockIdx.y<(dev_N-2) && blockIdx.x==(dev_N-1)&& EvenOrOdd==RorB)
{ //middle points on outlet
Uresid[u_ij00]= (U[u_ijm0]-U[u_ij00])+(U[u_ij0m]-2.0*U[u_ij00]+U[u_ij0p])-*FAC1*(P[p_ijp0]-P[p_ij00]);
U[u_ij00]=U[u_ij00]+*OMEGA*Uresid[u_ij00];
}
if (blockIdx.y==(dev_N-2) && blockIdx.x==(dev_N-1)&& EvenOrOdd==RorB)
{ //boundary node
Uresid[u_ij00]= (U[u_ijm0]-U[u_ij00])+(U[u_ij0m]-3.0*U[u_ij00])-*FAC1*(P[p_ijp0]-P[p_ij00]);
U[u_ij00]=U[u_ij00]+*OMEGA*Uresid[u_ij00];
}
__syncthreads(); // no-op here: blocks are launched with a single thread
}
// Red/black SOR update of the y-velocity V ((N-1) x N, stored as
// v[i + j*(N-1)]).  Launched as an N x N grid of single-thread blocks;
// only points whose parity (blockIdx.x+blockIdx.y)%2 matches RorB are
// updated this pass.  Top/bottom rows are pinned to the no-slip value 0.
// *FAC1 = dx/mu couples the pressure gradient; *OMEGA is the SOR factor.
__global__ void update_v(double* V, double* Vresid, double* P, double* Presid, double* FAC1, double* OMEGA, int RorB)
{
int EvenOrOdd=(blockIdx.x+blockIdx.y)%2;
int v_ij00 = blockIdx.x + blockIdx.y * (gridDim.x-1);
int v_ijp0 = (blockIdx.x + 1)%(gridDim.x-1) + blockIdx.y * (gridDim.x-1); //down for v
int v_ijm0 = (blockIdx.x + gridDim.x - 2)%(gridDim.x-1) + blockIdx.y * (gridDim.x-1); //up for v
int v_ij0p = blockIdx.x + ((blockIdx.y + 1)%gridDim.y) * (gridDim.x-1); //east for v
int v_ij0m = blockIdx.x + ((blockIdx.y + gridDim.y - 1)%gridDim.y) * (gridDim.x-1); //west for v
//int p_ij00 = blockIdx.x + blockIdx.y * (gridDim.x+1);
int p_ijp0 = (blockIdx.x + 1)%(gridDim.x+1) + blockIdx.y * (gridDim.x+1); //down for p
//int p_ijm0 = (blockIdx.x + gridDim.x)%(gridDim.x+1) + blockIdx.y * (gridDim.x+1); //up for p
//int p_ij0p = blockIdx.x + ((blockIdx.y + 1)%gridDim.y) *(gridDim.x+1); //east for p
//int p_ij0m = blockIdx.x + ((blockIdx.y + gridDim.y - 1)%gridDim.y) * (gridDim.x+1); //west for p
int p_ijpm = (blockIdx.x + 1)%(gridDim.x+1) + ((blockIdx.y + gridDim.y - 1)%gridDim.y) * (gridDim.x+1); //sw for p
//Update inlet similarly to above
if (blockIdx.y==0 && blockIdx.x==0 && EvenOrOdd==RorB)
{ //no velocity boundary condition
Vresid[v_ij00]= 0.0;
V[v_ij00]=0.0;
}
if (blockIdx.y>0 && blockIdx.y<(dev_N-1) && blockIdx.x==0 && EvenOrOdd==RorB)
{
Vresid[v_ij00]=(-V[v_ij00]+ V[v_ijp0])+(V[v_ij0m]-2.0*V[v_ij00]+V[v_ij0p])-*FAC1*(P[p_ijp0]-P[p_ijpm]);
V[v_ij00]=V[v_ij00]+*OMEGA*Vresid[v_ij00];
}
// BUGFIX: this corner previously tested EvenOrOdd==0 instead of
// EvenOrOdd==RorB like every other branch (when dev_N-1 is odd the corner
// was never touched in either pass).  The write is 0.0 either way and V is
// zero-initialised, so results are unchanged, but the parity test is now
// consistent with the rest of the kernel.
if (blockIdx.y==(dev_N-1) && blockIdx.x==0 && EvenOrOdd==RorB)
{
Vresid[v_ij00]= 0.0;
V[v_ij00]=0.0;
}
//Update Bulk similarly to above
if (blockIdx.y==0 && blockIdx.x>0 && blockIdx.x<(dev_N-2)&& EvenOrOdd==RorB)
{
Vresid[v_ij00]= 0.0;
V[v_ij00]=0.0;
}
if (blockIdx.y>0 && blockIdx.y<(dev_N-1) && blockIdx.x>0 && blockIdx.x<(dev_N-2)&& EvenOrOdd==RorB)
{
Vresid[v_ij00]=(V[v_ijm0]-2.0*V[v_ij00]+ V[v_ijp0])+(V[v_ij0m]-2.0*V[v_ij00]+V[v_ij0p])-*FAC1*(P[p_ijp0]-P[p_ijpm]);
V[v_ij00]=V[v_ij00]+*OMEGA*Vresid[v_ij00];
}
if (blockIdx.y==(dev_N-1) && blockIdx.x>0 && blockIdx.x<(dev_N-2)&& EvenOrOdd==RorB)
{
Vresid[v_ij00]=0.0;
V[v_ij00]=0.0;
}
//Update Outlet
if (blockIdx.y==0 && blockIdx.x==(dev_N-2)&& EvenOrOdd==RorB)
{
Vresid[v_ij00]= 0.0;
V[v_ij00]=0.0;
}
if (blockIdx.y>0 && blockIdx.y<(dev_N-1) && blockIdx.x==(dev_N-2)&& EvenOrOdd==RorB)
{
Vresid[v_ij00]=(V[v_ijm0]-V[v_ij00])+(V[v_ij0m]-2.0*V[v_ij00]+V[v_ij0p])-*FAC1*(P[p_ijp0]-P[p_ijpm]);
V[v_ij00]=V[v_ij00]+*OMEGA*Vresid[v_ij00];
}
if (blockIdx.y==(dev_N-1) && blockIdx.x==(dev_N-2)&& EvenOrOdd==RorB)
{
Vresid[v_ij00]= 0.0;
V[v_ij00]=0.0;
}
__syncthreads(); // no-op here: blocks are launched with a single thread
}
// Red/black SOR update of the pressure P ((N+1) x (N-1), stored as
// p[i + j*(N+1)]).  Launched on an (N+1) x N grid of single-thread blocks.
// Interior points relax P against the local divergence of (U, V); the
// inlet column (x==0) enforces the prescribed pressure drop *Pdiff via a
// ghost value, and the outlet column (x==N) mirrors its neighbour.
__global__ void update_p(double* U, double* V, double* P, double* Presid, double* FAC1, double* OMEGA, double* Pdiff, int RorB)
{
int EvenOrOdd=((int) (blockIdx.x+blockIdx.y)%2);
int u_ij00 = blockIdx.x + blockIdx.y * (gridDim.x-1);
int u_ijm0 = (blockIdx.x + gridDim.x - 2)%(gridDim.x-1) + blockIdx.y * (gridDim.x-1); //up for u
int v_ijm0 = (blockIdx.x + gridDim.x - 3)%(gridDim.x-2) + blockIdx.y * (gridDim.x-2); //up for v
int v_ijmp = (blockIdx.x + gridDim.x - 3)%(gridDim.x-2) + ((blockIdx.y + 1)%gridDim.y) * (gridDim.x-2);
int p_ij00 = blockIdx.x + blockIdx.y * (gridDim.x);
int p_ijp0 = (blockIdx.x + 1)%(gridDim.x) + blockIdx.y * (gridDim.x); //down for p
int p_ijm0 = (blockIdx.x + gridDim.x-1)%(gridDim.x) + blockIdx.y * (gridDim.x); //up for p
//Update the boundary with the right pressure drop (inlet ghost column)
if (blockIdx.y<(dev_N-1) && blockIdx.x==0 && EvenOrOdd==RorB)
{
Presid[p_ij00]=2.0*(*Pdiff)-P[p_ijp0]-P[p_ij00];
P[p_ij00]=2.0*(*Pdiff)-P[p_ijp0];
}
//Update interior nodes: residual is minus the discrete divergence of (U,V)
if (blockIdx.y<(dev_N-1) && blockIdx.x>0 && blockIdx.x<(dev_N) && EvenOrOdd==RorB)
{
Presid[p_ij00]=-(U[u_ij00]-U[u_ijm0])-(V[v_ijmp]-V[v_ijm0]);
P[p_ij00]=P[p_ij00]+*OMEGA*Presid[p_ij00];
}
//Update boundary conditions (outlet ghost column; residual left untouched)
if (blockIdx.y<(dev_N-1) && blockIdx.x==(dev_N) && EvenOrOdd==RorB)
{
P[p_ij00]=-P[p_ijm0];
}
__syncthreads(); // no-op here: blocks are launched with a single thread
}
|
3,482 | /* Write GPU kernels to compete the functionality of estimating the integral via the trapezoidal rule. */
#define F(n) ((n) + 1)/sqrt((n) * (n) + (n) + 1)
// Partial sums for the composite trapezoidal rule over [a, b] with n
// sub-intervals of width h: each thread accumulates f(a + i*h) for a
// strided subset of the interior points i = 1..n-1 (f given by macro F),
// then a shared-memory reduction leaves one partial result per block in
// Result_Vector[blockIdx.x].  The final combination -- summing the block
// results, adding (f(a)+f(b))/2 and multiplying by h -- is left to the host.
// Assumes blockDim.x <= 128 (size of `p`) and, for the reduction to cover
// every element, blockDim.x a power of two.
__global__ void trap_kernel(float a, float b, int n, float h, float *Result_Vector)
{
__shared__ float p[128];
// +1: thread 0 starts at the first interior point, x = a + h.
unsigned int column = blockIdx.x * blockDim.x + threadIdx.x + 1;
unsigned int stride = blockDim.x * gridDim.x;
double temp;
double sum = 0;
for(unsigned int i = column; i < n; i += stride)
{
temp = a + (i * h);
sum += F(temp);
}
p[threadIdx.x] = sum;
// Sliding-window reduction: after the pass with step j, p[t] holds the sum
// of the original p[t .. t+2j-1] (clamped at blockDim.x), so p[0] ends up
// with the block total.  The barrier sits outside the divergent branch, so
// every thread reaches it.
for(unsigned int j = 1; (j << 1) <= blockDim.x; j <<= 1)
{
__syncthreads();
if (threadIdx.x + j < blockDim.x)
{
p[threadIdx.x] += p[threadIdx.x + j];
}
}
// Thread 0 wrote p[0] itself in the last pass, so no extra barrier needed.
if (threadIdx.x == 0)
{
Result_Vector[blockIdx.x] = p[0];
}
} |
3,483 | #include "stdio.h"
#define DIM 8
const int THREADS_PER_BLOCK = 8;
const int NUM_BLOCKS = 8;
// Block-level sum reduction: each block sums its THREADS_PER_BLOCK inputs
// from `a` in shared memory and writes one partial sum per block to `c`.
// Assumes blockDim.x is a power of two and the grid exactly covers `a`
// (no bounds guard -- the launch in main uses exactly DIM*DIM elements).
__global__ void add(int *a, int *c)
{
    __shared__ int cache[THREADS_PER_BLOCK];
    int tid = threadIdx.x + (blockIdx.x * blockDim.x);
    int cacheIndex = threadIdx.x;
    int temp = 0;
    temp = a[tid];
    cache[cacheIndex] = temp;
    // BUGFIX: every thread's shared-memory store must be visible before any
    // thread reads its neighbour's slot below; without this barrier the
    // first reduction pass races against the initial writes.
    __syncthreads();
    int i = blockDim.x / 2;
    while (i > 0)
    {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads(); // outside the branch, so all threads reach it
        i /= 2;
    }
    if (threadIdx.x == 0) // if at thread 0 in this block
        c[blockIdx.x] = cache[0]; // save the sum in global memory
}
// Host driver: fills an 8x8 matrix with the constant 7, reduces it on the
// GPU (one partial sum per block via `add`), finishes the reduction on the
// CPU and prints the grand total (expected 7 * 64 = 448).
int main()
{
    int a[DIM][DIM], c[DIM];
    int *dev_a, *dev_c;

    cudaMalloc((void **)&dev_a, DIM * DIM * sizeof(int));
    cudaMalloc((void **)&dev_c, DIM * sizeof(int));

    // Fill the input with a known constant and clear the partial sums.
    for (int row = 0; row < DIM; row++)
        for (int col = 0; col < DIM; col++)
            a[row][col] = 7;
    for (int k = 0; k < DIM; ++k)
        c[k] = 0;

    cudaMemcpy(dev_a, a, DIM * DIM * sizeof(int), cudaMemcpyHostToDevice);
    add<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>(dev_a, dev_c);
    cudaMemcpy(c, dev_c, DIM * sizeof(int), cudaMemcpyDeviceToHost);

    // Combine the per-block partial sums on the host.
    int total = 0;
    for (int k = 0; k < DIM; ++k)
        total += c[k];
    printf("Total sum of all elements is: %d\n", total);

    cudaFree(dev_a);
    cudaFree(dev_c);
    return 0;
}
|
3,484 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
// Enumerates every CUDA device and prints its name, compute capability and
// maximum grid/block dimensions.
int main() {
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    cudaDeviceProp props;
    for (int d = 0; d < deviceCount; ++d) {
        cudaGetDeviceProperties(&props, d);
        cout << "Device" << d << props.name << endl;
        cout << "Compute capability" << props.major << "." << props.minor << "." << endl;
        cout << "Max grid dimensions: ("
            << props.maxGridSize[0] << " x "
            << props.maxGridSize[1] << " x "
            << props.maxGridSize[2] << ")" << endl;
        cout << "Max block dimensions: ("
            << props.maxThreadsDim[0] << " x "
            << props.maxThreadsDim[1] << " x "
            << props.maxThreadsDim[2] << ")" << endl;
    }
    return 0;
}
|
3,485 | #include "includes.h"
// Element-wise vector addition c = a + b over n ints; one thread per
// element with a guard for the tail of the final block.
__global__ void vectorAdd(int* a, int* b, int* c, int n) {
    const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; // global id
    if (idx >= n)
        return; // threads past the end of the vectors do nothing
    c[idx] = a[idx] + b[idx];
} |
3,486 | #include "includes.h"
// out = a + b element-wise over n floats; one thread per element.
__global__ void gpu_vector_add(float *out, float *a, float *b, int n) {
    // blockDim.x is the number of threads per block.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return; // guard the tail when n is not a multiple of blockDim.x
    out[idx] = a[idx] + b[idx];
    // A grid-stride variant would also handle n larger than the whole grid:
    //   for (int i = idx; i < n; i += gridDim.x * blockDim.x)
    //       out[i] = a[i] + b[i];
} |
3,487 | // Use grid strided loops, descriped here:
// https://devblogs.nvidia.com/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/
// This pattern ensures that all of the loop values are visited once, no matter
// what grid parameters are used for the function.
// Concatenates each row of X with its nW neighbours on either side
// (window size nF = 2*nW+1) into `output`, zero-padded at the edges.
// X is (B, I) row-major; output is (B, I*nF).  Grid-stride over rows.
// NOTE(review): rows clipped at either edge leave their padding region of
// `output` unwritten -- output is presumably pre-zeroed by the caller;
// confirm upstream.
extern "C" __global__
void seq2col(float* output,
    const float* X, int nW, int B, int I)
{
// Let's say nW is 1 (it usually is). Then we want to take:
// 1a 1b 1c
// 2a 2b 2c
// 3a 3b 3c
// And make
// __ __ __ 1a 1b 1c 2a 2b 2c
// 1a 1b 1c 2a 2b 2c 3a 3b 3c
// 2a 2b 2c 3a 3b 3c __ __ __
// Where __ is padding.
// Now let's say nW is 2. Then we want to take:
// 1a 1b 1c
// 2a 2b 2c
// 3a 3b 3c
// And make
// __ __ __ __ __ __ 1a 1b 1c 2a 2b 2c 3a 3b 3c
// __ __ __ 1a 1b 1c 2a 2b 2c 3a 3b 3c __ __ __
// 1a 1b 1c 2a 2b 2c 3a 3b 3c __ __ __ __ __ __
// * x_start=-6, x_end=9 : (0-2) * 3, (0+2+1) * 3
// * x_start=-3, x_end=13 : (1-2) * 3, (1+2+1) * 3
// * x_start=0, x_end=16 : (2-2) * 3, (2+2+1) * 3
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
int nF = nW * 2 + 1;
for (int b = _loop_start; b < B; b += _loop_stride)
{
int o_start = b * I * nF;
// Let's say b=0, nW=1, I=10, B=20
// x_start = (0-1) * 10 : -10
// x_end = (0+1+1)*10 : 20
// o_start = (0*0*3) = 0
int x_start = (b-nW) * I;
int x_end = (b+nW+1) * I;
if (x_start < 0)
{
// Adjust o_start to 10, because we're skipping
// the first feature
o_start += -x_start;
x_start = 0;
}
if (x_end >= (B * I))
{
x_end = B * I;
}
// cpy_length = 20-0 : 20
// Unsure which memcpy function to use on CUDA..
// Shrug, just write the loop...
int cpy_length = x_end - x_start;
for (int i=0; i<cpy_length; ++i)
{
// Write the region output[10:30] = X[0:20]
output[o_start+i] = X[x_start+i];
}
}
}
extern "C" __global__
void maxout(float* best, int* which,
    const float* cands, int B, int O, int P)
{
    // For every (row b, output unit o) pick the largest of the P candidate
    // pieces: best[b,o] = max_p cands[b,o,p] and which[b,o] = argmax_p.
    // Grid-stride over rows, so any launch configuration is valid.
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int row = first; row < B; row += step)
    {
        for (int o = 0; o < O; ++o)
        {
            // Candidate pieces for this (row, unit) pair.
            const float* pieces = &cands[(row * O + o) * P];
            float top = pieces[0];
            int top_p = 0;
            for (int p = 1; p < P; ++p)
            {
                if (pieces[p] > top)
                {
                    top = pieces[p];
                    top_p = p;
                }
            }
            best[row * O + o] = top;
            which[row * O + o] = top_p;
        }
    }
}
extern "C" __global__
void mish(float* Y, const float* X, float threshold, int N)
{
    // Mish activation: y = x * tanh(softplus(x)) = x * tanh(log(1 + e^x)).
    // Above `threshold` the identity is used instead (softplus(x) ~ x and
    // tanh saturates there).  Grid-stride over the N elements.
    const int start = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    for (int i = start; i < N; i += stride)
    {
        const float x = X[i];
        Y[i] = (x >= threshold) ? x : x * tanhf(logf(1.0f + expf(x)));
    }
}
// Sum pooling over a batch of concatenated sequences: output[b] receives
// the column-wise sum of the lengths[b] rows of X belonging to sequence b.
// X is (T, O) row-major; lengths[0..B-1] sum to T.  Grid-stride over the
// B sequences.
// NOTE(review): results are accumulated with += onto whatever is already
// in output -- the caller presumably zero-initialises it; confirm.
extern "C" __global__
void sum_pool(float* output,
const float* X, const int* lengths, int B, int T, int O)
{
// Compute sums of a batch of concatenated sequences
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int b = _loop_start; b < B; b += _loop_stride)
{
// Go to the regions we're working on
float* output_b = &output[b*O];
// Find the sequence item we're working on: the start row of sequence b
// is the sum of the preceding lengths.
int t = 0;
for (int i=0; i < b; ++i) {
t += lengths[i];
}
int length = lengths[b];
// Each invocation of the kernel sums one batch.
for (int i=0; i < length; ++i) // Iterate over rows
{
const float* X_t = &X[(t+i)*O];
for (int j=0; j < O; ++j)
{
output_b[j] += X_t[j];
}
}
}
}
// Max pooling over a batch of concatenated sequences: for sequence b,
// maxes[b,j] = max over its rows of X[.,j] and which[b,j] records the
// winning row offset within the sequence.  X is (T, O) row-major;
// lengths[0..B-1] give the sequence lengths.  Grid-stride over sequences.
// NOTE(review): the first row is read unconditionally, so every
// lengths[b] is assumed to be >= 1 -- confirm upstream.
extern "C" __global__
void max_pool(float* maxes, int* which,
const float* X, const int* lengths, int B, int T, int O)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int b = _loop_start; b < B; b += _loop_stride)
{
// Go to the regions we're working on
float* maxes_b = &maxes[b*O];
int* which_b = &which[b*O];
// Find the sequence item we're working on: advance X_t past the rows of
// all preceding sequences.
const float* X_t = X;
for (int i=0; i < b; ++i) {
X_t += lengths[i] * O;
}
// Each invocation of the kernel maxes one sequence.
// Start by assuming maxes are the first element.
for (int i=0; i < O; ++i) {
maxes_b[i] = X_t[i];
which_b[i] = 0;
}
int length = lengths[b];
for (int i=1; i < length; ++i) // Iterate over rows
{
X_t += O;
for (int j=0; j < O; ++j)
{
if (X_t[j] > maxes_b[j])
{
maxes_b[j] = X_t[j];
which_b[j] = i;
}
}
}
}
}
// Backward pass of seq2col: gradient for input row b is the sum of the
// d_cols entries that row contributed to, i.e. the (nW-f)-th feature slot
// of output row b+f for f in [-nW, nW].  d_cols is (B, I*nF) row-major,
// d_seqs is (B, I); contributions falling outside d_cols (edge padding)
// are skipped by the bounds checks.  Grid-stride over rows.
extern "C" __global__
void backprop_seq2col(float* d_seqs,
const float* d_cols, int nW, int B, int I)
{
// Here's what we're doing, if we had 2d indexing.
//for i in range(B):
// d_seq[i] += d_cols[i-2, 4]
// d_seq[i] += d_cols[i-1, 3]
// d_seq[i] += d_cols[i, 2]
// d_seq[i] += d_cols[i+1, 1]
// d_seq[i] += d_cols[i+2, 0]
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
int nF = nW * 2 + 1;
int end_d_cols = B * I * nF;
for (int b = _loop_start; b < B; b += _loop_stride)
{
float* d_seqs_b = &d_seqs[b*I];
// col_feat walks backwards through the feature slots: slot nW-f for
// neighbour offset f (decremented by I before first use).
int col_feat = nF * I;
for (int f=-nW; f < (nW+1); ++f)
{
int col_row = (b+f) * (I*nF);  // start of output row b+f in d_cols
col_feat -= I;
if ((col_row >= 0) && (col_row < end_d_cols))
{
int start = col_row + col_feat;
if ((start >= 0) && ((start+I) < end_d_cols))
{
for (int i=0; i < I; ++i)
d_seqs_b[i] += d_cols[start+i];
}
}
}
}
}
extern "C" __global__
void backprop_maxout(float* dX,
    const float* dY, const int* which, int B, int O, int P)
{
    // Gradient of maxout: route each output gradient dY[b][j] back to the
    // one of the P candidate pieces that won (recorded in which[b][j]).
    // NOTE(review): non-winning pieces are never written here, so dX is
    // presumably zero-initialised by the caller — confirm.
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    for (int b = first; b < B; b += step)
    {
        float* dX_row = &dX[b*O*P];
        const float* dY_row = &dY[b*O];
        const int* winner = &which[b*O];
        for (int j = 0; j < O; ++j)
            dX_row[j*P + winner[j]] = dY_row[j];
    }
}
extern "C" __global__
void backprop_mish(float* dX,
    const float* dY, const float* X, float threshold, int N)
{
    // Gradient of the Mish activation: dX = dY * d/dx mish(x), where
    // d/dx mish(x) = exp(x) * omega / delta^2. Above `threshold` Mish is
    // effectively the identity, so the gradient passes straight through.
    int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
    int _loop_stride = blockDim.x * gridDim.x;
    for (int i = _loop_start; i < N; i += _loop_stride)
    {
        float x = X[i];
        if (x >= threshold)
        {
            dX[i] = dY[i];
        }
        else
        {
            // All arithmetic kept in single precision: the original used
            // double literals (2., 4., 6) and pow(), silently promoting
            // every expression to double inside a float kernel.
            float exp_x = expf(x);
            float exp_2x = expf(2.0f * x);
            float exp_3x = expf(3.0f * x);
            float omega = 4.0f * (x + 1.0f) + 4.0f * exp_2x + exp_3x
                          + exp_x * (4.0f * x + 6.0f);
            float delta = 2.0f * exp_x + exp_2x + 2.0f;
            // delta * delta replaces pow(delta, 2.0).
            dX[i] = dY[i] * ((exp_x * omega) / (delta * delta));
        }
    }
}
extern "C" __global__
void backprop_sum_pool(float* dX, const float* d_sum, const int* lengths,
    int B, int T, int O)
{
    // Gradient of sum-pooling: every row t belonging to sequence b gets a
    // copy of that sequence's pooled gradient d_sum[b].
    int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
    int _loop_stride = blockDim.x * gridDim.x;
    int seq_start = 0;
    int b = 0;
    for (int t = _loop_start; t < T; t += _loop_stride)
    {
        // Advance b until sequence b contains row t. Sequence b covers
        // rows [seq_start, seq_start + lengths[b]), so we must keep
        // advancing while its *end* is <= t. The original compared with
        // `<`, which assigned the first row of every sequence to the
        // sequence before it.
        while ((b < B) && (seq_start + lengths[b]) <= t)
        {
            seq_start += lengths[b];
            b += 1;
        }
        float* dX_t = &dX[t * O];
        const float* d_sum_b = &d_sum[b * O];
        for (int i = 0; i < O; ++i)
        {
            dX_t[i] = d_sum_b[i];
        }
    }
}
extern "C" __global__
void backprop_mean_pool(float* dX, const float* d_mean, const int* lengths,
    int B, int T, int O)
{
    // Gradient of mean-pooling: each row of sequence b receives
    // d_mean[b] / lengths[b].
    int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
    int _loop_stride = blockDim.x * gridDim.x;
    int seq_start = 0;
    int b = 0;
    for (int t = _loop_start; t < T; t += _loop_stride)
    {
        // Advance b until sequence b contains row t (it covers rows
        // [seq_start, seq_start + lengths[b])). The original used `<`,
        // which mapped the first row of each sequence onto its
        // predecessor — an off-by-one fixed here with `<=`.
        while ((b < B) && (seq_start + lengths[b]) <= t)
        {
            seq_start += lengths[b];
            b += 1;
        }
        float* dX_t = &dX[t * O];
        const float* d_mean_b = &d_mean[b * O];
        int lengths_b = lengths[b];
        for (int i = 0; i < O; ++i)
        {
            dX_t[i] = d_mean_b[i] / lengths_b;
        }
    }
}
extern "C" __global__
void backprop_max_pool(float* dX,
    const float* d_maxes, const int* which, const int* lengths, int B, int T, int O)
{
    // Gradient of max-pooling: row t of sequence b receives d_maxes[b][i]
    // only in the cells i where row t was the argmax (per `which`).
    int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
    int _loop_stride = blockDim.x * gridDim.x;
    int seq_start = 0;
    int b = 0;
    for (int t = _loop_start; t < T; t += _loop_stride)
    {
        // We're calculating the gradient of the unpooled sequences, from
        // the gradient of the maxes. Sequence b covers rows
        // [seq_start, seq_start + lengths[b]); advance while its end is
        // <= t. The original used `<` — an off-by-one that attributed the
        // first row of each sequence to the previous sequence.
        while ((b < B) && (seq_start + lengths[b]) <= t)
        {
            seq_start += lengths[b];
            b += 1;
        }
        // The "which" array stores row indices relative to the sequence,
        // so convert t to its in-sequence position.
        int index_of_t = t - seq_start;
        // Get the rows we're dealing with, to avoid cluttering the loop
        // with the index math.
        float* dX_t = &dX[t*O];
        const float* d_maxes_b = &d_maxes[b*O];
        const int* which_b = &which[b*O];
        for (int i = 0; i < O; ++i)
        {
            // Pass the gradient through only where this row won the max.
            if (which_b[i] == index_of_t)
                dX_t[i] = d_maxes_b[i];
        }
    }
}
|
3,488 | #include <stdio.h>
#define N 100
__global__ void assign(int *arr) {
    // Repeatedly shifts arr[tid-1] into arr[tid].
    // Fix: __syncthreads() must be reached by ALL threads of the block.
    // The original placed the barriers inside `if (tid < N && tid > 0)`,
    // and the launch (16x16 = 256 threads, N = 100) guarantees some
    // threads skip it — undefined behaviour (typically a hang).
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    bool active = (tid > 0 && tid < N);
    for (int i = 0; i < 50; i++) {
        int tmp = 0;
        if (active)
            tmp = arr[tid-1];
        __syncthreads();   // everyone has read before anyone writes
        if (active)
            arr[tid] = tmp;
        __syncthreads();   // everyone has written before the next read
        // NOTE(review): __syncthreads() only synchronises within a block;
        // with the multi-block launch in main() the shift is still racy
        // across block boundaries — confirm whether a single-block launch
        // was intended.
    }
}
int main() {
    // Initialise arr = 0..N-1 on the host, run the shift kernel, print
    // the result.
    int arr[N] = {0};
    for (int i = 0; i < N; i++)
        arr[i] = i;
    int b[N] = {0};
    int *dev_arr;
    cudaMalloc(&dev_arr, N * sizeof(int));
    cudaMemcpy(dev_arr, arr, N * sizeof(int), cudaMemcpyHostToDevice);
    assign<<<16, 16>>>(dev_arr);
    // A kernel launch returns no status directly; the original never
    // checked it at all. Pick up launch/config errors here.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "assign launch failed: %s\n", cudaGetErrorString(err));
    // The blocking cudaMemcpy below also synchronises with the kernel.
    cudaMemcpy(b, dev_arr, N * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_arr);
    for (int i = 0; i < N; i++) {
        printf("%d ", b[i]);
    }
    printf("\n");
    return 0;
}
|
3,489 | #include "stdio.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
void handle_errors(void){
    // Walkthrough of CUDA error reporting: print the status of a malloc,
    // a memcpy, and cudaGetLastError.
    int *d_a;
    cudaError_t cudaStatus;
    cudaStatus = cudaMalloc((void**)&d_a, sizeof(int));
    printf("Status: %d, cudaSuccess: %d\n", cudaStatus, cudaSuccess);
    // Copy a real int value. The original declared `int *h_a` and copied
    // from `&h_a` — i.e. the bytes of an uninitialised host *pointer*,
    // not the bytes of an int value.
    int h_a = 0;
    cudaStatus = cudaMemcpy(d_a, &h_a, sizeof(int), cudaMemcpyHostToDevice);
    printf("Status: %d, cudaSuccess: %d\n", cudaStatus, cudaSuccess);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    cudaStatus = cudaGetLastError();
    printf("Status: %d, cudaSuccess: %d\n", cudaStatus, cudaSuccess);
    fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
    // Free on every path; the original leaked d_a on the success path.
    cudaFree(d_a);
}
void properties(void){
    // Dump a few capability fields of device 0.
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    printf("Max per procesor: %d\n", prop.maxThreadsPerMultiProcessor);
    printf("Max per block: %d\n", prop.maxThreadsPerBlock);
    printf("CUDA stream: %d\n", prop.deviceOverlap);
}
int main(void){
    // Demo driver: run the error-reporting walkthrough, then print a few
    // device-0 properties.
    handle_errors();
    properties();
    return 0;
}
3,490 | #include<cuda_runtime.h>
#include<stdio.h>
int main()
{
    // Select the CUDA device with the most multiprocessors (only worth
    // doing when more than one device is present), then report its index.
    int device_count = 0;
    int best_device = 0;
    cudaGetDeviceCount(&device_count);
    if (device_count > 1)
    {
        int best_sm_count = 0;
        for (int dev = 0; dev < device_count; ++dev)
        {
            cudaDeviceProp props;
            cudaGetDeviceProperties(&props, dev);
            if (props.multiProcessorCount > best_sm_count)
            {
                best_sm_count = props.multiProcessorCount;
                best_device = dev;
            }
        }
        cudaSetDevice(best_device);
    }
    printf("dev idx: %d\n", best_device);
}
|
3,491 | #include "gpuMemoryLimitTester.cuh"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    // Report a failed CUDA call with its source location; when `abort`
    // is set, exit with the error code so callers/scripts can detect it.
    if (code == cudaSuccess)
        return;
    std::fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
void populateArrayWithRandomNumbers(int *array, int size)
{
    // Fill `array` with `size` values from rand(); the caller controls
    // determinism via srand().
    for (int slot = 0; slot < size; ++slot)
        array[slot] = rand();
}
void checkGpuMemoryLimit()
{
    // Repeatedly allocates a doubling-size device buffer, copies a small
    // host array into it, and frees it, until cudaMalloc fails (gpuErrchk
    // then aborts the process).
    // size_t instead of int: doubling 30000 overflows a 32-bit int after
    // ~16 iterations — signed overflow is undefined behaviour and would
    // wrap the allocation request.
    size_t memoryAmount = 30000;
    int* host_array = (int *)malloc(3000 * sizeof(int));
    int* device_array = NULL;
    populateArrayWithRandomNumbers(host_array, 3000);
    for (int i = 0; i < 300000; i++)
    {
        std::cout << "Attempting to allocate gpu memory size of: " << memoryAmount << std::endl;
        gpuErrchk(cudaMalloc((void**)&device_array, memoryAmount * sizeof(int)));
        cudaDeviceSynchronize();
        gpuErrchk(cudaMemcpy(device_array, host_array, 3000 * sizeof(int), cudaMemcpyHostToDevice));
        cudaDeviceSynchronize();
        gpuErrchk(cudaFree(device_array));
        std::cout << "Succesfully copied data to a: " << memoryAmount << " gpu array" << std::endl << std::endl;
        memoryAmount = memoryAmount * 2;
    }
    // The original leaked this buffer (only reachable if the loop ever
    // completes, but freeing is still correct).
    free(host_array);
}
|
3,492 | #include <stdio.h>
#include <cstdlib>
#include <time.h>
#include <stdlib.h>
#include <math.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
//#include <helper_functions.h>
//This function takes 32 elements and computers their all-prefix-sum
//Also, stores the sum of the block in the partial_sums array
//Algorithm taken from here: http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html
//This function takes 32 elements and computes their all-prefix-sum
//Also, stores the sum of the block in the partial_sums array
//Algorithm taken from here: http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html
__global__ void scan_block(int *A_gpu, int *data, int *partial_sums) {
    // Exclusive (Blelloch) scan of one 32-element chunk of data[] into
    // A_gpu[]; the chunk total goes to partial_sums[blockIdx.x].
    // Assumes blockDim.x == 32, and that data[] is padded with zeros to a
    // whole number of blocks (there is no length parameter to guard with).
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    int id = bx * blockDim.x + tx;
    // Shared scratch is int, not float as before: the inputs are ints and
    // float would silently round values above 2^24.
    __shared__ int values[32];
    values[tx] = data[id];
    __syncthreads();   // all loads visible before the up-sweep reads neighbours
    //Up-sweep: log2(32) = 5 rounds
    for (int i = 1; i <= 5; i++) {
        int good_lane = 1 << i;
        int back = 1 << (i - 1);
        if ((tx + 1) % good_lane == 0) {
            values[tx] += values[tx - back];
        }
        __syncthreads();
    }
    // Clear the root: one writer plus a barrier, replacing the original's
    // unsynchronised write by every thread in the block.
    if (tx == blockDim.x - 1) {
        values[tx] = 0;
    }
    __syncthreads();
    //Down-sweep
    for (int i = 5; i >= 1; i--) {
        int good_lane = 1 << i;
        int back = 1 << (i - 1);
        if ((tx + 1) % good_lane == 0) {
            int temp = values[tx];
            values[tx] += values[tx - back];
            values[tx - back] = temp;
        }
        __syncthreads();
    }
    //Store the values in their proper place in A_gpu
    A_gpu[id] = values[tx];
    // Block total = last exclusive-scan entry + last input element.
    // A single writer suffices (the original had all 32 threads store it).
    if (tx == 0) {
        partial_sums[bx] = values[blockDim.x - 1] + data[bx * blockDim.x + blockDim.x - 1];
    }
}
//This function takes each scanned block and adds to each element in it the sum of the previous scanned blocks
__global__ void add_sums(int *A_gpu, int *data, int *partial_sums) {
    // After the per-block exclusive scans, add to every element the total
    // of all preceding blocks so the scan becomes global.
    // (`data` is unused; kept for signature compatibility with callers.)
    int block = blockIdx.x;
    int idx = block * blockDim.x + threadIdx.x;
    // Sum of all block totals before this block.
    int carry = 0;
    for (int p = 0; p < block; ++p)
        carry += partial_sums[p];
    A_gpu[idx] += carry;
}
// Aborts with a message if a CUDA call failed. The original assigned every
// return code to an `error` variable and never inspected it.
static void checkCuda(cudaError_t status, const char *what) {
    if (status != cudaSuccess) {
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(status));
        exit(1);
    }
}
int main (int argc, char *argv[]) {
    int *A_cpu, *A_gpu, *data;
    int n; // size of array
    srand(time(NULL)); //random numbers each time
    //Make sure user puts in right parameters
    if (argc !=2) {
        printf("Usage: executable.exe {arraySize}, where arraySize is a positive integer");
        exit(1);
    }
    n = atoi(argv[1]);
    unsigned int mem_size_matrices = (n)*sizeof(int);
    data = (int *) malloc (mem_size_matrices);
    A_cpu = (int *) malloc (mem_size_matrices);
    A_gpu = (int *) malloc (mem_size_matrices);
    // Random input data; both result arrays start at zero.
    for (int i=0; i<n; i++) {
        int someInt = 1 + rand() % 1000;
        data[i] = someInt;
        A_cpu[i] = 0;
        A_gpu[i] = 0;
    }
    // Sequential exclusive prefix sum as the CPU reference.
    for (int i=0; i<n; i++) {
        int sum = 0;
        for (int j = 0; j<i; j++) {
            sum += data[j];
        }
        A_cpu[i] = sum;
    }
    // Launch configuration: 32-wide blocks, rounded up.
    int block_size = 32;
    int num_blocks = n/block_size + (n % block_size != 0);
    // Device buffers are padded to a whole number of blocks and zeroed:
    // scan_block indexes data[] by global thread id with no bounds check,
    // so when n is not a multiple of 32 the tail threads must read zeros
    // instead of out-of-bounds memory.
    unsigned int mem_size_padded = num_blocks * block_size * sizeof(int);
    int *d_A_gpu, *d_data, *d_partial_sums;
    checkCuda(cudaMalloc((void **) &d_A_gpu, mem_size_padded), "cudaMalloc d_A_gpu");
    checkCuda(cudaMalloc((void **) &d_data, mem_size_padded), "cudaMalloc d_data");
    checkCuda(cudaMalloc((void **) &d_partial_sums, (n/32 + 1)*sizeof(int)), "cudaMalloc d_partial_sums");
    checkCuda(cudaMemset(d_data, 0, mem_size_padded), "cudaMemset d_data");
    // copy host memory to device
    checkCuda(cudaMemcpy(d_A_gpu, A_gpu, mem_size_matrices, cudaMemcpyHostToDevice), "cudaMemcpy A_gpu");
    checkCuda(cudaMemcpy(d_data, data, mem_size_matrices, cudaMemcpyHostToDevice), "cudaMemcpy data");
    // Scan each 32-element block, then add the carry of preceding blocks.
    scan_block<<<num_blocks, block_size>>>(d_A_gpu, d_data, d_partial_sums);
    checkCuda(cudaGetLastError(), "scan_block launch");
    add_sums<<<num_blocks, block_size>>>(d_A_gpu, d_data, d_partial_sums);
    checkCuda(cudaGetLastError(), "add_sums launch");
    // Copy result from device to host (blocking, so it also synchronises).
    checkCuda(cudaMemcpy(A_gpu, d_A_gpu, mem_size_matrices, cudaMemcpyDeviceToHost), "cudaMemcpy result");
    checkCuda(cudaDeviceSynchronize(), "cudaDeviceSynchronize");
    //Print values in our 2 c vectors to output.txt
    //Code taken from http://www.tutorialspoint.com/cprogramming/c_file_io.htm
    FILE *fpout;
    fpout = fopen("output.txt", "w+");
    fprintf(fpout, "index\tCPU\tGPU\tDifference\n");
    for (int i = 0; i < n; i++) {
        int difference = A_cpu[i] - A_gpu[i];
        fprintf(fpout, "%i\t%i\t%i\t%i\n", i, A_cpu[i], A_gpu[i], difference);
    }
    fclose(fpout);
    // Clean up memory
    free(data);
    free(A_cpu);
    free(A_gpu);
    cudaFree(d_data);
    cudaFree(d_A_gpu);
    cudaFree(d_partial_sums);
    cudaDeviceReset();
    return 0;
}
|
3,493 | #include "includes.h"
#define FALSE 0
#define TRUE !FALSE
#define NUMTHREADS 16
#define THREADWORK 32
__device__ int dIsSignificant(float signif, int df) {
    // Returns TRUE when |signif| reaches the t cutoff for the given
    // degrees of freedom. Cutoffs are bucketed: exact for df <= 30, then
    // progressively coarser bands.
    float tcutoffs[49] = {
        // cuttoffs for degrees of freedom <= 30
        637.000, 31.600, 2.920, 8.610, 6.869, 5.959, 5.408, 5.041, 4.781,
        4.587, 4.437, 4.318, 4.221, 4.140, 4.073, 4.015, 3.965, 3.922,
        3.883, 3.850, 3.819, 3.792, 3.768, 3.745, 3.725, 3.707, 3.690,
        3.674, 3.659, 3.646,
        // cuttoffs for even degrees of freedom > 30 but <= 50
        3.622, 3.601, 3.582, 3.566, 3.551, 3.538, 3.526, 3.515, 3.505, 3.496,
        // 55 <= df <= 70 by 5s
        3.476, 3.460, 3.447, 3.435,
        3.416, // 80
        3.390, // 100
        3.357, // 150
        3.340, // 200
        3.290 // > 200
    };
    if (df <= 0) return 0;
    size_t index;
    if (df <= 30) {
        index = df - 1;
    } else if (df <= 50) {
        index = 30 + (df + (df % 2) - 32) / 2;   // round df up to even
    } else if (df <= 55) {
        index = 40;
    } else if (df <= 60) {
        index = 41;
    } else if (df <= 65) {
        index = 42;
    } else if (df <= 70) {
        index = 43;
    } else if (df <= 80) {
        index = 44;
    } else if (df <= 100) {
        index = 45;
    } else if (df <= 150) {
        index = 46;
    } else if (df <= 200) {
        index = 47;
    } else {
        index = 48;
    }
    return fabsf(signif) >= tcutoffs[index] ? TRUE : FALSE;
}
__global__ void dUpdateSignif(const float * gpuData, size_t n, float * gpuResults)
{
    // For each input row (5 floats: three ids, correlation, n-pairs)
    // compute a t-score; significant rows are copied to a 6-float output
    // row with the t-score appended, others are marked with -1 in slot 0.
    // Each thread handles a contiguous chunk of THREADWORK rows.
    size_t
        i, start, inrow, outrow,
        bx = blockIdx.x, tx = threadIdx.x;
    float
        radicand, cor, npairs, tscore;
    start = bx * NUMTHREADS * THREADWORK + tx * THREADWORK;
    for(i = 0; i < THREADWORK; i++) {
        // Off-by-one fix: rows are indexed 0..n-1, so stop as soon as
        // start+i == n. The original broke only on `> n`, reading and
        // writing one row past the end of both buffers.
        // (Assumes n is the row count — confirm against the host caller.)
        if(start+i >= n) break;
        inrow = (start+i)*5;
        outrow = (start+i)*6;
        cor = gpuData[inrow+3];
        npairs = gpuData[inrow+4];
        if(cor >= 0.999f)
            // Near-perfect correlation: clamp the t-score.
            tscore = 10000.0f;
        else {
            radicand = (npairs - 2.f) / (1.f - cor * cor);
            tscore = cor * sqrtf(radicand);
        }
        if(dIsSignificant(tscore, (int)npairs)) {
            gpuResults[outrow] = gpuData[inrow];
            gpuResults[outrow+1] = gpuData[inrow+1];
            gpuResults[outrow+2] = gpuData[inrow+2];
            gpuResults[outrow+3] = cor;
            gpuResults[outrow+4] = tscore;
            gpuResults[outrow+5] = npairs;
        } else {
            // Sentinel for "not significant".
            gpuResults[outrow] = -1.f;
        }
    }
}
3,494 | #include "includes.h"
__global__ void _copy_mat(float *m, float* target, int len){
    // Element-wise copy of m[0..len) into target; one thread per element,
    // surplus threads do nothing.
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= len)
        return;
    target[idx] = m[idx];
}
3,495 | #include <cuda_runtime_api.h>
#include <iostream>
using namespace std;
__global__ void kernel(int* tab, int elem_number) {
    // Double every element of tab in place. Grid-stride loop, so any
    // launch configuration covers all elem_number entries.
    int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < elem_number;
         idx += stride) {
        tab[idx] += tab[idx];
    }
}
int main() {
    // Double 4096 ints on the GPU and spot-check a few results.
    const int elem_number = 4096;
    int tab_cpu[elem_number];
    int* tab_gpu;
    cudaError_t status;
    for (int i = 0; i < elem_number; i++) {
        tab_cpu[i] = i;
    }
    status = cudaMalloc((void**)&tab_gpu, sizeof(int) * elem_number);
    if (status != cudaSuccess) {
        cout << cudaGetErrorString(status) << endl;
    }
    status = cudaMemcpy(tab_gpu, tab_cpu, sizeof(int) * elem_number, cudaMemcpyHostToDevice);
    if (status != cudaSuccess) {
        cout << cudaGetErrorString(status) << endl;
    }
    // 512 threads total; the kernel's grid-stride loop covers all 4096.
    kernel<<<2, 256, 0>>>(tab_gpu, elem_number);
    // A kernel launch returns no status; the original checked every API
    // call except the launch itself. Surface config errors here.
    status = cudaGetLastError();
    if (status != cudaSuccess) {
        cout << cudaGetErrorString(status) << endl;
    }
    status = cudaMemcpy(tab_cpu, tab_gpu, sizeof(int) * elem_number, cudaMemcpyDeviceToHost);
    if (status != cudaSuccess) {
        cout << cudaGetErrorString(status) << endl;
    }
    status = cudaFree(tab_gpu);
    if (status != cudaSuccess) {
        cout << cudaGetErrorString(status) << endl;
    }
    for (int i = 0; i < 10; i++) {
        cout << "Index " << i << " value " << tab_cpu[i] << endl;
    }
    cout << "Index 4095 value " << tab_cpu[4095] << endl;
    return 0;
}
|
3,496 | #include "includes.h"
// Applies a hard-coded 3x3, 5x5 or 7x7 convolution to an RGBA image
// stored as 4 interleaved bytes per pixel, writing a valid-mode (smaller)
// output image. Each output byte is computed per channel: neighbour taps
// step by 4 per column (same channel, next pixel) and by 4*width per row.
// The result is clamped to [0, 255] and every 4th byte (alpha) is forced
// to 255.
// NOTE(review): `col` is a *byte* column (0 .. 4*width_out-1), so
// reference_pixel_offset = 4*row*width + col addresses the same channel
// across the window — presumably intentional; confirm against the caller's
// image layout.
__global__ void convolutionParallel(unsigned char* image, unsigned char* new_image, unsigned height, unsigned width, int thread_count, int convolution_size)
{
    // process image
    int offset = (blockIdx.x * blockDim.x + threadIdx.x);
    // Dimensions of the valid-mode output (no padding).
    int width_out = (width - convolution_size + 1);
    int height_out = (height - convolution_size + 1);
    //Loop over pixels of smaller image — strided by thread_count, so the
    //caller must launch exactly thread_count threads in total.
    for (int i = offset; i < width_out * height_out * 4; i += thread_count)
    {
        // Decompose the flat byte index into an output row and byte column.
        int row = i / (4*width_out);
        int col = i % (4*width_out);
        // Top-left anchor of the window in the input, same channel as i.
        int reference_pixel_offset = 4 * row * width + col;
        float sum = 0.0;
        if (convolution_size == 3)
        {
            // 3x3 edge-style weights (rows sum to 0 except the centre row).
            float w[9] =
            {
                1, 2, -1,
                2, 0.25, -2,
                1, -2, -1
            };
            for (int j = 0; j < convolution_size; j++)
                for (int k = 0; k < convolution_size; k++)
                    sum += image[reference_pixel_offset + 4 * k + 4 * j * width] * w[j * convolution_size + k];
        }
        if (convolution_size == 5)
        {
            // 5x5 variant of the same kernel.
            float w[25] =
            {
                0.5, 0.75, 1, -0.75, -0.5,
                0.75, 1, 2, -1, -0.75,
                1, 2, 0.25, -2, -1,
                0.75, 1, -2, -1, -0.75,
                0.5, 0.75, -1, -0.75, -0.5
            };
            for (int j = 0; j < convolution_size; j++)
                for (int k = 0; k < convolution_size; k++)
                    sum += image[reference_pixel_offset + 4 * k + 4 * j * width] * w[j * convolution_size + k];
        }
        if (convolution_size == 7)
        {
            // 7x7 variant of the same kernel.
            float w[49] =
            {
                0.25, 0.3, 0.5, 0.75, -0.5, -0.3, -0.25,
                0.3, 0.5, 0.75, 1, -0.75, -0.5, -0.3,
                0.5, 0.75, 1, 2, -1, -0.75, -0.5,
                0.75, 1, 2, 0.25, -2, -1, -0.75,
                0.5, 0.75, 1, -2, -1, -0.75, -0.5,
                0.3, 0.5, 0.75, -1, -0.75, -0.5, -0.3,
                0.25, 0.3, 0.5, -0.75, -0.5, -0.3, -0.25
            };
            for (int j = 0; j < convolution_size; j++)
                for (int k = 0; k < convolution_size; k++)
                    sum += image[reference_pixel_offset + 4 * k + 4 * j * width] * w[j * convolution_size + k];
        }
        // Clamp to byte range.
        if (sum <= 0) sum = 0;
        if (sum >= 255) sum = 255;
        if ((i + 1) % 4 == 0) sum = 255; // Set a = 255
        new_image[i] = (int) sum;
    }
}
3,497 | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#define N 10
#define UPPER N*4
#define LOWER 1
#define THREADS_PER_BLOCK 512
__global__ void count_sort(int *a, int *s_a, int n);
void rand_init_array(int *array, int n, int upper, int lower);
void display_array(int *array, int n);
/*
* Main
*/
int main(int argc, char *argv[]){
    int blocks;
    float total_time, comp_time;
    // Two event pairs: total = transfers + kernel, comp = kernel only.
    // (Event ordering below is deliberate; do not reorder the records.)
    cudaEvent_t total_start, total_stop, comp_start, comp_stop;
    cudaEventCreate(&total_start);
    cudaEventCreate(&total_stop);
    cudaEventCreate(&comp_start);
    cudaEventCreate(&comp_stop);
    /*
     * Memory allocation on host
     */
    int *array = (int *)malloc(N*sizeof(int));
    int *sorted_array = (int *)malloc(N*sizeof(int));
    /*
     * Init array
     */
    rand_init_array(array, N, UPPER, LOWER);
    display_array(array, N);
    /*
     * Memory allocation on device
     */
    int *array_dev, *sorted_dev;
    cudaMalloc((void **)&array_dev, N*sizeof(int));
    cudaMalloc((void **)&sorted_dev, N*sizeof(int));
    cudaEventRecord(total_start);
    /*
     * Copy array from host memory to device memory
     */
    cudaMemcpy(array_dev, array, N*sizeof(int), cudaMemcpyHostToDevice);
    /*
     * Create sufficient blocks
     */
    blocks = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
    cudaEventRecord(comp_start);
    /*
     * Kernel call
     * NOTE(review): blocks*THREADS_PER_BLOCK (512) exceeds N (10), so
     * count_sort must bounds-check its thread index; the version in this
     * file as written reads/writes out of bounds for surplus threads.
     */
    count_sort<<< blocks, THREADS_PER_BLOCK >>>(array_dev, sorted_dev, N);
    cudaEventRecord(comp_stop);
    cudaEventSynchronize(comp_stop);
    cudaEventElapsedTime(&comp_time, comp_start, comp_stop);
    /*
     * Copy c from host device memory to host memory
     */
    cudaMemcpy(sorted_array, sorted_dev, N*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(total_stop);
    cudaEventSynchronize(total_stop);
    cudaEventElapsedTime(&total_time, total_start, total_stop);
    /*
     * Free memory on device
     */
    cudaFree(array_dev);
    cudaFree(sorted_dev);
    cudaEventDestroy(comp_start);
    cudaEventDestroy(comp_stop);
    cudaEventDestroy(total_start);
    cudaEventDestroy(total_stop);
    /*
     * GPU timing
     */
    printf("N: %d, blocks: %d, total_threads: %d\n", N, blocks, THREADS_PER_BLOCK*blocks);
    printf("Total time (ms): %f\n", total_time);
    printf("Kernel time (ms): %f\n", comp_time);
    printf("Data transfer time (ms): %f\n", total_time-comp_time);
    display_array(sorted_array, N);
    return 0;
}
/*
* Function: count_sort
* --------------------
* Performs count sort (enumeration sort) algorithm in parallel
*
* a: pointer of initial array
* s_a: pointer of sorted array
* n: number of elements in the array
*
*/
__global__ void count_sort(int *a, int *s_a, int n){
    // Enumeration sort: each thread counts how many elements precede its
    // own (ties broken by index) and stores it at that rank.
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // Guard: the launch rounds the grid up to a multiple of
    // THREADS_PER_BLOCK (512 threads for N == 10), so without this check
    // surplus threads read a[] and write s_a[] far out of bounds.
    if (index >= n)
        return;
    int count = 0;
    for (int j = 0; j < n; ++j)
        if (a[j] < a[index])
            ++count;
        else if (a[j] == a[index] && j < index)
            ++count;
    s_a[count] = a[index];
}
/*
* Function: rand_init_array
* --------------------
* Fills an integer array with random numbers
*
* array: the array that will be filled with numbers
* n: number of elements in the array
* upper: highest value of random number
* lower: lowest value of random number
*
*/
void rand_init_array(int *array, int n, int upper, int lower){
    // Fill array with n random ints drawn from [lower, upper] inclusive.
    int span = upper - lower + 1;
    for (int idx = 0; idx < n; ++idx)
        array[idx] = lower + rand() % span;
}
/*
* Function: display_array
* --------------------
* Prints an integer array to user
*
* array: the array that will be printed
* n: number of elements in the array
*
*/
void display_array(int *array, int n){
    // Print the array as "[ a b c ]" followed by a blank line.
    printf("[ ");
    for (int idx = 0; idx < n; ++idx)
        printf("%d ", array[idx]);
    printf("]\n\n");
}
|
3,498 | #include "error.h"
#include <cuda.h>
#include <curand_kernel.h>
#include <cassert>
#include <iostream>
#include <string>
namespace
{
const char* curandGetErrorString(const curandStatus_t error)
{
    // Map a curandStatus_t to its enumerator name (curand has no
    // built-in equivalent of cudaGetErrorString).
#define CUTW_STATUS_CASE(name) case name: return #name
    switch (error)
    {
        CUTW_STATUS_CASE(CURAND_STATUS_SUCCESS);
        CUTW_STATUS_CASE(CURAND_STATUS_VERSION_MISMATCH);
        CUTW_STATUS_CASE(CURAND_STATUS_NOT_INITIALIZED);
        CUTW_STATUS_CASE(CURAND_STATUS_ALLOCATION_FAILED);
        CUTW_STATUS_CASE(CURAND_STATUS_TYPE_ERROR);
        CUTW_STATUS_CASE(CURAND_STATUS_OUT_OF_RANGE);
        CUTW_STATUS_CASE(CURAND_STATUS_LENGTH_NOT_MULTIPLE);
        CUTW_STATUS_CASE(CURAND_STATUS_DOUBLE_PRECISION_REQUIRED);
        CUTW_STATUS_CASE(CURAND_STATUS_LAUNCH_FAILURE);
        CUTW_STATUS_CASE(CURAND_STATUS_PREEXISTING_FAILURE);
        CUTW_STATUS_CASE(CURAND_STATUS_INITIALIZATION_FAILED);
        CUTW_STATUS_CASE(CURAND_STATUS_ARCH_MISMATCH);
        CUTW_STATUS_CASE(CURAND_STATUS_INTERNAL_ERROR);
    }
#undef CUTW_STATUS_CASE
    return "<unknown>";
}
}
namespace cutw
{
void
cuassert(const int code,
    const char* const file,
    const int line,
    const bool abort)
{
    // Log a failed CUDA runtime call as
    // "cutw: cuassert: <message> @ file:line"; optionally assert and
    // terminate so the failure cannot be ignored.
    if (code == cudaSuccess)
        return;
    std::string msg = "cutw: cuassert: ";
    msg += cudaGetErrorString(static_cast<cudaError_t>(code));
    msg += " @ ";
    msg += file;
    msg += ":";
    msg += std::to_string(line);
    std::cerr << msg << std::endl;
    if (abort)
    {
        assert(false);
        std::terminate();
    }
}
void
curandassert(const int code,
    const char* const file,
    const int line,
    const bool abort)
{
    // Same contract as cuassert, but for curand status codes, logged as
    // "cutw: curandassert: <status-name> @ file:line".
    if (code == CURAND_STATUS_SUCCESS)
        return;
    std::string msg = "cutw: curandassert: ";
    msg += curandGetErrorString(static_cast<curandStatus_t>(code));
    msg += " @ ";
    msg += file;
    msg += ":";
    msg += std::to_string(line);
    std::cerr << msg << std::endl;
    if (abort)
    {
        assert(false);
        std::terminate();
    }
}
}
|
3,499 |
#include "cuda.h"
#include <time.h>
#include <stdio.h>
#include <math.h>
#define TILE_WIDTH 2
__global__
// In-place tree reduction (sum) of data[0..N-1]; the total ends up in
// result[0]. Destroys data[].
// Preconditions implied by the indexing (i = threadIdx.x only):
//  - single-block launch with blockDim.x >= N/2;
//  - N a power of two.
// NOTE(review): the caller in this file launches with N/TILE_WIDTH =
// 32768 threads in one block, far above the 1024-thread hardware limit,
// so that launch cannot succeed — confirm the intended configuration.
void reduce(int *data, int *result, int N) {
    int i = threadIdx.x ;
    // Halve the active range each round, adding the upper half onto the
    // lower half; the barrier is outside the if, so all threads reach it.
    for (int k = N/2; k > 0; k=k/2) {
        if(i<k)
            data[i] += data[k+i];
        __syncthreads();
    }
    if (i == 0)
        result[0] = data[0];
}
__global__
void map(int *vecA, int *vecB, int *vecC) {
    // Element-wise product: vecC = vecA * vecB. One thread per element;
    // there is no length parameter, so the caller must launch exactly as
    // many threads as there are elements.
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    vecC[idx] = vecA[idx] * vecB[idx];
}
int main() {
    // Dot-product pipeline: map (element-wise multiply) then reduce (sum).
    int N =65536;
    int *A_h = (int*)malloc(sizeof(int) * N );
    int *B_h = (int*)malloc(sizeof(int) * N );
    int *C_h = (int*)malloc(sizeof(int) * N );
    int *D_h = (int*)malloc(sizeof(int) * N );
    for (int i=0; i< N; i++) A_h[i] = i;
    /*for (int i = 0; i < N; i++) {
        printf("%d \t", A_h[i]);
    }
    printf("\n");*/
    for (int i=0; i< N; i++) B_h[i] = i;
    /*for (int i = 0; i < N; i++) {
        printf("%d \t", B_h[i]);
    }
    printf("\n");*/
    //for (int i=0; i< N; i++) fprintf(stdout, "%f\n", A_h[i]);
    clock_t begin, end;
    double elapsed;
    //initialize matrices
    int *A_d, *B_d,*C_d,*D_d;
    cudaMalloc(&A_d, sizeof(int) * N );
    cudaMalloc(&B_d, sizeof(int) * N);
    cudaMalloc(&C_d, sizeof(int) * N);
    cudaMalloc(&D_d, sizeof(int) * N);
    begin = clock();
    cudaMemcpy(A_d, A_h, sizeof(int) * N , cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B_h, sizeof(int) * N , cudaMemcpyHostToDevice);
    //launch kernel
    //dim3 dimBlock(2, 2);
    //dim3 dimGrid(N/2, N/2);
    // NOTE(review): xBlock = 65536/2 = 32768 threads in ONE block, which
    // exceeds the 1024-threads-per-block hardware limit — both launches
    // below will fail, and no return code is checked. Fixing this needs a
    // multi-block reduce kernel; `reduce` as written requires a single
    // block of N/2 threads, so it cannot be corrected from here alone.
    int xBlock = (N/TILE_WIDTH);
    int xGrid = 1;
    //matrixMultSimple<<<dimGrid, dimBlock>>>(A_d, B_d, C_d, N);
    //reduce<<<xGrid, xBlock>>>(A_d, B_d, N, N);
    map<<<xGrid, xBlock>>>(A_d, B_d, C_d);
    // NOTE(review): sizeof(float) used for int buffers — same byte size,
    // so it works only by accident; sizeof(int) is what is meant.
    cudaMemcpy(C_h, C_d, sizeof(float) * N, cudaMemcpyDeviceToHost);
    /* for (int i = 0; i < N; i++) {
        printf("%d \t", C_h[i]);
    }
    printf("\n");
    */
    reduce<<<xGrid, xBlock>>>(C_d,D_d ,N);
    cudaMemcpy(D_h, D_d, sizeof(float) * N, cudaMemcpyDeviceToHost);
    // fprintf(stdout, "%d\n", D_h[0]);
    end = clock();
    elapsed = double(end - begin)/CLOCKS_PER_SEC;
    fprintf(stdout, "%f\n", elapsed);
    // NOTE(review): C_d/D_d are never cudaFree'd and C_h/D_h never freed.
    cudaFree(A_d);
    cudaFree(B_d);
    free(A_h);
    free(B_h);
    return 0;
}
|
3,500 | #include "VerifyMove.cuh"
// Returns 1 if the square (kingRow, kingCol) is attacked by any piece of
// the opposite color (rook/queen/king along ranks and files, bishop/
// queen/king along diagonals, knights), scanning outward until a piece of
// `color` blocks the ray.
// NOTE(review): every ray scan starts AT the probed square itself, so if
// that square holds a piece of `color` (e.g. the king), the loop breaks
// on its first iteration and ray attacks are never detected. This only
// behaves as expected when the probed square is empty (as in the castling
// path check) — confirm how callers use it.
int getCheck(Piece** board, int kingRow, int kingCol, int color){
    // Loops through the rows
    for(int direction=-1; direction<=1; direction+=2){
        for(int row=kingRow; row<DIM && row>=0; row+=direction){
            // Own piece blocks the ray.
            if(board[row][kingCol].piece.color==color){
                break;
            }
            // Sliding attacker (or adjacent enemy king) on this file.
            else if(board[row][kingCol].piece.isRook ||
                    board[row][kingCol].piece.isQueen ||
                    (board[row][kingCol].piece.isKing &&
                     abs(row-kingRow)==1)){
                return 1;
            }
        }
    }
    // Loops through the cols
    for(int direction=-1; direction<=1; direction+=2){
        for(int col=kingCol; col<DIM && col>=0; col+=direction){
            if(board[kingRow][col].piece.color==color){
                break;
            }
            else if(board[kingRow][col].piece.isRook ||
                    board[kingRow][col].piece.isQueen ||
                    (board[kingRow][col].piece.isKing &&
                     abs(col-kingCol)==1)){
                return 1;
            }
        }
    }
    // Loops through the diagonals
    for(int rowDirection=-1; rowDirection<=1; rowDirection+=2){
        for(int colDirection=-1; colDirection<=1; colDirection+=2){
            for(int row=kingRow, col=kingCol;
                row>=0 && row<DIM && col>=0 && col<DIM;
                row+=rowDirection, col+=colDirection){
                if(board[row][col].piece.color==color){
                    break;
                }
                else if(board[row][col].piece.isBishop ||
                        board[row][col].piece.isQueen ||
                        (board[row][col].piece.isKing &&
                         abs(row-kingRow)==1 &&
                         abs(col-kingCol)==1)){
                    return 1;
                }
            }
        }
    }
    // Loops through the knights: scan the 5x5 neighbourhood, skipping
    // squares on the same rank, file, or diagonal — what remains are
    // exactly the knight-move squares.
    for(int row=max(0, kingRow-2); row<=min(DIM-1, kingRow+2); row++){
        for(int col=max(0, kingCol-2); col<=min(DIM-1, kingCol+2); col++){
            if(row==kingRow || col==kingCol ||
               abs(row-kingRow)==abs(col-kingCol)){
                continue;
            }
            // NOTE(review): any knight here triggers a check, including a
            // same-color knight — the color is not tested on this branch.
            else if(board[row][col].piece.isKnight){
                return 1;
            }
        }
    }
    return 0;
}
/**
* Gets the change in row
* Parameter oldRow: the row to move from
* Parameter newRow: the row to move to
* Returns: the change in row
*/
int getDeltaRow(int oldRow, int newRow){
    // Signed row displacement from oldRow to newRow.
    return newRow - oldRow;
}
/**
* Gets the change in col
* Parameter oldCol: the col to move from
* Parameter newCol: the col to move to
* Returns: the change in col
*/
int getDeltaCol(int oldCol, int newCol){
    // Signed column displacement from oldCol to newCol.
    return newCol - oldCol;
}
/**
* Checks to make sure the pawn move was valid
* Parameter board: the board to check the move from
* Parameter oldRow: the row that the pawn moves from
* Parameter oldCol: the col that the pawn moves from
* Parameter newRow: the row that the pawn moves to
* Parameter newCol: the col that the pawn moves to
* Parameter color: the color of the pawn to move
* Returns: whether or not the move was valid
*/
int isValidPawnMove(Piece** board, int oldRow, int oldCol, int newRow,
    int newCol, int color){
    int deltaRow=getDeltaRow(oldRow, newRow);
    int deltaCol=getDeltaCol(oldCol, newCol);
    // Direction of travel: +1 for even colors, -1 for odd. The original
    // computed this as pow(-1, color) — a double-precision transcendental
    // call plus a float->int truncation just to pick a sign.
    int colorFactor = (color % 2 == 0) ? 1 : -1;
    // Double square advance (only from the pawn's first move)
    if(deltaRow==2*colorFactor &&
       deltaCol==0 &&
       board[oldRow][oldCol].piece.isFirstMove==1){
        return canMoveFromTo(board, oldRow, oldCol, newRow, newCol,
                             color, 0, 0);
    }
    // Single square advance
    else if(deltaRow==1*colorFactor && deltaCol==0){
        return canMoveFromTo(board, oldRow, oldCol, newRow, newCol,
                             color, 0, 0);
    }
    // Diagonal capture
    else if(deltaRow==1*colorFactor && abs(deltaCol)==1 &&
            hasEnemy(board, newRow, newCol, color)){
        return canMoveFromTo(board, oldRow, oldCol, newRow, newCol,
                             color, 1, 0);
    }
    // Invalid move pattern
    else{
        return 0;
    }
}
/**
* Checks to make sure the rook move was valid
* Parameter board: the board to check the move on
* Parameter oldRow: the row that the rook moves from
* Parameter oldCol: the col that the rook moves from
* Parameter newRow: the row that the rook moves to
* Parameter newCol: the col that the rook moves to
* Parameter color: the color of the rook to move
* Returns: whether or not the move was valid
*/
int isValidRookMove(Piece** board, int oldRow, int oldCol, int newRow,
    int newCol, int color){
    // Rook pattern: movement along exactly one axis (row XOR column).
    int dR = getDeltaRow(oldRow, newRow);
    int dC = getDeltaCol(oldCol, newCol);
    int movesAlongOneAxis = (dR != 0) != (dC != 0);
    if(!movesAlongOneAxis){
        return 0;
    }
    return canMoveFromTo(board, oldRow, oldCol, newRow, newCol,
                         color, 1, 0);
}
/**
* Checks to make sure that the knight move was valid
* Parameter board: the board to check the move on
* Parameter oldRow: the row that the knight moves from
* Parameter oldCol: the col that the knight moves from
* Parameter newRow: the row that the knight moves from
* Parameter newCol: the col that the knight moves from
* Parameter color: the color of the knight to move
* Returns: whether or not the knight move was valid
*/
int isValidKnightMove(Piece** board, int oldRow, int oldCol, int newRow,
    int newCol, int color){
    // Knight pattern: an L shape — |dRow|,|dCol| are {1,2} in some order.
    int aR = abs(getDeltaRow(oldRow, newRow));
    int aC = abs(getDeltaCol(oldCol, newCol));
    if((aR==1 && aC==2) || (aR==2 && aC==1)){
        return canMoveFromTo(board, oldRow, oldCol, newRow, newCol,
                             color, 1, 0);
    }
    return 0;
}
/**
* Checks to make sure that the bishop's move was valid
* Parameter board: the board to check the move on
* Parameter oldRow: the row that the bishop moves from
* Parameter oldCol: the col that the bishop moves from
* Parameter newRow: the row that the bishop moves to
* Parameter newCol: the col that the bishop moves to
* Parameter color: the color of the bishop to move
* Returns: whether or not the bishop's move was valid
*/
int isValidBishopMove(Piece** board, int oldRow, int oldCol, int newRow,
    int newCol, int color){
    // Bishop pattern: a pure diagonal, i.e. |dRow| == |dCol|.
    int dR = getDeltaRow(oldRow, newRow);
    int dC = getDeltaCol(oldCol, newCol);
    if(abs(dR) != abs(dC)){
        return 0;
    }
    return canMoveFromTo(board, oldRow, oldCol, newRow, newCol,
                         color, 1, 0);
}
/**
* Checks to make sure that the queen's move was valid
* Parameter board: the board to check the move on
* Parameter oldRow: the row that the queen moves from
* Parameter oldCol: the col that the queen moves from
* Parameter newRow: the row that the queen moves to
* Parameter newCol: the col that the queen moves to
* Parameter color: the color of the queen to move
* Returns: whether or not the queen's move was valid
*/
int isValidQueenMove(Piece** board, int oldRow, int oldCol, int newRow,
    int newCol, int color){
    // A queen combines rook and bishop movement; normalise to 0/1 like
    // the original's || did.
    if(isValidRookMove(board, oldRow, oldCol, newRow, newCol, color)){
        return 1;
    }
    return isValidBishopMove(board, oldRow, oldCol, newRow, newCol,
                             color) ? 1 : 0;
}
/**
* Checks to make sure that the king's move was valid
* Parameter board: the board to check the move on
* Parameter oldRow: the row that the king moves from
* Parameter oldCol: the col that the king moves from
* Parameter newRow: the row that the king moves to
* Parameter newCol: the col that the king moves to
* Parameter color: the color of the king to move
* Returns: whether or not the king's move was valid
*/
int isValidKingMove(Piece** board, int oldRow, int oldCol, int newRow,
    int newCol, int color){
    int deltaRow=getDeltaRow(oldRow, newRow);
    int deltaCol=getDeltaCol(oldCol, newCol);
    // Ordinary king step: at most one square in any direction.
    if((abs(deltaRow)==1 && abs(deltaCol)<=1) ||
       (abs(deltaRow) <=1 && abs(deltaCol)==1)){
        return canMoveFromTo(board, oldRow, oldCol, newRow, newCol,
                             color, 1, 0);
    }
    // Castling: a two-square horizontal king move, allowed only when
    //  - both the king and the relevant rook (col 0 queenside / col 7
    //    kingside) have never moved,
    //  - the king's path and the rook's destination path are clear
    //    (canMoveFromTo with capture disabled).
    else if(oldRow==newRow &&
            abs(deltaCol)==2 &&
            board[oldRow][oldCol].piece.isFirstMove &&
            canMoveFromTo(board, oldRow, oldCol, newRow, newCol, color,
                          0, 0) &&
            ((deltaCol<0 && board[oldRow][0].piece.isRook &&
              board[oldRow][0].piece.isFirstMove &&
              canMoveFromTo(board, oldRow, 0, newRow, 3, color,
                            0, 0)) ||
             (deltaCol>0 && board[oldRow][7].piece.isRook &&
              board[oldRow][7].piece.isFirstMove &&
              canMoveFromTo(board, oldRow, 7, newRow, 5, color,
                            0, 0)))){
        //Checks for a check between the two (inclusive): the king may not
        //castle out of, through, or into check. deltaCol/abs(deltaCol) is
        //the step direction (+1 or -1).
        for(int col=oldCol; col!=newCol+(deltaCol/abs(deltaCol)); col+=(deltaCol/abs(deltaCol))){
            if(getCheck(board, oldRow, col, color)){
                return 0;
            }
        }
        return 1;
    }
    // Invalid move pattern
    else{
        return 0;
    }
}
#include "ChessBoard.cuh"
/**
 * Checks to see if a move is valid
 * Parameter board: the board to check the move on
 * Parameter oldRow: the row that the piece moves from
 * Parameter oldCol: the col that the piece moves from
 * Parameter newRow: the row that the piece moves to
 * Parameter newCol: the col that the piece moves to
 * Parameter color: the color of the person that's moving
 * Returns: whether or not the move is valid
 */
int isValidMove(Piece** board, int oldRow, int oldCol, int newRow, int newCol,
		int color){
	// A move is considered at all only if: both squares are on the board,
	// it is a genuine move (start != end), the start square holds a
	// piece, and that piece belongs to the side that is moving.
	if(verifyBounds(oldRow, oldCol) && verifyBounds(newRow, newCol) &&
			(oldRow!=newRow || oldCol!=newCol) &&
			isOccupied(board, oldRow, oldCol) &&
			board[oldRow][oldCol].piece.color==color){
		// Hoist the repeated board lookup; dispatch on the piece-type
		// flags of the piece sitting on the start square.
		Piece mover=board[oldRow][oldCol];
		if(mover.piece.isPawn){
			return isValidPawnMove(board, oldRow, oldCol, newRow,
					newCol, color);
		}
		else if(mover.piece.isRook){
			return isValidRookMove(board, oldRow, oldCol, newRow,
					newCol, color);
		}
		else if(mover.piece.isKnight){
			return isValidKnightMove(board, oldRow, oldCol,
					newRow, newCol, color);
		}
		else if(mover.piece.isBishop){
			return isValidBishopMove(board, oldRow, oldCol,
					newRow, newCol, color);
		}
		else if(mover.piece.isQueen){
			return isValidQueenMove(board, oldRow, oldCol,
					newRow, newCol, color);
		}
		else if(mover.piece.isKing){
			return isValidKingMove(board, oldRow, oldCol, newRow,
					newCol, color);
		}
		// No piece-type flag was set: fall through to invalid.
	}
	return 0;
}
/**
 * Verifies that a position lies inside the board.
 * Parameter row: the row to check the bounds of
 * Parameter col: the column to check the bounds of
 * Returns: nonzero iff both row and col fall in [0, DIM)
 */
int verifyBounds(int row, int col){
	// In bounds exactly when neither coordinate escapes [0, DIM).
	return !(row < 0 || row >= DIM || col < 0 || col >= DIM);
}
/**
 * Reports whether a square currently holds any piece.
 * Parameter board: the board to inspect
 * Parameter row: the row of the square
 * Parameter col: the col of the square
 * Returns: nonzero iff the square is occupied
 */
int isOccupied(Piece** board, int row, int col){
	// An empty square encodes as numberConversion == 0.
	return 0 != board[row][col].numberConversion;
}
/**
 * Reports whether a square holds an enemy piece.
 * Parameter board: the board to inspect
 * Parameter row: the row of the square
 * Parameter col: the column of the square
 * Parameter color: the color of the side asking the question
 * Returns: nonzero iff the square holds a piece of the opposite color
 */
int hasEnemy(Piece** board, int row, int col, int color){
	// Enemy = occupied (numberConversion != 0) AND differently colored.
	Piece* target = &board[row][col];
	return target->numberConversion != 0 &&
			target->piece.color != color;
}
/**
 * Checks if there are obstructions strictly between the start and end
 * positions along a rank, file, or diagonal. The endpoints themselves
 * are never examined, so captures on the destination square are not
 * treated as obstructions.
 * Parameter board: the board to check for obstructions
 * Parameter oldRow: the row that the piece moves from
 * Parameter oldCol: the col that the piece moves from
 * Parameter newRow: the row that the piece moves to
 * Parameter newCol: the col that the piece moves to
 * Returns: 1 if any intermediate square is occupied, 0 otherwise
 * (also 0 for move shapes that are neither straight nor diagonal,
 * e.g. knight moves, which never have obstructions)
 */
int hasObstructions(Piece** board, int oldRow, int oldCol,
		int newRow, int newCol){
	// Unit step toward the destination on each axis. (Replaces
	// pow(-1, cond): a double-precision transcendental call, implicitly
	// truncated to int, just to select +/-1.)
	int deltaRow = (newRow < oldRow) ? -1 : 1;
	int deltaCol = (newCol < oldCol) ? -1 : 1;
	// Movement along a row (same rank): scan the columns in between.
	if(oldRow==newRow){
		for(int col=oldCol+deltaCol; col!=newCol; col+=deltaCol){
			if(board[oldRow][col].numberConversion!=0){
				return 1;
			}
		}
	}
	// Movement along a column (same file): scan the rows in between.
	else if(oldCol==newCol){
		for(int row=oldRow+deltaRow; row!=newRow; row+=deltaRow){
			if(board[row][oldCol].numberConversion!=0){
				return 1;
			}
		}
	}
	// A diagonal: step both axes together.
	else if(abs(oldRow-newRow)==abs(oldCol-newCol)){
		for(int row=oldRow+deltaRow, col=oldCol+deltaCol;
				row!=newRow || col!=newCol;
				row+=deltaRow, col+=deltaCol){
			if(board[row][col].numberConversion!=0){
				return 1;
			}
		}
	}
	return 0;
}
/**
 * Checks whether or not a piece can legally land on a destination
 * square, given its movement capabilities.
 * Parameter board: the board to check
 * Parameter oldRow: the old row to check
 * Parameter oldCol: the old col to check
 * Parameter newRow: the new row to check
 * Parameter newCol: the new col to check
 * Parameter color: the color of the side that is moving
 * Parameter canTake: nonzero if the piece may capture on the destination
 * Parameter canJump: nonzero if the piece may jump over occupied squares
 * Return: whether or not the move can be made
 */
int canMoveFromTo(Piece** board, int oldRow, int oldCol, int newRow,
		int newCol, int color, int canTake, int canJump){
	// Both squares must be on the board.
	if(!verifyBounds(oldRow, oldCol) || !verifyBounds(newRow, newCol)){
		return 0;
	}
	// The destination must be empty, or hold an enemy piece that this
	// move is allowed to capture.
	if(isOccupied(board, newRow, newCol) &&
			!(canTake && hasEnemy(board, newRow, newCol, color))){
		return 0;
	}
	// Finally, the path must be clear unless the piece can jump.
	return canJump || !hasObstructions(board, oldRow, oldCol,
			newRow, newCol);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.