serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
13,001 | #include "includes.h"
__global__ void total(float * input, float * output, int len) {
//@@ Each block reduces 2*BLOCK_SIZE consecutive input elements to one
//@@ partial sum. Out-of-range slots are filled with the additive
//@@ identity (0) so the tree reduction needs no further bounds checks.
__shared__ float partialSum[2 * BLOCK_SIZE];
unsigned int tx = threadIdx.x;
unsigned int start = 2 * blockIdx.x * BLOCK_SIZE;
unsigned int lo = start + tx;
unsigned int hi = start + BLOCK_SIZE + tx;
partialSum[tx] = (lo < len) ? input[lo] : 0.0;
partialSum[BLOCK_SIZE + tx] = (hi < len) ? input[hi] : 0.0;
//@@ Tree reduction: halve the number of active threads each level.
//@@ The barrier sits outside the divergent branch so every thread
//@@ in the block reaches it.
for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride /= 2) {
__syncthreads();
if (tx < stride) {
partialSum[tx] += partialSum[tx + stride];
}
}
//@@ Thread 0 owns partialSum[0] after the stride==1 pass (it both
//@@ wrote and reads it), so no extra barrier is needed here.
if (tx == 0) {
output[blockIdx.x] = partialSum[0];
}
}
13,002 |
//Needs Header Files for the functions; The header file should have both C and CUDA functions
//This file uses 6 hourly data. Each day is 6 hours long and skipping a day means to add 6
//to the counter that counts the timesteps (l).
//The birds start at 00:00 UTC, which is 6 pm Central Time, for example when there is no daylight savings.
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <pthread.h>
#include <string.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <math.h>
#include <float.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <getopt.h>
#include <math.h>
//#include "birds_CUDA.h"
//#define CUDA_API_PER_THREAD_DEFAULT_STREAM
/*
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
*/
#define PI 3.14159
#define LONG_SIZE 429
#define LAT_SIZE 429
#define LINESIZE 15*LONG_SIZE+LONG_SIZE - 3
#define TOTAL_DAYS 122
#define TIMESTEPS_PER_DAY 24
#define TIMESTEPS TOTAL_DAYS*TIMESTEPS_PER_DAY
#define SKIP_TIMESTEPS 0
//This is the number of timesteps that the bird will skip in the beginning to get to the desired
//takeoff time. Since the data starts at 7 pm, the birds will skip the first 23 hours to get to
//6pm.
#define INITIAL_SKIP_TIMESTEPS 23
//The maximum latitude south that the model cares about bird flight. If birds go below
//that latitude the model stops
//Counted from the North;
#define MAX_LAT_SOUTH 300
//Stopover days; As of now, if 0 then the bird flies without stopping continuously;
//If 1, then the bird waits for 18 hours after successful 6 hours of flight to fly again
#define STOPOVER_DAYS 0
//#define DESIRED_SPEED 3.6 //Birds want to travel at 10m/s, it is 36km/hr(in the grid it is 3.6 units per hour)
#define DESIRED_SPEED 10.5 //Air speed; Desired speed = flightspeed + windspeed ; Only used in windprofit calculation
#define STD_BIRDANGLE 10.0 //Standard deviation * 6 = the total difference from max to min angle possible
//If STD_BIRDANGLE = 10 then the angle can differ +- (10*6)/2 = +- 30 from mean
#define glCompAcc 1e-8 //If the difference is equal to or less than this then equal
#define MIN_PROFIT -10
//Defining the x-variable size, it's sum and
//sum of squares as needed for slope calculation
#define REGRESSION_HRS 6
//Precipitation (mm/hr) below which birds can fly
#define MAX_PRECIP 2
//HRS_SUM = sum(1 to 12) before. Now has to be sum(1 to 6) = 21
#define HRS_SUM 21
#define HRS_SQUARE_SUM 91
#define DENOM_SLOPE (REGRESSION_HRS * HRS_SQUARE_SUM)-(HRS_SUM * HRS_SUM)
// Barometric pressure
// Bird finds the pressure at the time it leaves and compares it with the data from
// the previous day.
//The angle that the bird flies when it is out at sea and needs to get back to land.
//To make the birds head back directly west the angle must be set to 180.
#define BIRD_SEA_ANGLE 180
//The maximum number of hours that the birds can fly continuously
#define BIRD_HRS_LIMIT 72
#define TOTAL_DATA_FILES 9
//Total number of data files or variables bird flight depends on;Does not include direction files and land water data
#define NUM_DATA_FILES 6
#define THREADS_PER_BLOCK 32
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define TOTAL_DAYS_PER_DATA_TRANSFER 5
//------------------------------Notes---------------------------------------------------------------------------------------
/*
Altitude = 850 millibars
Year = 2009
22 Jan 2015 No upper limit to the bird flight speed currently; Birds can fly well above 10m/s
Precipitation = millimeters
*/
//--------------------------------------------------------------------------------------------------------------------------
__global__ void setup_kernel(unsigned int seed,curandState *states,int NumOfBirds);
__global__ void generate_kernel(curandState *states,float* numbers,int NumOfBirds);
__global__ void bird_movement(float* rowArray,float* colArray,int NumOfBirds,int start_l,int cur_l,int max_timesteps,float* udata,float* vdata,
float* u10data,float* v10data,float* dirData,float* precipData,float* pressureData,
float* lwData,uint8_t* birdStatus,int* birdTimesteps);
__device__ float bilinear_interpolation_SmallData(float x,float y,float* data_array);
__device__ float bilinear_interpolation_LargeData(float x,float y,float* data_array,int l);
__device__ float WrappedNormal(int id,float MeanAngle,float AngStdDev,float* rand_norm_nums,int cur_timestep);
__device__ float getProfitValue(float u_val,float v_val,float dirVal,float dir_u,float dir_v);
__device__ int bird_AtSea_Within24Hrs(int id,int arrLength,float* rowArray,float* colArray,int start_l,
int l,float* udata,float* vdata,float* lwData,uint8_t* birdStatus,uint8_t var_product,uint8_t l_product,uint8_t l_idx);
__device__ float randNorm(int id, int timestep, float mean, float stdev);
static void* write_dataVars(void* arguments);
static void* read_dataFiles(void* arguments);
int convert_to_month(int month,int day);
static void HandleError( cudaError_t err,const char *file, int line );
long Get_GPU_devices();
//-------------------------------------------------------------------------------------------------------------------------------------
//Argument bundle handed to the pthread start routines read_dataFiles and
//write_dataVars (a void* is required by pthread_create). One instance per
//input data file.
struct file_IO {
FILE *fp; //open text file to be parsed by read_dataFiles
float* inpVals; //parsed grid values (source for write_dataVars)
float* streamArray; //destination buffer filled by write_dataVars
size_t dataSize; //number of floats to copy into streamArray
}inpStruct[8];
//-------------------------------------------------------------------------------------------------------------------------------------
//Global Variables
float* udata;
float* vdata;
float* u10data;
float* v10data;
float* precipData;
float* pressureData;
float* dir_u;
float* dir_v;
float* lwData;
float* dirData;
//-------------------------------------------------------------------------------------------------------------------------------------
//Device-side copies of the model configuration. __constant__ memory is
//read through the broadcast constant cache, which is ideal here since all
//threads read the same values.
__device__ __constant__ int TotalTimesteps = TIMESTEPS;
__device__ __constant__ int LatSize = LAT_SIZE;
__device__ __constant__ int LongSize = LONG_SIZE;
__device__ __constant__ float pi = PI;
__device__ __constant__ int InitialSkipTimesteps = INITIAL_SKIP_TIMESTEPS;
__device__ __constant__ int StdBirdAngle = STD_BIRDANGLE;
__device__ __constant__ int BirdSeaAngle = BIRD_SEA_ANGLE;
__device__ __constant__ int BirdHrsLimit = BIRD_HRS_LIMIT;
__device__ __constant__ int MinProfit = MIN_PROFIT;
__device__ __constant__ int MaxPrecip = MAX_PRECIP;
__device__ __constant__ int MaxLatSouth = MAX_LAT_SOUTH;
//BUG FIX: DESIRED_SPEED is 10.5, but this constant was declared int,
//silently truncating the birds' air speed to 10 m/s in every device
//function that reads DesiredSpeed. Declared float so the configured
//value is actually used; all uses are in float arithmetic, so callers
//are unaffected apart from the corrected value.
__device__ __constant__ float DesiredSpeed = DESIRED_SPEED;
__device__ __constant__ int StopoverDays = STOPOVER_DAYS;
__device__ __constant__ int DenomSlope = DENOM_SLOPE;
__device__ __constant__ int HrsSum = HRS_SUM;
__device__ __constant__ int RegressionHrs = REGRESSION_HRS;
__device__ __constant__ float GlCompAcc = glCompAcc;
//NOTE(review): Rand_Precision is not used in the visible code --
//presumably a quantization scale for random draws; confirm before use.
__device__ __constant__ float Rand_Precision = 1000;
//Mutable global timestep counter (ordinary device memory, not constant).
__device__ int CurrentTimestep = 0;
//###########################################################################################################################################//
//Getting a random normal number from Dr. David Heibler
//u1 and u2, two random numbers created using Linear Congruential Generator
//The period maximized by using Hull-Dobell Theorem (http://chagall.med.cornell.edu/BioinfoCourse/PDFs/Lecture4/random_number_generator.pdf)
//Using the theorem to get u1: a1 = 1791, m1 = 2864 and c1 = 5827. (c1 is prime therefore c1 is relatively prime with any other number; Choosing m1=2864
// so that the prime factors are 2 and 179; Now, a1 = 179*10 + 1 as a1-1 has to be divisible by prime factors of m1)
//Using the theorem to get u2: a2 = 931, m2 = 5382 and c2 = 9461. (31, 2 and 3 are prime factors of 5382, therefore a2 = 931)
//Draws one normally distributed float via the Box-Muller transform.
//u1 and u2 come from two small linear congruential generators seeded from
//(id+1)*timestep (LCG parameters chosen with the Hull-Dobell theorem; see
//the comment block above). Deterministic for a given (id, timestep) pair.
// id: bird/thread index mixed into the seed
// timestep: current model timestep mixed into the seed
// mean, stdev: parameters of the target normal distribution
__device__ float randNorm(int id, int timestep, float mean, float stdev)
{
int tmp, seed, a1, m1, c1, a2, m2, c2;
float x, u1, u2;
//Seed has to be higher than int
seed = (id+1)*(int)timestep;
a1 = 1791;
m1 = 2864;
c1 = 5827;
a2 = 931;
m2 = 5382;
c2 = 9461;
tmp = (a1 * seed + c1) % m1;
if(tmp == 0) tmp=1; //avoid u1 == 0 (logf(0) is -inf)
u1 = (float)tmp/(float)m1;
tmp = (a2 * seed + c2) % m2;
if(tmp == 0) tmp=1;
u2 = (float)tmp/(float)m2;
//NOTE(review): if (a1*seed + c1) overflows to negative, tmp can be
//negative and logf(u1) becomes NaN -- confirm the seed range upstream.
//BUG FIX: use sqrtf and float literals; the original called double
//sqrt(-2.0*...), silently promoting the expression to double precision
//on the device.
x = sqrtf(-2.0f*logf(u1)) * cosf(2.0f*pi*u2);
x = x*stdev + mean;
if(id==34)printf("timestep: %d, tmp2: %d,u1: %f, u2: %f,x: %f\n",timestep,tmp,u1,u2,x);
return x;
}
//###########################################################################################################################################//
//Moves a bird that is still over the sea after its first 10 flight hours,
//simulating hours 10..23 of the same flight ("within 24 hours"). While at
//sea the bird heads toward BirdSeaAngle (180 = due west) at DesiredSpeed
//plus wind. Branchless style: zero-valued multipliers freeze the position
//update instead of divergent branches.
// id: bird index; arrLength: entries per bird in rowArray/colArray
// start_l: first timestep of the data chunk currently in device memory
// l: current timestep; the returned value is the last used timestep
// var_product: enable flag from the caller (0 disables movement)
// l_product: 0 freezes the timestep counter, 1 advances it
// l_idx: 0 when the caller's timestep did not advance, else 1
__device__ int bird_AtSea_Within24Hrs(int id,int arrLength,float* rowArray,float* colArray,int start_l,int l,
float* udata,float* vdata,float* lwData,uint8_t* birdStatus,uint8_t var_product,uint8_t l_product,uint8_t l_idx)
{
float u_val,v_val,u_dir,v_dir,pos_row,pos_col;
float index = 0;
int bckp_l; //NOTE(review): unused local -- candidate for removal
float count_timeSteps = 0;
uint8_t var_product2;
var_product2 = var_product;
//Last stored position (l_idx compensates when l was not advanced).
pos_row = rowArray[id * arrLength + l - l_idx];
pos_col = colArray[id * arrLength + l - l_idx];
//Fixed "head back to land" velocity components while at sea.
u_dir = DesiredSpeed * cosf(BirdSeaAngle * (pi/180));
v_dir = DesiredSpeed * sinf(BirdSeaAngle * (pi/180));
for(count_timeSteps = 10;count_timeSteps<24;count_timeSteps++){
//A dead bird (birdStatus 0) permanently zeroes the movement factor.
var_product2 = var_product2 * birdStatus[id];
/** Bilinear interpolation for u and v data **/
u_val = bilinear_interpolation_LargeData(pos_col,pos_row,udata,l-start_l);
v_val = bilinear_interpolation_LargeData(pos_col,pos_row,vdata,l-start_l);
/** Desired speed needs to change in the case of column position or the birds
will not fly west **/
//0.36 scales m/s over one hour into grid units; the row axis points
//south, hence the -1 on the v component.
pos_row = pos_row + var_product2 * (v_val + v_dir) * 0.36 * -1;
pos_col = pos_col + var_product2 * (u_val + u_dir) * 0.36;
rowArray[id * arrLength + l] = pos_row;
colArray[id * arrLength + l] = pos_col;
//printf("At sea within 24 hours; \tRow: %f,Col:%f\n",rowArray[id * arrLength + l],colArray[id * arrLength + l]);
//printf("At sea within 24 hours; Timestep #: %ld\n",l);
//Land/water mask at the new cell: 1 = land, 0 = sea, >1 = fresh water.
index = lwData[__float2int_rd(pos_row * LongSize + pos_col)];
//printf("Index after 10 hours is %f\n",index);
if(index == 1.0){
var_product2 = 0; //reached land: freeze movement for the rest of the loop
}else if (index == 0){ //If bird is above sea
var_product2 = birdStatus[id];
}else if (index > 1){ //If bird is above fresh water
var_product2 = birdStatus[id];
}
//Kill the bird if it leaves the grid or crosses the southern limit.
if((pos_row > LatSize-1)||(pos_row >= MaxLatSouth) || (pos_col > LongSize-1)||(pos_row < 0.0)||(pos_col < 0.0 )){
birdStatus[id] = 0;
}
l = l + l_product;
}
//Undo the final increment so the last used timestep is returned.
l = l - l_product;
/* if(index == 0){
birdStatus[id] = 0;
} */
return l;
}
//###########################################################################################################################################//
//Continues a bird that is still at sea after 24 hours of continuous
//flight, for up to BirdHrsLimit total hours. If the bird reaches land the
//remaining-timestep limit is recomputed (rounded up to whole days plus
//stopover days); if it never reaches land it is marked dead. Returns the
//last used timestep.
// Parameters mirror bird_AtSea_Within24Hrs (without l_idx).
__device__ int bird_AtSea_After24Hrs(int id,int arrLength,float* rowArray,float* colArray,int start_l,int l,
float* udata,float* vdata,float* lwData,uint8_t* birdStatus,uint8_t var_product,uint8_t l_product)
{
float u_val,v_val,u_dir,v_dir,pos_row,pos_col;
int count_timeSteps, timesteps_limit, index;
uint8_t var_product2;
index = 0;
var_product2 = var_product;
pos_row = rowArray[id * arrLength + l - 1];
pos_col = colArray[id * arrLength + l - 1];
//printf("After getting the first row and cols (Inside After 24 hours function)\n");
//Fixed "head back to land" velocity while at sea.
u_dir = DesiredSpeed * cosf(BirdSeaAngle * (pi/180));
v_dir = DesiredSpeed * sinf(BirdSeaAngle * (pi/180));
//These 25 for both condition as the for loop must
//be done atleast once so that l=l+l_product is done inside
//the loop and it is offset by l=l-l_product outside the loop.
//NOTE(review): this first if/else is dead code -- timesteps_limit is
//unconditionally overwritten by the next if/else. Presumably the intent
//was a combined condition (l_product == 0 || var_product2 == 0); confirm.
if(l_product == 0){
timesteps_limit = 25;
}else{
timesteps_limit = BirdHrsLimit;
}
if(var_product2 == 0){
timesteps_limit = 25;
}else{
timesteps_limit = BirdHrsLimit;
}
//printf("After getting the timestep limit (Inside After 24 hours function)\n");
//This loop is skipped if a bird is not at sea after 24 hours
for(count_timeSteps = 24; count_timeSteps < timesteps_limit; count_timeSteps++){
var_product2 = var_product2 * birdStatus[id];
//printf("Count Timesteps:: %d\n",count_timeSteps);
/** Bilinear interpolation for u and v data **/
u_val = bilinear_interpolation_LargeData(pos_col,pos_row,udata,l-start_l);
v_val = bilinear_interpolation_LargeData(pos_col,pos_row,vdata,l-start_l);
/** Desired speed needs to change in the case of column position or the birds
will not fly west **/
pos_row = pos_row + var_product2 * (v_val + v_dir) * 0.36 * -1;
pos_col = pos_col + var_product2 * (u_val + u_dir) * 0.36;
rowArray[id * arrLength + l + 1] = pos_row;
colArray[id * arrLength + l + 1] = pos_col;
//NOTE(review): index accumulates (+=) across iterations, so the
//==1 / ==0 / >1 branches below test a running sum rather than the
//current cell's mask value -- confirm intent (within-24hrs variant
//uses plain assignment).
index += lwData[__float2int_rd(pos_row * LatSize + pos_col)];
//Checking if the bird found land
//Limit calculated only if bird found at land the first time
if(index == 1){ //If bird is above land
var_product2 = 0;
//Round flight time up to whole days, then add the stopover rest.
timesteps_limit = __float2ull_ru(count_timeSteps/24) * 24 + 24 * StopoverDays;
}else if (index == 0){ //If bird is above sea
var_product2 = var_product2;
}else if (index > 1){ //If bird is above fresh water
var_product2 = var_product2;
}
//l = l + var_product2;
l = l + l_product;
//Kill the bird if it leaves the grid or crosses the southern limit.
if((pos_row > LatSize-1)||(pos_row >= MaxLatSouth) || (pos_col > LongSize-1)||(pos_row < 0.0)||(pos_col < 0.0 )){
birdStatus[id] = 0;
}
}
//Final land check: a bird still not on land at this point dies.
index = lwData[__float2int_rd(pos_row * LatSize + pos_col)];
//printf("After getting the index (Inside After 24 hours function)\n");
if (index != 1){
birdStatus[id] = 0;
}
//Undo the final increment so the last used timestep is returned.
l = l - l_product;
return l;
}
//###########################################################################################################################################//
//Computes the wind "profit" for a bird that wants to fly along the unit
//direction implied by (dir_u,dir_v) in wind (u_val,v_val). All wind data
//in m/s. dirVal (heading in degrees) is unused in the math but kept for
//interface compatibility with existing callers.
//Returns: if the tailwind is at most DesiredSpeed, DesiredSpeed minus the
//airspeed required to make good DesiredSpeed (law of cosines); otherwise
//tail component minus cross component.
__device__ float getProfitValue(float u_val,float v_val,float dirVal,float dir_u,float dir_v)
{
float diffAngle,magnitude,magnitude_squared,tailComponent,crossComponent,profit_value;
tailComponent = 0;
magnitude = hypotf(u_val,v_val);
magnitude_squared = magnitude * magnitude;
/** Tail component of the wind: scalar projection of the wind vector onto
the desired direction of flight **/
tailComponent = (dir_v * v_val + dir_u * u_val);
//BUG FIX: normalize by |(dir_u,dir_v)|. The original divided by
//hypotf(dir_u,dir_u) -- the magnitude of (dir_u,dir_u) -- which skews
//the projection whenever dir_u != dir_v.
tailComponent = tailComponent/hypotf(dir_u,dir_v);
/** DiffAngle is the angle between the desired direction of the bird and the direction of the wind.
DiffAngle has to be calculated such that both the vectors are pointing away from where they meet.
Using the formula to get angle between two vectors **/
diffAngle = acosf( (u_val*dir_u + v_val * dir_v)/ (( hypotf(u_val,v_val) * hypotf(dir_u,dir_v) )) ) * 180/pi;
/** Separate profit value methods have to be used if the tail component is less that equal to or greater than the desired speed of the birds **/
if(tailComponent <= DesiredSpeed) {
//Law of cosines: airspeed needed to achieve DesiredSpeed over ground.
profit_value = (DesiredSpeed * DesiredSpeed) + magnitude_squared - 2 * DesiredSpeed * magnitude * cosf(diffAngle * pi/180);
profit_value = DesiredSpeed - sqrtf(profit_value);
}
else {
/** Perpendicular to a vector (x,y) is (y,-x) or (-y,x) Cross component is always positive **/
crossComponent = fabsf((-dir_v*u_val + dir_u*v_val)/hypotf(dir_v,dir_u));
profit_value = tailComponent - crossComponent;
}
return profit_value;
}
//###########################################################################################################################################//
//Bilinear interpolation over a single LatSize x LongSize grid.
//x is the fractional column, y the fractional row; data_array is row-major
//with LongSize columns. Returns the value interpolated from the four
//surrounding grid samples.
__device__ float bilinear_interpolation_SmallData(float x,float y,float* data_array)
{
float xLo = floorf(x);
float xHi = ceilf(x);
float yLo = floorf(y);
float yHi = ceilf(y);
//The four grid samples around (x, y).
float q11 = data_array[(int)(yLo * LongSize + xLo)];
float q12 = data_array[(int)(yHi * LongSize + xLo)];
float q21 = data_array[(int)(yLo * LongSize + xHi)];
float q22 = data_array[(int)(yHi * LongSize + xHi)];
//Interpolate along x on both rows, then along y between the rows.
float fx = x - xLo;
float rowLo = q11 + fx*(q21 - q11);
float rowHi = q12 + fx*(q22 - q12);
return rowLo + (y - yLo)*(rowHi - rowLo);
}
//###########################################################################################################################################//
//Bilinear interpolation in one time slice of a time-major data volume.
//l selects the LatSize x LongSize slice; x is the fractional column and
//y the fractional row within that slice. Returns the interpolated value.
__device__ float bilinear_interpolation_LargeData(float x,float y,float* data_array,int l)
{
float xLo = floorf(x);
float xHi = ceilf(x);
float yLo = floorf(y);
float yHi = ceilf(y);
//Offset of the requested time slice, then the four surrounding samples.
float sliceBase = l * LatSize * LongSize;
float q11 = data_array[(int)(sliceBase + yLo * LongSize + xLo) ];
float q12 = data_array[(int)(sliceBase + yHi * LongSize + xLo) ];
float q21 = data_array[(int)(sliceBase + yLo * LongSize + xHi) ];
float q22 = data_array[(int)(sliceBase + yHi * LongSize + xHi) ];
//Interpolate along x on both rows, then along y between the rows.
float fx = x - xLo;
float rowLo = q11 + fx*(q21 - q11);
float rowHi = q12 + fx*(q22 - q12);
return rowLo + (y - yLo)*(rowHi - rowLo);
}
//###########################################################################################################################################//
/*
__global__ void setup_kernel(unsigned int seed,curandState *states,int NumOfBirds)
{
//Thread indices
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int id = y * TotalTimesteps + x;
if((x >= TotalTimesteps) || (x < 0)){
return;
}else if((y>= NumOfBirds) || (y < 0)){
return;
}else if(id >= TotalTimesteps * NumOfBirds){
return;
}else{
curand_init(seed,id,0,&states[id]);
}
}
//###########################################################################################################################################//
__global__ void generate_kernel(curandState *states,float* numbers,int NumOfBirds)
{
//Thread indices
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int id = y * TotalTimesteps + x;
if((x >= TotalTimesteps) || (x < 0)){
return;
}else if((y>= NumOfBirds) || (y < 0)){
return;
}else if(id >= TotalTimesteps * NumOfBirds){
return;
}else{
//Making a local copy for efficiency
curandState localState = states[id];
numbers[id] = curand_normal(&localState);
}
return;
}
*/
//###########################################################################################################################################//
//###########################################################################################################################################//
//###########################################################################################################################################//
//Kernel: advances every bird through up to 5 day-iterations of the
//migration model. One thread per bird (1-D launch); threads whose id is
//beyond NumOfBirds-1 exit immediately.
// rowArray/colArray: per-bird position history, arrLength entries per bird
// start_l: first timestep of the data chunk currently resident on the GPU
// cur_l: timestep at which this launch starts; max_timesteps: chunk end
// udata/vdata: 850mb winds; u10data/v10data: 10m winds; dirData: preferred
// heading grid; precipData/pressureData: weather grids; lwData: land(1)/
// sea(0)/fresh-water(>1) mask
// birdStatus: 1 = active, 0 = dead/out of bounds; birdTimesteps: last
// completed timestep per bird
//Branchless style throughout: zero-valued "var_product"/"l_product"
//multipliers freeze a bird's position/timestep instead of divergent
//branches, keeping threads in lockstep.
__global__ void bird_movement(float* rowArray,float* colArray,int NumOfBirds,int start_l,int cur_l,int max_timesteps,float* udata,float* vdata,
float* u10data,float* v10data,float* dirData,float* precipData,float* pressureData,
float* lwData,uint8_t* birdStatus,int* birdTimesteps)
{
//Thread indices
int id = blockIdx.x * blockDim.x + threadIdx.x;
//if(id > (NumOfBirds -1)||(birdStatus[id]==0)||(birdTimesteps[id] > cur_l)){
if(id > (NumOfBirds -1)||(birdTimesteps[id] > cur_l)){
//The condition cur_l > max_timesteps is OK now because all birds start at the same time
//NOT OK once birds start at different dates
if(birdTimesteps[id] > cur_l){
printf("birdTimesteps: %d, cur_l: %d\n",birdTimesteps[id],cur_l);
}
return;
}
else{
//NOTE(review): var_10hrsSea is declared but never used below.
uint8_t var_sea, var_profit_10m, var_10hrsSea, var_product, l_product,l_idx;
//Making a local copy of the timstep variable
int l,new_l,prev_l;
long l_old;
float profit_value,actualAngle,wrappedAngle, index;
float last_pressure,pressure_sum,pressure_MultSum,slope;
float u_ten,v_ten,u_val,v_val,uDir_value,vDir_value,precip_val;
int k;
float pos_row,pos_col;
int arrLength,days;
//--------------Checking if timestep is larger than the current timestep
//Should be changed from cur_l to max_timesteps
//NOTE(review): this branch is unreachable -- the early return above
//already excluded birdTimesteps[id] > cur_l, so l_product is always 1
//and var_product stays uninitialized until the 6-hour loop assigns it.
if(birdTimesteps[id] > cur_l){
l_product = 0;
var_product = 0;
}else{
l_product = 1;
}
// l_product = 1;
l = cur_l;
new_l = l;
arrLength = (TotalTimesteps + 1); //Why +1 ?
slope = 0;
days = 0;
//Each while-iteration simulates one takeoff/flight/rest cycle ("day").
while((l < max_timesteps) && (days<5)){
pos_row = rowArray[id * arrLength + l - 1]; //Why -1 ?
pos_col = colArray[id * arrLength + l - 1];
if((pos_row > LatSize-1) ||(pos_row >= MaxLatSouth) || (pos_col > LongSize-1)||(pos_row < 0.0)||(pos_col < 0.0)){
birdStatus[id] = 0;
if(id==34) printf("(Before computation) status = 0; As pos_row = %f (id:%d)\n",pos_row,id);
}
//--------------Getting the wrapped angle
//NOTE(review): row stride here is LatSize while other lookups use
//LongSize; both are 429 so the result is identical, but unify.
actualAngle = dirData[__float2int_rd(pos_row * LatSize + pos_col)];
wrappedAngle = randNorm(id,l, actualAngle, STD_BIRDANGLE);
//Normalize the sampled heading into [0, 360).
if(wrappedAngle > 360){
wrappedAngle = wrappedAngle - 360;
}else if(wrappedAngle < 0 ){
wrappedAngle = 360 + wrappedAngle;
}
//--------------
uDir_value = DesiredSpeed * cosf(wrappedAngle * (pi/180));
vDir_value = DesiredSpeed * sinf(wrappedAngle * (pi/180));
//Takeoff decision uses the 10m winds at the current position.
u_ten = bilinear_interpolation_LargeData(pos_col,pos_row,u10data,l-start_l);
v_ten = bilinear_interpolation_LargeData(pos_col,pos_row,v10data,l-start_l);
profit_value = getProfitValue(u_ten,v_ten,wrappedAngle,uDir_value,vDir_value);
//--------------Checking for profit value
//NOTE(review): last_pressure is read here on the first loop pass
//before it is ever assigned (it is only set in the regression loop
//at the bottom) -- uninitialized read; confirm and initialize.
if((profit_value >= MinProfit) && ((last_pressure>=1009)||(slope >-1))){
var_profit_10m = 1;
}else{
var_profit_10m = 0;
//printf("Profit value at 10m is low \n");
}
//--------------
if(id==34) printf("l_product: %d \n",l_product);
if(id==34) printf("Start timestep: %d\n",l);
prev_l = l;
//-----------------------------The 6 hour flight
for(k=0;k<6;k++) {
//Getting the wrapped angle
actualAngle = dirData[__float2int_rd(pos_row * LatSize + pos_col)];
if(id==34) printf("(Inside 6 hour flight) l: %d,STD_BIRDANGLE: %f,actualAngle: %f, (id: %d)\n",l,STD_BIRDANGLE,actualAngle,id);
wrappedAngle = randNorm(id,l, actualAngle, STD_BIRDANGLE);
if(wrappedAngle > 360){
wrappedAngle = wrappedAngle - 360;
}else if(wrappedAngle < 0 ){
wrappedAngle = 360 + wrappedAngle;
}
uDir_value = DesiredSpeed * cosf(wrappedAngle * (pi/180));
vDir_value = DesiredSpeed * sinf(wrappedAngle * (pi/180));
if(id==34) printf("(Inside 6 hour flight) wrappedAngle: %f, uDir_value: %f,vDir_value: %f\n",wrappedAngle,uDir_value,vDir_value);
u_val = bilinear_interpolation_LargeData(pos_col,pos_row,udata,l-start_l);
v_val = bilinear_interpolation_LargeData(pos_col,pos_row,vdata,l-start_l);
//NOTE(review): precip_val is interpolated but never used below;
//MAX_PRECIP gating appears to be unimplemented here.
precip_val = bilinear_interpolation_LargeData(pos_col,pos_row,precipData,l-start_l);
if(id==34) printf("(Inside 6 hour flight) u_val: %f,v_val: %f\n",u_val,v_val);
//Getting the previous position values for row and column
pos_row = rowArray[id * arrLength + l - 1];
pos_col = colArray[id * arrLength + l - 1];
if(id==34) printf("(Inside 6 hour flight) pos_row: %f,pos_col: %f,arrLength: %d,l: %d,(id:%d)\n",pos_row,pos_col,arrLength,l,id);
//printf("During 6 hour flight: Row: %f \t Col: %f (id:%d)\n\n\n",pos_row,pos_col,id);
if((pos_row > LatSize-1)||(pos_row >= MaxLatSouth) || (pos_col > LongSize-1)||(pos_row < 0.0)||(pos_col < 0.0 )){
birdStatus[id] = 0;
if(id==34) printf("(In 6 hours flight) status = 0; As pos_row = %f (id:%d)\n",pos_row,id);
//printf("Dead bird \n");
}
//Movement enabled only when alive, profitable, and advancing.
var_product = birdStatus[id] * var_profit_10m * l_product;
//Storing the new values (0.36 converts m/s over 1h to grid units;
//row axis points south, hence the -1).
rowArray[id * arrLength + l] = pos_row + var_product * (v_val + vDir_value ) * 0.36 * -1;
colArray[id * arrLength + l] = pos_col + var_product * (u_val + uDir_value) * 0.36;
if(id==34) printf("(Inside 6 hour flight) var_product: %d,var_profit_10m: %d,l_product: %d,uDir_value: %f,vDir_value: %f,row: %f,col: %f\n",var_product,var_profit_10m,l_product,uDir_value,vDir_value,rowArray[id * arrLength + l],colArray[id * arrLength + l]);
//printf("6 Hour Flight\tRow: %f,Col:%f\n",rowArray[id * arrLength + l],colArray[id * arrLength + l]);
//printf("6 hour flight;Timestep #: %ld\n",l);
l = l + l_product;
}
//If l_product = 0 then the timestep should remain the same
if (prev_l == l){
l_idx = 0;
}else{
l_idx = 1;
}
//The value of l increases at the last iteration
pos_row = rowArray[id * arrLength + l - l_idx];
pos_col = colArray[id * arrLength + l - l_idx];
index = lwData[__float2int_rd(pos_row * LatSize + pos_col)];
// If the bird is at sea after the first 6 hours of flight
if( index == 1.0){
var_sea = 0;
if(id==34) printf("Not at sea after 6 hours \n");
}else{
var_sea = 1;
if(id==34) printf("At sea after 6 hours \n");
}
//Getting the wrapped angle; Same uDir_value and vDir_value used for the 4 hours
actualAngle = dirData[__float2int_rd(pos_row * LatSize + pos_col)];
wrappedAngle = randNorm(id,l, actualAngle, STD_BIRDANGLE);
if(wrappedAngle > 360){
wrappedAngle = wrappedAngle - 360;
}else if(wrappedAngle < 0 ){
wrappedAngle = 360 + wrappedAngle;
}
uDir_value = DesiredSpeed * cosf(wrappedAngle * (pi/180));
vDir_value = DesiredSpeed * sinf(wrappedAngle * (pi/180));
prev_l = l;
if(id==34) printf("(After 6 hour flight) u_val: %f,v_val: %f\n",u_val,v_val);
//-----------------------At sea after first 6 hours of flight
//Hours 6..9: the bird keeps flying only if it ended over water.
for(k=6;k<10;k++){
u_val = bilinear_interpolation_LargeData(pos_col,pos_row,udata,l-start_l);
v_val = bilinear_interpolation_LargeData(pos_col,pos_row,vdata,l-start_l);
var_product = birdStatus[id] * var_profit_10m * var_sea * l_product;
//Getting new position values for row and column and storing it
pos_row = pos_row + var_product * (v_val + vDir_value ) * 0.36 * -1;
pos_col = pos_col + var_product * (u_val + uDir_value) * 0.36;
//NOTE(review): format specifiers mismatched -- var_product (uint8_t)
//printed with %f and uDir_value (float) with %d; debug output garbled.
if(id==34) printf("(During +4 hour flight) u_val: %f,v_val: %f, var_product: %f,uDir_value: %d, vDir_value:%f\n",u_val,v_val,var_product,uDir_value,vDir_value);
//printf("+4 Hour Flight\tRow: %f,Col:%f\n",pos_row,pos_col);
//printf("+4 hour flight;Timestep #: %ld\n",l);
if((pos_row > LatSize -1 )||(pos_row >= MaxLatSouth) || (pos_col > LongSize -1 )||(pos_row < 0.0)||(pos_col < 0.0 )){
birdStatus[id] = 0;
if(id==34) printf("(During +4 hour flight) status = 0; As pos_row = %f (id:%d)\n",pos_row,id);
}
rowArray[id * arrLength + l] = pos_row;
colArray[id * arrLength + l] = pos_col;
if(id==34) printf("During +4 hour flight: Row: %f \t Col: %f, u_val: %f, v_val: %f, l:%d (id:%d)\n\n\n",pos_row,pos_col,u_val,v_val,l,id);
//printf("+4 Hour Flight\tRow: %f,Col:%f\n",rowArray[id * arrLength + l + 1],colArray[id * arrLength + l + 1]);
l = l + l_product;
}
//------------------------
index = lwData[__float2int_rd(pos_row * LongSize + pos_col)];
if(index == 1){
var_sea = 0;
//printf("Not at sea after 10 hours \n");
}else{
var_sea = 1;
//printf("At sea after 10 hours \n");
}
//----------------------- If at sea even after the 10 hours but within 24 hours
var_product = birdStatus[id] * var_profit_10m * var_sea * l_product;
l = bird_AtSea_Within24Hrs(id,arrLength,rowArray,colArray,start_l,l,udata,vdata,lwData,birdStatus,var_product,l_product,l_idx);
//------------------------
if(id==34) printf("Timestep after bird_AtSea_Within24Hrs %d\n",l);
pos_row = rowArray[id * arrLength + l - 1]; //Why -1 ?
pos_col = colArray[id * arrLength + l - 1];
//printf("Before getting the index \n");
index = lwData[__float2int_rd(pos_row * LongSize + pos_col)];
if(index == 1.0){
var_sea = 0;
if(id==34) printf("Var_sea: Not at sea after 24 hours (id:%d) \n",id);
}else{
var_sea = 1;
if(id==34) printf("Var_sea: At sea after 24 hours (id:%d) \n",id);
}
//printf("After getting the index \n");
//----------------------- If at sea even after the the 10 hours and beyond 24 hours
var_product = birdStatus[id] * var_profit_10m * var_sea * l_product;
if(var_product == 1){
if(id==34) printf("Var product = 1 : Calculations done for at sea after 24 hours(id:%d) \n",id);
}else{
if(id==34) printf("Var product = 0; No calculations for at sea after 24 hours (id:%d) \n",id);
}
//printf("After the variable product \n");
if(id==34) printf("birdStatus[%d]: %d,var_profit_10m: %d,var_sea: %d,l_product: %d \n",id,birdStatus[id],var_profit_10m,var_sea,l_product);
//printf("The current value of l is: %ld And of start_l is: %ld \n\n",l,start_l);
l = bird_AtSea_After24Hrs(id,arrLength,rowArray,colArray,start_l,l,udata,vdata,lwData,birdStatus,var_product,l_product);
if(id==34) printf("Timestep after bird_AtSea_After24Hrs %d (id: %d)\n",l,id);
//days = (l - start_l)/TIMESTEPS_PER_DAY;
//printf("Days: %d\n",days);
//printf("After bird_AtSea_After24Hrs and before regression calculations \n");
//------------------------
//Record progress, then fit a linear pressure trend over the last
//RegressionHrs hours at the landing position (used for the next
//takeoff decision).
birdTimesteps[id] = l;
l_old = l - RegressionHrs;
pressure_sum = 0;
pressure_MultSum = 0;
//Dead birds skip the regression loop entirely (k starts past the limit).
if(birdStatus[id]==1){
k=1;
}else{
k=RegressionHrs+1;
}
//Taking the pressure from 6 hours earlier of the location where the bird landed
while((l_old < l) && (k<=RegressionHrs)){
pressure_sum += bilinear_interpolation_LargeData(pos_col,pos_row,pressureData,l_old-start_l); //<----------------ERROR HERE
pressure_MultSum += k * bilinear_interpolation_LargeData(pos_col,pos_row,pressureData,l_old-start_l);
//last_pressure is the last day or the day of flight
if(k == RegressionHrs) {
last_pressure = bilinear_interpolation_LargeData(pos_col,pos_row,pressureData,l_old-start_l);
}
l_old++;
k++;
}
//Least-squares slope of pressure vs. hour over the regression window.
if(birdStatus[id]==1){
slope = ((RegressionHrs * pressure_MultSum) - (pressure_sum * HrsSum))/(DenomSlope);
}else{
slope = 0;
}
if(id==34) printf("l: %d,birdTimesteps[id]: %d (id:%d)\n",l,birdTimesteps[id],id);
days++;
if(id==34) printf("Days: %d (id:%d)\n",days,id);
if(id==34) printf("Row: %f \t Col: %f (id:%d)\n\n\n",pos_row,pos_col,id);
if(id==34) printf("--------------------------------------------------------------\n");
l = l + l_product;
}
}
}
//###########################################################################################################################################//
//###########################################################################################################################################//
//###########################################################################################################################################//
//Returns the total global memory (in bytes) of device 0, or 0 when the
//current device cannot overlap copies with kernels (streams unusable) or
//when no CUDA device is reported.
long Get_GPU_devices()
{
cudaDeviceProp prop;
int curDevice = 0;
int devCount = 0;
HANDLE_ERROR(cudaGetDevice(&curDevice));
HANDLE_ERROR(cudaGetDeviceProperties(&prop,curDevice));
//Without copy/compute overlap the streamed transfer scheme is pointless.
if(!prop.deviceOverlap){
return 0;
}
HANDLE_ERROR(cudaGetDeviceCount(&devCount));
if(devCount <= 0){
return 0;
}
//Report the memory of device 0 (the device this program runs on).
cudaDeviceProp props0;
HANDLE_ERROR(cudaGetDeviceProperties(&props0,0));
return (long)props0.totalGlobalMem;
}
//###########################################################################################################################################//
//pthread start routine: parses one comma-separated LAT_SIZE x LONG_SIZE
//grid of floats from the file in the passed file_IO struct into its
//inpVals array. "NaN" cells are stored as 0.0. Always returns NULL.
//Fixes vs. original: removed the duplicated dead initialization block
//(memset/i/j were set twice) and added a NULL guard on strchr so a
//malformed line with too few fields cannot crash strncpy.
static void* read_dataFiles(void* arguments)
{
struct file_IO *inputArgs;
inputArgs = (struct file_IO *)arguments;
FILE* textFile;
float* dataArray;
textFile = inputArgs->fp;
dataArray = inputArgs->inpVals;
char line[LINESIZE];
char tempVal[15];
char* startPtr,*endPtr;
long j; //row index
int i; //column index
float Value;
memset(line,'\0',sizeof(line));
j=0;
while(fgets(line,LINESIZE,textFile)!=NULL){
startPtr = line;
for(i=0;i<LONG_SIZE;i++){
Value = 0;
memset(tempVal,'\0',sizeof(tempVal));
if(i != (LONG_SIZE - 1)) {
endPtr = strchr(startPtr,',');
//Malformed line (fewer than LONG_SIZE fields): stop this row
//instead of passing a huge length to strncpy.
if(endPtr == NULL){
break;
}
strncpy(tempVal,startPtr,endPtr-startPtr);
if(strcmp("NaN",tempVal)==0) {
Value = 0.0;
}
else{
Value = atof(tempVal);
}
dataArray[j * LAT_SIZE + i] = Value;
startPtr = endPtr + 1;
}
else {
//Last column: no trailing comma; fgets leaves the newline in place.
//NOTE(review): strcpy assumes the final field fits in 15 bytes, and
//only "NaN\n" (with newline) is matched -- a last line without a
//trailing newline falls through to atof; confirm input format.
strcpy(tempVal,startPtr);
if(strcmp("NaN\n",tempVal)==0) {
Value = 0.0;
}
else{
Value = atof(tempVal);
}
dataArray[j * LAT_SIZE + i] = Value;
}
}
j++;
}
return NULL;
}
//###########################################################################################################################################//
static void* write_dataVars(void* arguments)
{
// pthread entry point: copies dataSize floats from inpVals into
// streamArray, both carried by the struct file_IO argument.
// Returns NULL, as required by the pthread start-routine signature.
struct file_IO *io;
io = (struct file_IO *)arguments;
float* src = io->inpVals;
float* dst = io->streamArray;
size_t count = io->dataSize;
long int k;
for(k = 0; k < count; k++){
dst[k] = src[k];
}
return NULL;
}
//###########################################################################################################################################//
int convert_to_month(int month,int day)
{
// Maps a (month, day) pair onto the timestep offset into the weather
// data, which begins on August 1st. Only August(8)..November(11) are
// valid; any other month returns -1.
int monthStartDay;
switch(month){
case 8:  monthStartDay = 1;  break; // data starts in August
case 9:  monthStartDay = 32; break; // after 31 days of August
case 10: monthStartDay = 62; break; // after Aug(31)+Sep(30)
case 11: monthStartDay = 93; break; // after Aug(31)+Sep(30)+Oct(31)
default: return -1;                 // out-of-range month
}
// The first data day only has 23 hours, hence the constant 23 below.
// Aug 1st and Aug 2nd deliberately share the same starting offset.
if((month == 8) && (day == 1 || day == 2)){
return 23;
}
if(month == 8){
return 23 + (day - 1) * TIMESTEPS_PER_DAY;
}
// Later months: whole days elapsed since the start of the data.
return 23 + (monthStartDay - 2 + day) * TIMESTEPS_PER_DAY;
}
//###########################################################################################################################################//
static void HandleError( cudaError_t err,const char *file, int line ) {
// Aborts with a diagnostic when a CUDA API call fails.
// Fix: the diagnostic goes to stderr (the original printed to stdout,
// so errors were lost when stdout was redirected).
if (err != cudaSuccess) {
fprintf( stderr, "%s in %s at line %d\n", cudaGetErrorString( err ),file, line );
exit( EXIT_FAILURE );
}
}
//###########################################################################################################################################//
//###########################################################################################################################################//
//###########################################################################################################################################//
// Entry point for the bird-migration simulation.
// Pipeline: parse CLI options -> validate date/location -> open the
// weather/terrain input files -> read them in parallel with pthreads ->
// stage weather data onto the GPU day-by-day through CUDA streams ->
// launch bird_movement kernels -> write final positions to text files.
// NOTE(review): udata/vdata/u10data/v10data/precipData/pressureData,
// lwData, dirData and inpStruct are file-scope globals declared outside
// this excerpt -- confirm their definitions before modifying.
int main(int argc,char* argv[])
{
//--------------------------Checking for input arguments------------------------------//
char baseFileName[] = "../../Birds_Full/Birds_data/InterpolatedData/";
char yearFileName[80];
char fullFileName[80];
char start_date[12];
char yearStr[4],monthStr[2],dayStr[2];
float starting_row,starting_col;
int offset_into_data = 0;
int NumOfBirds,year,day,month;
int option;
// NOTE(review): none of these option variables has a default; running
// without all of -y -m -d -r -c -N leaves them uninitialized.
while ((option = getopt(argc, argv,"y:m:d:r:c:N:")) != -1) {
switch (option) {
case 'y' : year = atoi(optarg);
break;
case 'm' : month = atoi(optarg);
break;
case 'd' : day = atoi(optarg);
break;
case 'r' : starting_row = atof(optarg);
break;
case 'c' : starting_col = atof(optarg);
break;
// case 't' : breadth = atoi(optarg);
// break;
case 'N' : NumOfBirds = atoi(optarg);
break;
default: printf("\nUsage: birds -y Year -m Month(Number) -d DayOfTheMonth -r StartingRow -c StartingCol -N NumberOfBirds\n");
exit(EXIT_FAILURE);
}
}
/** If starting row is greater than or equal the row that we are interested in; Below a particular row we are not interested in the flight of the birds**/
if(starting_row >= MAX_LAT_SOUTH){
printf("\t\tProvided starting row is below the southern most lattitude at which the model is set to stop\n");
printf("\t\tEither change the starting row location and/or MAX_LAT upto which the birds can fly\n");
exit(-1);
}
//-----------------------------------------------Day-----------------------------------------//
/** Making sure random date is not provided **/
if((day>0) && (day<32)){
sprintf(dayStr,"%d",day);
}else{
printf("\t\t Invalid date provided; Date should be greater than 0 and less than 32\n");
exit(-1);
}
//-----------------------------------------------Month-----------------------------------------//
/** Making sure month provided is between August and November inclusive **/
if((month < 12) && (month > 7)){
sprintf(monthStr,"%d",month);
}else{
printf("\t\t Invalid month provided; Use between 8 and 11 inclusive\n");
exit(-1);
}
/** Converting month and day information into number of timesteps; Special case of AUG 1st is also taken care of
Instead of AUG 1 it starts at August 2 (because data starts at 7pm but birds fly at 6pm) **/
offset_into_data = convert_to_month(month,day);
printf("Offset into data is: %d\n",offset_into_data);
//-----------------------------------------------Year-----------------------------------------//
/** Checking if correct year specified **/
if((year>= 2008) && (year<=2013)){
//Add file location here
sprintf(yearStr,"%d",year);
strcpy(yearFileName,baseFileName);
strcat(yearFileName,yearStr);
strcat(yearFileName,"/");
}
else{
printf("\n\tInvalid year specified\n\tSpecified %d; Use years from 2008 to 2013 in its full format\n",year);
printf("\t\tUsage: birds -y Year -m Month(Number) -d DayOfTheMonth -r StartingRow -c StartingCol -N NumberOfBirds\n");
exit(-1);
}
// Build the "YYYY/M/D" start-date string (informational only).
strcpy(start_date,yearStr);
strcat(start_date,"/");
strcat(start_date,monthStr);
strcat(start_date,"/");
sprintf(dayStr,"%d",day);
strcat(start_date,dayStr);
//------------Opening row and column output data file where lat and long
//-------------------------------------------- positions are stored--------//
FILE *rowdataTxt,*coldataTxt,*birdStatusTxt;
FILE *vdataTxt,*udataTxt,*v10dataTxt,*u10dataTxt,*precipTxt,*pressureTxt,*lwTxt,*dirTxt;
rowdataTxt = fopen("./Output/row_output.txt","a");
if(rowdataTxt == NULL) {
perror("Cannot open output row data file\n");
exit(-1);
}
coldataTxt = fopen("./Output/col_output.txt","a");
if(coldataTxt == NULL) {
perror("Cannot open output col data file\n");
exit(-1);
}
birdStatusTxt = fopen("./Output/birdStatus_Final.txt","a");
if(birdStatusTxt == NULL) {
perror("Cannot open output birdStatus file\n");
exit(-1);
}
//----------------------Opening U850 data file----------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"U850.txt");
printf("U50 filename is %s \n",fullFileName);
udataTxt = fopen(fullFileName,"r");
if(udataTxt == NULL) {
perror("Cannot open file with U850 data\n");
exit(-1);
}
//------------------------Opening V850 data file--------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"V850.txt");
vdataTxt = fopen(fullFileName,"r");
if(vdataTxt == NULL) {
perror("Cannot open file with V850 data\n");
exit(-1);
}
//-----------------------Opening U10 data file---------------------------//
//Birds will check the wind at the surface therefore the u and v
//at 10m is required
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"U10.txt");
u10dataTxt = fopen(fullFileName,"r");
if(u10dataTxt == NULL) {
perror("Cannot open file with U10 data\n");
exit(-1);
}
//-----------------------Opening V10 data file---------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"V10.txt");
v10dataTxt = fopen(fullFileName,"r");
if(v10dataTxt == NULL) {
perror("Cannot open file with V10 data\n");
exit(-1);
}
//--------------------Opening PRCP data file------------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"PRCP.txt");
precipTxt = fopen(fullFileName,"r");
if(precipTxt == NULL) {
perror("Cannot open file with PRCP data\n");
exit(-1);
}
//------------------------Opening MSLP data file--------------------------//
memset(fullFileName,0,strlen(fullFileName));
strcpy(fullFileName,yearFileName);
strcat(fullFileName,"MSLP.txt");
pressureTxt = fopen(fullFileName,"r");
if(pressureTxt == NULL) {
perror("Cannot open file with pressure data!\n");
exit(-1);
}
//--------------------------Opening Land vs Water File---------------------//
lwTxt = fopen("./Lw_and_Dir/land_water_detail.txt","r");
if(lwTxt == NULL) {
perror("Cannot open file with direction data\n");
exit(-1);
}
//--------------------------Opening Direction file
//--------------------(Example: ext_crop.txt or extP_crop.txt)-------------//
dirTxt = fopen("./Lw_and_Dir/ext_Final_NewCoordSystem.txt","r");
//dirTxt = fopen("ext_crop.txt","r");
if(dirTxt == NULL) {
perror("Cannot open file with direction data\n");
exit(-1);
}
//-----------------------------Setting Heap Size,printf buffer size etc--------------------------------------------//
// size_t limit;
// HANDLE_ERROR(cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 500 * 1024 * 1024));
// cudaDeviceGetLimit(&limit,cudaLimitPrintfFifoSize);
// HANDLE_ERROR(cudaDeviceSetLimit(cudaLimitMallocHeapSize,(size_t)(6 * LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float))));
//--------------------------Memory Allocation for global arrays containing weather data----------------------------//
HANDLE_ERROR(cudaDeviceReset());
float *h_row,*h_col;
float *d_row,*d_col;
float *d_udata,*d_vdata,*d_u10data,*d_v10data,*d_lwData;
float *d_dirData,*d_precipData,*d_pressureData;
int *h_birdTimesteps, *d_birdTimesteps;
uint8_t *h_birdStatus,*d_birdStatus;
//Pinned memory is faster than non-pinned memory only if the amount of transferred data
//is above 16MB
// https://www.cs.virginia.edu/~mwb7w/cuda_support/pinned_tradeoff.html
dirData = (float*) malloc(LAT_SIZE * LONG_SIZE * sizeof(float));
h_row = (float*) malloc(NumOfBirds * (TIMESTEPS + 1) * sizeof(float));
h_col = (float*) malloc(NumOfBirds * (TIMESTEPS + 1) * sizeof(float));
h_birdStatus = (uint8_t*)malloc(NumOfBirds * sizeof(uint8_t));
h_birdTimesteps = (int*)malloc(NumOfBirds * sizeof(int));
lwData = (float*) malloc(LAT_SIZE * LONG_SIZE * sizeof(float));
//------------------------------------------------------------------------------------------------------------------//
// Pinned host buffers for the six large weather arrays so the
// cudaMemcpyAsync staging below can actually run asynchronously.
HANDLE_ERROR(cudaMallocHost((void **)&udata,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(cudaMallocHost((void **)&vdata,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(cudaMallocHost((void **)&u10data,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(cudaMallocHost((void **)&v10data,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(cudaMallocHost((void **)&precipData,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
HANDLE_ERROR(cudaMallocHost((void **)&pressureData,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)));
//printf("Size of large arrays is %zd\n",sizeof(udata)/sizeof(udata[0]));
//printf("Size of large arrays is %ld\n",sizeof(udata)/sizeof(float));
//printf("Size of large arrays is %ld\n",sizeof(udata)/sizeof(float));
int ii,jj;
// Every bird starts every timestep slot at the user-provided position.
for(ii=0;ii<(NumOfBirds * (TIMESTEPS + 1));ii++){
*(h_row + ii) = starting_row;
*(h_col + ii) = starting_col;
}
//Setting the current status of the birds to Alive
//And the current timestep at the starting timestep defined by the user
for(ii=0;ii<NumOfBirds;ii++){
h_birdStatus[ii] = (uint8_t)1;
h_birdTimesteps[ii] = (int)offset_into_data;
}
//--------------------------Initializing the structures for pthreads-----------------------------------------------------------//
inpStruct[0].fp = vdataTxt;
inpStruct[0].inpVals = vdata;
inpStruct[1].fp = udataTxt;
inpStruct[1].inpVals = udata;
inpStruct[2].fp = v10dataTxt;
inpStruct[2].inpVals = v10data;
inpStruct[3].fp = u10dataTxt;
inpStruct[3].inpVals = u10data;
inpStruct[4].fp = precipTxt;
inpStruct[4].inpVals = precipData;
inpStruct[5].fp = pressureTxt;
inpStruct[5].inpVals = pressureData;
inpStruct[6].fp = lwTxt;
inpStruct[6].inpVals = lwData;
inpStruct[7].fp = dirTxt;
inpStruct[7].inpVals = dirData;
/** Using pthreads to read from the files in parallel**/
pthread_t threads[8];
printf("Before pthreads creation \n");
int i;
for(i=0;i<8;i++){
if(pthread_create(&threads[i],NULL,read_dataFiles,(void*)&inpStruct[i]) != 0){
fprintf(stderr,"ERROR: Thread creation using pthreads failed\n");
exit(-1);
}
}
printf("After pthreads creation and before join\n ");
for(i=0;i<8;i++){
if(pthread_join(threads[i],NULL)!=0){
fprintf(stderr,"ERROR: Thread join failed\n");
exit(-1);
}
}
printf("After pthreads join\n ");
printf("End of parallel data read\n");
int DeviceCount;
/** Getting the total number of devices available **/
HANDLE_ERROR(cudaGetDeviceCount(&DeviceCount));
HANDLE_ERROR(cudaSetDevice(DeviceCount - 1));
//-------------------------------------------------------------------------------------------------------------------------//
HANDLE_ERROR(cudaMalloc((void**)&d_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_lwData,LAT_SIZE * LONG_SIZE * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_dirData,LAT_SIZE * LONG_SIZE * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_birdStatus,NumOfBirds * sizeof(uint8_t)));
HANDLE_ERROR(cudaMalloc((void**)&d_birdTimesteps,NumOfBirds * sizeof(int)));
printf("After cudaMalloc of non changing data\n");
/*
HANDLE_ERROR(cudaMemcpyAsync(d_row,h_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyHostToDevice,streams_posData[0]));
HANDLE_ERROR(cudaMemcpyAsync(d_col,h_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyHostToDevice,streams_posData[1]));
HANDLE_ERROR(cudaMemcpyAsync(d_lwData,lwData,LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyHostToDevice,streams_posData[2]));
HANDLE_ERROR(cudaMemcpyAsync(d_dirData,dirData,LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyHostToDevice,streams_posData[3]));
HANDLE_ERROR(cudaMemcpyAsync(d_birdStatus,h_birdStatus,NumOfBirds * sizeof(uint8_t),cudaMemcpyHostToDevice,streams_posData[4]));
*/
HANDLE_ERROR(cudaMemcpy(d_row,h_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_col,h_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_lwData,lwData,LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_dirData,dirData,LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_birdStatus,h_birdStatus,NumOfBirds * sizeof(uint8_t),cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_birdTimesteps,h_birdTimesteps,NumOfBirds * sizeof(int),cudaMemcpyHostToDevice));
printf("After cudaMemcpy of non changing data\n");
//------------------------------------Getting the size of data needed per transfer---------------------------------------------//
//Maximum number of days that a bird can fly continiously
int MaxFlightDays = BIRD_HRS_LIMIT/TIMESTEPS_PER_DAY;
long int TimestepsPerTransfer = (TOTAL_DAYS_PER_DATA_TRANSFER + MaxFlightDays) * TIMESTEPS_PER_DAY;
long int TotalDataPerIteration = TimestepsPerTransfer * LAT_SIZE * LONG_SIZE * sizeof(float);
long int TotalDataPerDay = TIMESTEPS_PER_DAY * LAT_SIZE * LONG_SIZE * sizeof(float);
long int TotalDataForThreeDays = 3 * TIMESTEPS_PER_DAY * LAT_SIZE * LONG_SIZE * sizeof(float);
// int DaysTransferrable = TOTAL_DAYS - 1 - (MaxFlightDays - 1);
int DaysTransferrable = ((TOTAL_DAYS - 1 - MaxFlightDays + 1)/TOTAL_DAYS_PER_DATA_TRANSFER) * TOTAL_DAYS_PER_DATA_TRANSFER;
int DaysRemaining_Transferrable = (TOTAL_DAYS - MaxFlightDays) - DaysTransferrable ;
//-----------------------------------------------------------------------------------------------------------------------------//
long int h_offset,d_offset,h_offsetStart,d_offsetStart;
int start_timestep,cur_timestep,max_timesteps;
h_offset = INITIAL_SKIP_TIMESTEPS * TIMESTEPS_PER_DAY * LAT_SIZE * LONG_SIZE;
d_offset = 0;
cur_timestep = offset_into_data;
HANDLE_ERROR(cudaSetDevice(DeviceCount - 1));
printf("After selecting the correct device\n");
// Device-side rolling buffers: each holds TimestepsPerTransfer worth
// of weather data, refilled one day at a time in the loop below.
HANDLE_ERROR(cudaMalloc((void**)&d_udata,TotalDataPerIteration));
HANDLE_ERROR(cudaMalloc((void**)&d_vdata,TotalDataPerIteration));
HANDLE_ERROR(cudaMalloc((void**)&d_u10data,TotalDataPerIteration));
HANDLE_ERROR(cudaMalloc((void**)&d_v10data,TotalDataPerIteration));
HANDLE_ERROR(cudaMalloc((void**)&d_precipData,TotalDataPerIteration));
HANDLE_ERROR(cudaMalloc((void**)&d_pressureData,TotalDataPerIteration));
printf("After cudaMalloc for the changing data\n");
HANDLE_ERROR(cudaDeviceSynchronize());
int current_index, next_index;
//current_index = ((offset_into_data - INITIAL_SKIP_TIMESTEPS)/24) % total_streams;
ii = -1;
jj = 0;
current_index = 0;
//printf("After cudaMemcpyAsync for the changing data\n");
dim3 gridSize((NumOfBirds + 32 - 1)/32,1,1);
dim3 blockSize(32,1,1);
int zz = 0;
//-----------------------------------Creating streams-------------------------------------------//
//Hardcoded for Kepler architecture
const int total_streams = 32;
cudaStream_t streams[total_streams];
for(i = 0;i<total_streams;i++){
HANDLE_ERROR(cudaStreamCreate(&streams[i]));
}
printf("After streams creation for the changing data\n");
// Main staging loop: each pass copies 5 batches of weather data
// (4 single days + 1 three-day chunk) into the device ring buffer on
// dedicated streams, then runs one bird_movement kernel launch.
// NOTE(review): `i` is advanced both by the for-statement and by the
// `i = i + 1` lines inside the body -- the loop consumes 5 days per
// kernel call; keep that in mind before restructuring.
for(i=0;i<DaysTransferrable;i=i+1){
//HANDLE_ERROR(cudaSetDevice(DeviceCount - 1));
start_timestep = i * TIMESTEPS_PER_DAY + INITIAL_SKIP_TIMESTEPS;
max_timesteps = start_timestep + TimestepsPerTransfer;
cur_timestep = start_timestep;
//Has to change once the start dates for each bird changes
if(start_timestep >= offset_into_data){
//All of these are inclusive
//If TimeStepsPerTransfer is 9, then they would be: 0-8, 9-17, 18-26,...
start_timestep = i * TIMESTEPS_PER_DAY + INITIAL_SKIP_TIMESTEPS;
max_timesteps = start_timestep + TimestepsPerTransfer;
cur_timestep = start_timestep;
ii = ii + 1;
// NOTE(review): h_offset mixes an element-count product with the
// raw INITIAL_SKIP_TIMESTEPS term (not scaled by LAT*LONG) --
// confirm the intended units before changing.
h_offset = (TIMESTEPS_PER_DAY * LAT_SIZE * LONG_SIZE) * i + INITIAL_SKIP_TIMESTEPS;
d_offset = TotalDataPerDay/sizeof(float) *(ii % 5);
h_offsetStart = h_offset;
d_offsetStart = d_offset;
HANDLE_ERROR(cudaMemcpyAsync(d_udata + d_offset,udata+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[0]));
HANDLE_ERROR(cudaMemcpyAsync(d_vdata + d_offset,vdata+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[1]));
HANDLE_ERROR(cudaMemcpyAsync(d_u10data + d_offset,u10data+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[2]));
HANDLE_ERROR(cudaMemcpyAsync(d_v10data + d_offset,v10data+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[3]));
HANDLE_ERROR(cudaMemcpyAsync(d_precipData + d_offset,precipData+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[4]));
HANDLE_ERROR(cudaMemcpyAsync(d_pressureData + d_offset,pressureData+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[5]));
i = i + 1;
//-----------------------------------------------------------------------------------------------------------------------------//
ii = ii + 1;
h_offset = (TIMESTEPS_PER_DAY * LAT_SIZE * LONG_SIZE) * i + INITIAL_SKIP_TIMESTEPS;
d_offset = TotalDataPerDay/sizeof(float) *(ii % 5);
HANDLE_ERROR(cudaMemcpyAsync(d_udata + d_offset,udata+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[6]));
HANDLE_ERROR(cudaMemcpyAsync(d_vdata + d_offset,vdata+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[7]));
HANDLE_ERROR(cudaMemcpyAsync(d_u10data + d_offset,u10data+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[8]));
HANDLE_ERROR(cudaMemcpyAsync(d_v10data + d_offset,v10data+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[9]));
HANDLE_ERROR(cudaMemcpyAsync(d_precipData + d_offset,precipData+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[10]));
HANDLE_ERROR(cudaMemcpyAsync(d_pressureData + d_offset,pressureData+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[11]));
i = i + 1;
//-----------------------------------------------------------------------------------------------------------------------------//
ii = ii + 1;
h_offset = (TIMESTEPS_PER_DAY * LAT_SIZE * LONG_SIZE) * i + INITIAL_SKIP_TIMESTEPS;
d_offset = TotalDataPerDay/sizeof(float) *(ii % 5);
HANDLE_ERROR(cudaMemcpyAsync(d_udata + d_offset,udata+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[12]));
HANDLE_ERROR(cudaMemcpyAsync(d_vdata + d_offset,vdata+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[13]));
HANDLE_ERROR(cudaMemcpyAsync(d_u10data + d_offset,u10data+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[14]));
HANDLE_ERROR(cudaMemcpyAsync(d_v10data + d_offset,v10data+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[15]));
HANDLE_ERROR(cudaMemcpyAsync(d_precipData + d_offset,precipData+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[16]));
HANDLE_ERROR(cudaMemcpyAsync(d_pressureData + d_offset,pressureData+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[17]));
i = i + 1;
//-----------------------------------------------------------------------------------------------------------------------------//
ii = ii + 1;
h_offset = (TIMESTEPS_PER_DAY * LAT_SIZE * LONG_SIZE) * i + INITIAL_SKIP_TIMESTEPS;
d_offset = TotalDataPerDay/sizeof(float) *(ii % 5);
HANDLE_ERROR(cudaMemcpyAsync(d_udata + d_offset,udata+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[18]));
HANDLE_ERROR(cudaMemcpyAsync(d_vdata + d_offset,vdata+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[19]));
HANDLE_ERROR(cudaMemcpyAsync(d_u10data + d_offset,u10data+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[20]));
HANDLE_ERROR(cudaMemcpyAsync(d_v10data + d_offset,v10data+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[21]));
HANDLE_ERROR(cudaMemcpyAsync(d_precipData + d_offset,precipData+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[22]));
HANDLE_ERROR(cudaMemcpyAsync(d_pressureData + d_offset,pressureData+h_offset,TotalDataPerDay,cudaMemcpyHostToDevice,streams[23]));
i = i + 1;
//-----------------------------------------------------------------------------------------------------------------------------//
ii = ii + 1;
h_offset = (TIMESTEPS_PER_DAY * LAT_SIZE * LONG_SIZE) * i + INITIAL_SKIP_TIMESTEPS;
d_offset = TotalDataPerDay/sizeof(float) *(ii % 5);
HANDLE_ERROR(cudaMemcpyAsync(d_udata + d_offset,udata+h_offset,TotalDataForThreeDays,cudaMemcpyHostToDevice,streams[24]));
HANDLE_ERROR(cudaMemcpyAsync(d_vdata + d_offset,vdata+h_offset,TotalDataForThreeDays,cudaMemcpyHostToDevice,streams[25]));
HANDLE_ERROR(cudaMemcpyAsync(d_u10data + d_offset,u10data+h_offset,TotalDataForThreeDays,cudaMemcpyHostToDevice,streams[26]));
HANDLE_ERROR(cudaMemcpyAsync(d_v10data + d_offset,v10data+h_offset,TotalDataForThreeDays,cudaMemcpyHostToDevice,streams[27]));
HANDLE_ERROR(cudaMemcpyAsync(d_precipData + d_offset,precipData+h_offset,TotalDataForThreeDays,cudaMemcpyHostToDevice,streams[28]));
HANDLE_ERROR(cudaMemcpyAsync(d_pressureData + d_offset,pressureData+h_offset,TotalDataForThreeDays,cudaMemcpyHostToDevice,streams[29]));
// Wait for every staging copy before the kernel consumes the buffers.
for(jj=0;jj<total_streams;jj++){
HANDLE_ERROR(cudaStreamSynchronize(streams[jj]));
}
printf("########################################################################");
printf("Kernel call# %d\n",zz);
bird_movement<<<gridSize,blockSize,0,streams[30]>>>(d_row,d_col,NumOfBirds,start_timestep,cur_timestep,max_timesteps,
d_udata,d_vdata,d_u10data,d_v10data,d_dirData,d_precipData,d_pressureData,d_lwData,d_birdStatus,d_birdTimesteps);
zz++;
HANDLE_ERROR(cudaStreamSynchronize(streams[30]));
}
}
printf("Number of days: %d\n",i);
// Pull the full position history back and append it to the text files,
// one line per bird (TIMESTEPS+1 values each).
HANDLE_ERROR(cudaMemcpy(h_row,d_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(h_col,d_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyDeviceToHost));
for(i = 0;i < NumOfBirds * (TIMESTEPS + 1); i++ ){
fprintf(rowdataTxt,"%f ",h_row[i]);
if(((i+1) % (TIMESTEPS + 1)) == 0){
fprintf(rowdataTxt,"%f \n",h_row[i]);
}
}
for(i = 0;i < NumOfBirds * (TIMESTEPS + 1); i++ ){
fprintf(coldataTxt,"%f ",h_col[i]);
if(((i+1) % (TIMESTEPS + 1)) == 0){
fprintf(coldataTxt,"%f \n",h_col[i]);
}
}
//-----------------------------------------------Freeing allocated memory--------------------------------------//
for(i = 0;i<total_streams;i++){
HANDLE_ERROR(cudaStreamDestroy(streams[i]));
}
HANDLE_ERROR(cudaFree(d_udata));
HANDLE_ERROR(cudaFree(d_vdata));
HANDLE_ERROR(cudaFree(d_u10data));
HANDLE_ERROR(cudaFree(d_v10data));
HANDLE_ERROR(cudaFree(d_precipData));
HANDLE_ERROR(cudaFree(d_pressureData));
HANDLE_ERROR(cudaFree(d_row));
HANDLE_ERROR(cudaFree(d_col));
HANDLE_ERROR(cudaFree(d_birdStatus));
HANDLE_ERROR(cudaFree(d_birdTimesteps));
HANDLE_ERROR(cudaFreeHost(udata));
HANDLE_ERROR(cudaFreeHost(vdata));
HANDLE_ERROR(cudaFreeHost(u10data));
HANDLE_ERROR(cudaFreeHost(v10data));
HANDLE_ERROR(cudaFreeHost(precipData));
HANDLE_ERROR(cudaFreeHost(pressureData));
free(dirData);
free(h_row);
free(h_col);
free(lwData);
free(h_birdStatus);
free(h_birdTimesteps);
fclose(birdStatusTxt);
fclose(dirTxt);
fclose(udataTxt);
fclose(vdataTxt);
fclose(v10dataTxt);
fclose(u10dataTxt);
fclose(precipTxt);
fclose(pressureTxt);
fclose(lwTxt);
fclose(rowdataTxt);
fclose(coldataTxt);
//printf("End\n");
HANDLE_ERROR(cudaDeviceReset());
exit(0);
return 0;
}
|
13,003 | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 256
__global__ void vecAdd(int *a,int *b,int *c);
void vecAdd_cpu(int a[N],int b[N],int c[N]){
// CPU reference implementation of element-wise vector addition,
// used to validate and time against the GPU kernel.
int idx = 0;
while (idx < N) {
c[idx] = a[idx] + b[idx];
++idx;
}
}
int main(){
// Benchmarks element-wise addition of two N-int vectors on GPU vs CPU
// and reports transfer times, kernel time, CPU time and the speedup.
// Fix: the original leaked all six cudaEvent_t handles and the device
// buffers were released only because exit() tore the process down;
// events are now destroyed and main returns normally.
int a[N],b[N],gpu_add[N],cpu_add[N];
int *dev_a, *dev_b, *dev_c;
float time_gpu,time_cpu,timeindex,timeinit;
// Deterministic inputs: a[i] = 2i, b[i] = i*i.
for(int i=0;i<N;i++){
a[i]=i+i;
b[i]=i*i;
}
int size=N*sizeof(int);
cudaMalloc((void**) &dev_a, size);
cudaMalloc((void**) &dev_b,size);
cudaMalloc((void**) &dev_c,size);
// --- Time the host -> device transfer of the two input vectors ---
cudaEvent_t startinit,endinit;
cudaEventCreate(&startinit);
cudaEventCreate(&endinit);
cudaEventRecord(startinit, 0);
cudaMemcpy(dev_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,size,cudaMemcpyHostToDevice);
cudaEventRecord(endinit, 0);
cudaEventSynchronize(endinit);
cudaEventElapsedTime(&timeinit, startinit, endinit);
// --- Time the kernel itself (one block of N threads) ---
cudaEvent_t gpu_start,gpu_end;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_end);
cudaEventRecord(gpu_start, 0);
vecAdd<<<1,N>>> (dev_a,dev_b,dev_c);
cudaDeviceSynchronize();
cudaEventRecord(gpu_end, 0);
cudaEventSynchronize(gpu_end);
cudaEventElapsedTime(&time_gpu, gpu_start, gpu_end);
// --- Time the device -> host transfer of the result ---
cudaEvent_t startindex,endindex;
cudaEventCreate(&startindex);
cudaEventCreate(&endindex);
cudaEventRecord(startindex, 0);
cudaMemcpy(gpu_add,dev_c,size,cudaMemcpyDeviceToHost);
cudaEventRecord(endindex, 0);
cudaEventSynchronize(endindex);
cudaEventElapsedTime(&timeindex, startindex, endindex);
// --- CPU reference timing ---
clock_t cpu_start,cpu_end;
cpu_start=clock();
vecAdd_cpu(a,b,cpu_add);
cpu_end=clock();
// cudaEventElapsedTime reports milliseconds; convert to seconds.
timeinit/=1000;
timeindex/=1000;
time_gpu/=1000;
time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC);
printf("Time for sending initial data from host to device : %f\t sec\n",timeinit);
printf("Cuda program launched with 1 block and %d threads\n",N);
printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex);
printf("GPU Time:%f seconds\n",time_gpu);
printf("CPU Time:%f seconds\n",time_cpu);
// Element-wise comparison of GPU result against the CPU reference.
int flag=1;
for(int i=0;i<N;i++){
if(gpu_add[i]!=cpu_add[i]){
flag=0;
break;
}
}
if(flag){
printf("TEST PASSED\n");
printf("SPEED UP:%f\n",time_cpu/time_gpu);
}
else{
printf("TEST FAILED\n");
}
// Release timing events and device buffers (events were leaked before).
cudaEventDestroy(startinit);
cudaEventDestroy(endinit);
cudaEventDestroy(gpu_start);
cudaEventDestroy(gpu_end);
cudaEventDestroy(startindex);
cudaEventDestroy(endindex);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
__global__ void vecAdd(int *a,int *b,int *c){
// One thread per element; expects a launch of exactly N threads in a
// single block (<<<1, N>>>), so no bounds check is performed.
const int elem = threadIdx.x;
c[elem] = a[elem] + b[elem];
}
|
13,004 | #define ulong unsigned long long
#define uint unsigned int
// B=A%MODP
__global__ void CreateAmod(uint *arrayB,uint *arrayA,uint modp) {
// Element-wise B = A mod p. One thread per 32-bit word; launches use
// 256-thread blocks (the hard-coded stride). No bounds check: the
// grid must cover exactly the array length.
const uint globalId = blockIdx.x * 256u + threadIdx.x;
arrayB[globalId] = arrayA[globalId] % modp;
}
//C=A-B
__global__ void SubCAB(uint *arrayC,uint *arrayA,uint *arrayB,uint arrayLength) {
// Multi-limb subtraction folded into C: adds (A - B) limb-wise into C,
// where limb 0 is the least significant 32-bit word. One thread per
// limb, 256-thread blocks. B is negated as its bitwise complement,
// with the two's-complement +1 handled only at limb 0 via `flag`.
// NOTE(review): the +1 only appears to be applied in the
// a == 0xffffffff special case below; for other values of a at limb 0
// the result is A + ~B = A - B - 1 -- confirm against the callers.
int idx = threadIdx.x+blockIdx.x*256;
uint flag=0;
if (idx==0)flag=1;
// b = ~B[idx] (0xffffffff - x is the bitwise complement).
uint b=4294967295-arrayB[idx];
uint a=arrayA[idx];
uint upflag=0;
// Limb 0 with a == 0xffffffff: fold the +1 in by wrapping a to 0 and
// recording the carry immediately.
if ((a==4294967295)&(flag==1)){
upflag=1;
a=0;
}
uint c=a+b;
// Wrap-around on the limb sum means a carry into the next limb.
if (c<b){
upflag=1;
}
uint lastC_0 = atomicAdd( &arrayC[idx+0], c );
if ((lastC_0+c)<lastC_0){// account for the carry out of this limb
upflag++;
}
uint lastC_i;
for(int i=idx+1;i<arrayLength;i++){ // ripple the carry upward (e.g. 9999999+1)
if (upflag==0)break;
lastC_i = atomicAdd( &arrayC[i], upflag );
if ((lastC_i+upflag)<lastC_i){
upflag=1;
}else{
upflag=0;
}
}
}
//C=A+B
__global__ void AddCAB(uint *arrayC,uint *arrayA,uint *arrayB,uint arrayLength) {
// Multi-limb addition folded into C: adds (A + B) limb-wise into C,
// where limb 0 is the least significant 32-bit word. One thread per
// limb, 256-thread blocks; carries are rippled upward with atomicAdd.
int idx = threadIdx.x+blockIdx.x*256;
uint b=arrayB[idx];
uint a=arrayA[idx];
uint upflag=0;
uint c=a+b;
// Wrap-around on the limb sum means a carry into the next limb.
if (c<b){
upflag=1;
}
uint lastC_0 = atomicAdd( &arrayC[idx+0], c );
if ((lastC_0+c)<lastC_0){// account for the carry out of this limb
upflag++;
}
uint lastC_i;
for(int i=idx+1;i<arrayLength;i++){ // ripple the carry upward (e.g. 9999999+1)
if (upflag==0)break;
lastC_i = atomicAdd( &arrayC[i], upflag );
if ((lastC_i+upflag)<lastC_i){
upflag=1;
}else{
upflag=0;
}
}
}
//C=0
__global__ void SetZero(uint *arrayC) {
// Zero-fill: each thread clears its own word (256-thread blocks;
// the grid must cover exactly the array length).
const int pos = blockIdx.x * 256 + threadIdx.x;
arrayC[pos] = 0u;
}
//B must be zero-initialized by the caller beforehand
//B = A*(1<<n)
__global__ void ShiftBA(uint *arrayB,uint *arrayA,int n,uint arrayLength) {
// Accumulates A shifted left by n bits into B across limb boundaries
// (B must be zero-initialized by the caller to get B = A << n).
// One thread per limb, 256-thread blocks; n may be negative.
int idx = threadIdx.x+blockIdx.x*256;
uint a=arrayA[idx];
// Split n into a whole-limb shift nn and a residual bit shift n in [0,31].
int nn=n/32;
n-=nn*32;
if (n<0){
n+=32;
nn-=1;
}
uint sa0=a<<((uint)n);// bits that stay in limb idx+nn
// Fix: when n == 0 the original computed a >> 32, which is undefined
// behavior for a 32-bit type; the carry into the next limb is zero then.
uint sa1=(n==0)?0u:(a>>((uint)(32-n)));// bits carried into limb idx+nn+1
if ((idx+nn<arrayLength)&((idx+nn>=0))){
atomicAdd( &arrayB[idx+nn], sa0 );
}
if (((idx+nn+1)<arrayLength)&(((idx+nn+1)>=0))){
atomicAdd( &arrayB[idx+nn+1], sa1 );
}
}
//memcpy_uint ver
//allow overflow
__global__ void memcpy_dtod_ui4(uint *dst,uint *src,uint dstoffset,uint srcoffset,uint copysize) {
// Device-to-device copy of copysize 32-bit words with independent
// source and destination offsets; 256-thread blocks. Threads past
// copysize simply exit, so over-provisioned grids are allowed.
const int tid = blockIdx.x * 256 + threadIdx.x;
if (tid >= copysize) {
return;
}
dst[dstoffset + tid] = src[srcoffset + tid];
}
//Initial reciprocal estimate used to seed the Newton iteration
//Writes the result to out as two 32-bit words (low, high)
__global__ void FirstNewtonRev(uint *out,uint *A,uint index) {
// Single-thread kernel: computes c = floor((2^64 - 1) / A[index]) as
// the seed for a Newton reciprocal iteration, then stores it as two
// 32-bit words: out[0] = low, out[1] = high.
ulong a=A[index];
ulong b=18446744073709551615;// 2^64 - 1
ulong c=b/a;
// If the remainder is a-1, the true quotient of 2^64 / a is one
// larger than (2^64 - 1) / a, so round up.
if ((b%a)==(a-1)){
c++;
}
uint c0=c%4294967296;// low 32 bits
uint c1=c/4294967296;// high 32 bits
out[0]=c0;
out[1]=c1;
}
//Initial inverse-square-root estimate used to seed the Newton iteration
//Writes the result to out as two 32-bit words (low, high)
__global__ void FirstNewtonSqrt(uint *out,uint *A,uint index) {
// Single-thread kernel: treats A[index] / 2^32 as a fixed-point
// fraction, computes 1/sqrt of it in double precision, scales the
// result back by 2^32 and stores it as two 32-bit words:
// out[0] = low, out[1] = high. Seed for a Newton rsqrt iteration.
double a=(double)A[index];
a/=4294967296.0;
a=1.0/sqrt(a);
a*=4294967296.0;
ulong c=(ulong)a;
uint c0=c%4294967296;// low 32 bits
uint c1=c/4294967296;// high 32 bits
out[0]=c0;
out[1]=c1;
}
13,005 | #include "includes.h"
__global__ void sumThreadedResultsKernel(long *dev_hist, int *dev_threadedHist, const int valRange, const int Blocks)
{
// Collapses per-block partial histograms into the final histogram.
// One thread per bin (tid in 0..valRange-1): thread tid owns bin tid
// and walks all Blocks partial histograms, each valRange bins wide.
const int bin = threadIdx.x;
long total = dev_hist[bin];
for (int part = 0; part < Blocks; part++)
{
total += dev_threadedHist[part * valRange + bin];
}
dev_hist[bin] = total;
}
13,006 | #include <iostream>
// System includes
#include <assert.h>
#include <stdio.h>
// CUDA runtime
#include <cuda_runtime.h>
using namespace std;
__host__ __device__ int sum(dim3 three){
// Adds the three components of a dim3 and returns the total as int.
int total = three.x;
total += three.y;
total += three.z;
return total;
}
__global__ void three_to_two(int* data_3d, int* data_2d, int size){
// Collapses a size^3 cube along its outermost axis into a size^2 plane.
// Fix: the original accumulated with += directly into data_2d, which is
// only correct when the output happens to be zero-initialized
// (cudaMalloc does not zero memory). Sum into a register and overwrite.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < size && y < size){
int acc = 0;
for (int i = 0; i < size; ++i)
{
acc += data_3d[x + size * y + size * size * i];
}
data_2d[x + size * y] = acc;
}
}
__global__ void two_to_one(int* data_2d, int* data_1d, int size){
// Collapses a size^2 plane along its second axis into a size vector.
// Fix: the original accumulated with += directly into data_1d, which is
// only correct when the output happens to be zero-initialized
// (cudaMalloc does not zero memory). Sum into a register and overwrite.
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < size){
int acc = 0;
for (int i = 0; i < size; ++i)
{
acc += data_2d[x + size * i];
}
data_1d[x] = acc;
}
}
__global__ void one_to_zero(int* data_1d, int* data_0d, int size){
// Reduces a size vector to a single scalar; only thread 0 does work.
// Fix: the original accumulated with += directly into data_0d, which is
// only correct when the output happens to be zero-initialized
// (cudaMalloc does not zero memory). Sum into a register and overwrite.
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < 1){
int acc = 0;
for (int i = 0; i < size; ++i)
{
acc += data_1d[i];
}
data_0d[0] = acc;
}
}
__global__ void test(dim3 data){
// Intentional no-op: only demonstrates that a dim3 can be passed to a
// kernel by value.
(void)data;
}
int main(){
// Reduces a 6x6x6 cube holding the values 0..215 down to one scalar
// (expected: 215*216/2 = 23220) through 3D->2D->1D->0D kernel stages.
// Fixes: (1) the original allocated the host buffer with
// `new int(216)` -- a SINGLE int initialized to 216 -- and then wrote
// 216 elements through it (heap overflow); (2) the reduction kernels
// accumulate with +=, but the device outputs were never zeroed
// (cudaMalloc does not zero memory); (3) nothing was ever freed.
int* data_3d;
int* data_2d;
int* data_1d;
int* data_0d;
cudaMalloc(&data_3d, 216 * sizeof(int));
cudaMalloc(&data_2d, 36 * sizeof(int));
cudaMalloc(&data_1d, 6 * sizeof(int));
cudaMalloc(&data_0d, 1 * sizeof(int));
// Fix (1): array allocation, not a single value-initialized int.
int* data_3h = new int[216];
for (int i = 0; i < 216; ++i)
{
data_3h[i] = i;
}
cudaMemcpy(data_3d, data_3h, 216 * sizeof(int), cudaMemcpyHostToDevice);
// Fix (2): the += reductions need zeroed outputs.
cudaMemset(data_2d, 0, 36 * sizeof(int));
cudaMemset(data_1d, 0, 6 * sizeof(int));
cudaMemset(data_0d, 0, 1 * sizeof(int));
dim3 tpb(3, 3);
dim3 bpg(2, 2);
three_to_two<<<bpg, tpb>>>(data_3d, data_2d, 6);
tpb = (3);
bpg = (2);
two_to_one<<<bpg, tpb>>>(data_2d, data_1d, 6);
tpb = (1);
bpg = (1);
one_to_zero<<<bpg, tpb>>>(data_1d, data_0d, 6);
int answer;
// Blocking copy also synchronizes with the kernels above.
cudaMemcpy(&answer, data_0d, sizeof(int), cudaMemcpyDeviceToHost);
std::cout << answer << "\n";
test<<<bpg, tpb>>>(bpg);
cudaDeviceSynchronize();
std::cout << "done\n";
// Fix (3): release device and host allocations.
cudaFree(data_3d);
cudaFree(data_2d);
cudaFree(data_1d);
cudaFree(data_0d);
delete[] data_3h;
return 0;
}
13,007 | #include <iostream>
#include <cuda.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include<stdlib.h>
using namespace std;
// Shared-memory timing microbenchmark: every thread touches the SAME word
// (shared[0]). A same-address access within a warp is a broadcast, not a
// bank conflict. The concurrent ++ is a deliberate data race -- only the
// measured latency matters, not the value.
__global__ void mykernel1(unsigned long long* time)
{
	__shared__ float shared[1024];
	// clock() returns per-SM clock ticks
	unsigned long long startTime = clock();
	// all threads are accessing the same location (broadcast)
	shared[0]++;
	unsigned long long finishTime = clock();
	// every thread writes the same slot; last writer wins (fine for timing)
	*time = (finishTime-startTime);
}
// Timing microbenchmark: stride-1 access -- each lane hits its own word and
// therefore its own bank, so no bank conflicts are expected.
__global__ void mykernel2(unsigned long long* time)
{
	__shared__ float shared[1024];
	unsigned long long startTime = clock();
	// no bank conflict: consecutive lanes map to consecutive banks
	shared[threadIdx.x]++;
	unsigned long long finishTime = clock();
	*time = (finishTime-startTime);
}
// Timing microbenchmark: stride-4 (in 4-byte words) access. With 32 banks,
// 32 lanes land on only 8 distinct banks -- expected ~4-way bank conflict.
__global__ void mykernel3(unsigned long long* time)
{
	__shared__ float shared[1024];
	unsigned long long startTime = clock();
	shared[threadIdx.x*4]++;
	unsigned long long finishTime = clock();
	*time = (finishTime-startTime);
}
// Timing microbenchmark: stride-8 access -- 32 lanes on 4 distinct banks,
// expected ~8-way bank conflict.
__global__ void mykernel4(unsigned long long* time)
{
	__shared__ float shared[1024];
	unsigned long long startTime = clock();
	shared[threadIdx.x*8]++;
	unsigned long long finishTime = clock();
	*time = (finishTime-startTime);
}
// Timing microbenchmark: stride-32 access -- every lane maps to the SAME
// bank (worst case, 32-way conflict); accesses fully serialize.
// Max index is 31*32 = 992, within the 1024-float array.
__global__ void mykernel5(unsigned long long* time)
{
	__shared__ float shared[1024];
	unsigned long long startTime = clock();
	shared[threadIdx.x*32]++;
	unsigned long long finishTime = clock();
	*time = (finishTime-startTime);
}
// Runs each shared-memory access pattern with a single warp (32 threads)
// and prints the measured clock ticks, normalized by subtracting the
// ~14-tick clock() overhead and averaging over the 32 threads.
int main()
{
	unsigned long long time;
	unsigned long long* d_time;
	cudaMalloc(&d_time, sizeof(unsigned long long));
	mykernel1<<<1,32>>>(d_time);
	cudaMemcpy(&time, d_time, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
	// BUG FIX: the argument is unsigned long long, so the old "%d" was
	// undefined behavior (prints garbage on LP64); use %llu throughout.
	printf("Time for shared[0]: %llu\n",(time-14)/32);
	mykernel2<<<1,32>>>(d_time);
	cudaMemcpy(&time, d_time, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
	printf("Time for shared[threadIdx.x]: %llu\n",(time-14)/32);
	mykernel3<<<1,32>>>(d_time);
	cudaMemcpy(&time, d_time, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
	printf("Time for shared[threadIdx.x*4]: %llu\n",(time-14)/32);
	mykernel4<<<1,32>>>(d_time);
	cudaMemcpy(&time, d_time, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
	printf("Time for shared[threadIdx.x*8] : %llu\n",(time-14)/32);
	mykernel5<<<1,32>>>(d_time);
	cudaMemcpy(&time, d_time, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
	printf("Time for shared[threadIdx.x*32]: %llu\n",(time-14)/32);
	cudaFree(d_time);
	// needed when you want to use the profiler
	cudaDeviceReset();
	return 0;
}
|
13,008 | #include "includes.h"
// Splits an interleaved float2 image into separate X and Y planes.
// pitch_in / pitch_out are row strides in BYTES (cudaMallocPitch-style
// layout); one thread per pixel, 2-D launch.
__global__ void deInterleave_kernel(float *d_X_out, float *d_Y_out, float2 *d_XY_in, int pitch_out, int pitch_in, int width, int height) {
	const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
	const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
	if (col >= width || row >= height) return;  // outside the image
	const float2 *srcRow = (const float2 *)((const char *)d_XY_in + row * pitch_in);
	float *dstX = (float *)((char *)d_X_out + row * pitch_out);
	float *dstY = (float *)((char *)d_Y_out + row * pitch_out);
	const float2 v = srcRow[col];
	dstX[col] = v.x;
	dstY[col] = v.y;
}
13,009 | //*****************************************************************************************//
// hough_kernel.cu - CUDA Hough Transform Benchmark
//
// Authors: Ramnarayan Krishnamurthy, University of Colorado (Shreyas.Ramnarayan@gmail.com)
// Matthew Demi Vis, Embry-Riddle Aeronautical University (MatthewVis@gmail.com)
//
// This code was used to obtain results documented in the SPIE Sensor and Technologies paper:
// S. Siewert, V. Angoth, R. Krishnamurthy, K. Mani, K. Mock, S. B. Singh, S. Srivistava,
// C. Wagner, R. Claus, M. Demi Vis, “Software Defined Multi-Spectral Imaging for Arctic
// Sensor Networks”, SPIE Algorithms and Technologies for Multipectral, Hyperspectral, and
// Ultraspectral Imagery XXII, Baltimore, Maryland, April 2016.
//
// This code was developed for, tested and run on a Jetson TK1 development kit by NVIDIA
// running Ubuntu 14.04
//
// Please use at your own risk. We are sharing so that other researchers and developers can
// recreate our results and make suggestions to improve and extend the benchmarks over time.
//
//*****************************************************************************************//
#include <stdio.h>
#include <math.h>
#define MAXRGB 255
// 3x3 Sobel edge detector: one thread per pixel, 2-D launch. Pixels within
// two cells of the top/left edge and one cell of the bottom/right edge are
// left untouched (preserves the original boundary behavior). The gradient
// magnitude |Gx| + |Gy| is clamped to MAXRGB.
__global__ void sobel(u_char * frame_in, u_char * frame_out, int width, int height )
{
	const int px = blockDim.x*blockIdx.x+threadIdx.x;
	const int py = blockDim.y*blockIdx.y+threadIdx.y;
	const int idx = px + py*width;
	const long int npix = width*height;
	// Sobel convolution masks
	const int Kv[3][3] = {{-1,-2,-1},{0,0,0},{1,2,1}};
	const int Kh[3][3] = {{-1,0,1},{-2,0,2},{-1,0,1}};
	if (idx < npix && px > 1 && py > 1 && px < (width-1) && py < (height-1))
	{
		int gv = 0;
		int gh = 0;
		for (int r = 0; r < 3; r++)
		{
			for (int c = 0; c < 3; c++)
			{
				const int p = frame_in[(px+c-1)+(width*(py+r-1))];
				gv += Kv[r][c]*p;
				gh += Kh[r][c]*p;
			}
		}
		const int mag = abs(gv) + abs(gh);
		frame_out[idx] = (mag > MAXRGB) ? MAXRGB : mag;
	}
}
//***************************************************************//
// Host-side wrapper: launches the Sobel kernel so callers need no
// CUDA launch syntax. Caller supplies grid/block geometry.
//***************************************************************//
void sobel_wrapper(u_char * frame_in, u_char * frame_out, int width, int height, dim3 grid, dim3 block)
{
	sobel<<<grid, block>>>(frame_in, frame_out, width, height);
}
// Hough line transform: each thread owns one pixel of frame_in; for edge
// pixels (value > 250) it casts one vote per theta in [0,180) degrees into
// the (rho, theta) accumulator frame_out. rho is measured from the image
// center; hough_h offsets rho so the accumulator index stays non-negative.
// Frame dimensions are implied by the launch configuration (grid * block).
// NOTE(review): frame_out[...]++ is NOT atomic -- threads voting for the
// same (rho, theta) bin can lose updates; and a u_char bin wraps past 255.
// An int accumulator updated with atomicAdd would fix both, but requires
// changing the output buffer type at every call site -- flagged, not fixed.
__global__ void houghTransform(u_char * frame_in, u_char * frame_out,const int hough_h)
{
	int x = blockDim.x*blockIdx.x+threadIdx.x;
	int y = blockDim.y*blockIdx.y+threadIdx.y;
	int width = gridDim.x*blockDim.x;
	int height = gridDim.y*blockDim.y;
	int index = x + y*width;
	double DEG2RAD = 0.0174533;   // pi / 180
	double center_x = width/2;
	double center_y = height/2;
	if( frame_in[index] > 250 ) // edge threshold; must match the edge detector's output range
	{
		for(int t=0;t<180;t++)
		{
			// rho for this pixel at angle t (degrees), relative to the image center
			double r = ( ((double)x - center_x) * cos((double)t * DEG2RAD)) + (((double)y - center_y) * sin((double)t * DEG2RAD));
			// accumulator is laid out as rows of 180 theta bins per rho value
			frame_out[ (int)((round(r + hough_h) * 180.0)) + t]++;
		}
	}
}
//***************************************************************//
// Host-side wrapper: launches the Hough transform kernel so callers
// need no CUDA launch syntax. Run on the Sobel edge image.
//***************************************************************//
void houghTransform_wrapper(u_char * frame_in, u_char * frame_out, const int hough_h, dim3 grid, dim3 block)
{
	houghTransform<<<grid, block>>>(frame_in, frame_out, hough_h);
}
|
13,010 | #include "includes.h"
// For each not-yet-aggregated node, tallies which aggregate its neighbors
// belong to and joins the most common one.
// One thread per node (size = node count). adjIndexes/adjacency form a CSR
// adjacency list; partIn/partOut hold aggregate ids (-1 = unassigned);
// aggregated is a 0/1 flag per node.
// NOTE: the candidate table is fixed at 10 slots -- a node whose neighbors
// span more than 10 distinct aggregates silently drops the extras.
__global__ void allocateNodesKernel(int size, int *adjIndexes, int *adjacency, int *partIn, int *partOut, int *aggregated) {
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < size)
	{
		if (aggregated[idx] == 0)
		{
			// CSR slice for this node's neighbor list
			int start = adjIndexes[idx];
			int end = adjIndexes[idx + 1];
			// Storage for possible aggregations.
			int candidates[10];
			int candidateCounts[10];
			for (int i = 0; i < 10; i++)
			{
				candidates[i] = -1;
				candidateCounts[i] = 0;
			}
			// Tally each neighbor's aggregate (candidate == -1 means "done"):
			for (int i = start; i < end; i++)
			{
				int candidate = partIn[adjacency[i]];
				if (candidate != -1)
				{
					for (int j = 0; j < 10 && candidate != -1; j++)
					{
						if (candidates[j] == -1)
						{
							// Empty slot: record the new aggregate and stop.
							candidates[j] = candidate;
							candidateCounts[j] = 1;
							// BUG FIX: without clearing here, the new id was
							// copied into EVERY remaining empty slot, so any
							// later distinct aggregate could never be recorded.
							candidate = -1;
						} else
						{
							if (candidates[j] == candidate)
							{
								candidateCounts[j] += 1;
								candidate = -1;
							}
						}
					}
				}
			}
			// Finding the most adjacent aggregate and adding node to it:
			int addTo = candidates[0];
			int count = candidateCounts[0];
			for (int i = 1; i < 10; i++)
			{
				if (candidateCounts[i] > count)
				{
					count = candidateCounts[i];
					addTo = candidates[i];
				}
			}
			partOut[idx] = addTo;
			if (addTo != -1)
			{
				aggregated[idx] = 1;
			}
		}
	}
}
13,011 | //////////////////////////////////////////////////////////////////////////
////This is the code implementation for GPU Premier League Round 3: sparse linear solver
//////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <fstream>
#include <chrono>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/reduce.h>
// #include <thrust/fill.h>
// #include <thrust/sequence.h>
// #include <thrust/transform.h>
// #include <thrust/replace.h>
// #include <thrust/functional.h>
// #include <thrust/sort.h>
// #include <thrust/extrema.h>
// #include <thrust/inner_product.h>
using namespace std::chrono;
//////////////////////////////////////////////////////////////////////////
////TODO 0: Please replace the following strings with your team name and author names
////Note: Please do not use space in the string, use "_" instead
//////////////////////////////////////////////////////////////////////////
////Team/author identification; the team string becomes the results-file
////prefix, and main() refuses to run while it is the placeholder "Team_X".
namespace name
{
	std::string team="using_namespace_std;";
	std::string author_1="Jeff Liu";
};
//////////////////////////////////////////////////////////////////////////
////TODO: Read the following three CPU implementations for Jacobi, Gauss-Seidel, and Red-Black Gauss-Seidel carefully
////and understand the steps for these numerical algorithms
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
////These are the global variables that define the domain of the problem to solver
////You will need to use these parameters or macros in your GPU implementations
//////////////////////////////////////////////////////////////////////////
const int n=128; ////grid size, we will change this value to up to 256 to test your code
const int g=1; ////padding (ghost-cell ring) size on each side
const int s=(n+2*g)*(n+2*g); ////array size including the ghost ring
#define I(i,j) (i+g)*(n+2*g)+(j+g) ////2D coordinate -> array index (valid for i,j in [-g, n+g))
#define B(i,j) i<0||i>=n||j<0||j>=n ////true when (i,j) lies outside the n x n interior
const bool verbose=false; ////set false to turn off print for x and residual
const double tolerance=1e-3; ////tolerance (on the summed squared residual) for the iterative solvers
//////////////////////////////////////////////////////////////////////////
////The following are three sample implementations for CPU iterative solvers
////Jacobi iteration on the n x n interior, double-buffered.
////x holds the initial guess (boundary preset on the ghost ring) and
////receives the converged solution; b is the right-hand side.
void Jacobi_Solver(double* x,const double* b)
{
	double* buf=new double[s];
	memcpy(buf,x,sizeof(double)*s); ////so the ghost ring is valid in both buffers
	double* xr=x; ////read buffer pointer
	double* xw=buf; ////write buffer pointer
	int iter_num=0; ////iteration number
	int max_num=1e5; ////max iteration number
	double residual=10.0; ////residual
	auto cpu_start = system_clock::now();
	do{
		////update x values using the Jacobi iterative scheme
		for(int i=0;i<n;i++){
			for(int j=0;j<n;j++){
				xw[I(i,j)]=(b[I(i,j)]+xr[I(i-1,j)]+xr[I(i+1,j)]+xr[I(i,j-1)]+xr[I(i,j+1)])/4.0;
			}
		}
		////calculate residual of the freshly written iterate
		residual=0.0;
		for(int i=0;i<n;i++){
			for(int j=0;j<n;j++){
				residual+=pow(4.0*xw[I(i,j)]-xw[I(i-1,j)]-xw[I(i+1,j)]-xw[I(i,j-1)]-xw[I(i,j+1)]-b[I(i,j)],2);
			}
		}
		if(verbose)std::cout<<"res: "<<residual<<std::endl;
		////swap the buffers: the newest iterate is now in xr
		double* swap=xr;
		xr=xw;
		xw=swap;
		iter_num++;
	}while(residual>tolerance&&iter_num<max_num);
	if(verbose){
		std::cout<<"\n\nx for Jacobi:\n";
		for(int i=-1;i<=n;i++){
			for(int j=-1;j<=n;j++){
				printf("%.0lf, \t", xr[I(i,j)]);
			}
			std::cout<<std::endl;
		}
	}
	////BUG FIX: after an odd number of iterations the newest iterate lives
	////in buf, and the old `x=xr;` only moved a local pointer -- the caller
	////kept the stale iterate while the converged data was freed below.
	if(xr!=x) memcpy(x,xr,sizeof(double)*s);
	std::cout<<"Jacobi solver converges in "<<iter_num<<" iterations, with residual "<<residual<<std::endl;
	auto cpu_end = system_clock::now();
	duration<double> cpu_time=cpu_end-cpu_start;
	std::cout<<"CPU runtime: "<<cpu_time.count()*1000.<<" ms."<<std::endl;
	delete [] buf;
}
void Gauss_Seidel_Solver(double* x,const double* b)
{
int iter_num=0; ////iteration number
int max_num=1e5; ////max iteration number
double residual=0.0; ////residual
auto cpu_start = system_clock::now();
do{
////update x values using the Gauss-Seidel iterative scheme
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
x[I(i,j)]=(b[I(i,j)]+x[I(i-1,j)]+x[I(i+1,j)]+x[I(i,j-1)]+x[I(i,j+1)])/4.0;
}
}
////calculate residual
residual=0.0;
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
residual+=pow(4.0*x[I(i,j)]-x[I(i-1,j)]-x[I(i+1,j)]-x[I(i,j-1)]-x[I(i,j+1)]-b[I(i,j)],2);
}
}
if(verbose)std::cout<<"res: "<<residual<<std::endl;
iter_num++;
}while(residual>tolerance&&iter_num<max_num);
std::cout<<"Gauss-Seidel solver converges in "<<iter_num<<" iterations, with residual "<<residual<<std::endl;
auto cpu_end = system_clock::now();
duration<double> cpu_time=cpu_end-cpu_start;
std::cout<<"CPU runtime: "<<cpu_time.count()*1000.<<" ms."<<std::endl;
}
void Red_Black_Gauss_Seidel_Solver(double* x,const double* b)
{
int iter_num=0; ////iteration number
int max_num=1e5; ////max iteration number
double residual=0.0; ////residual
auto cpu_start = system_clock::now();
do{
////red G-S
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
if((i+j)%2==0) ////Look at this line!
x[I(i,j)]=(b[I(i,j)]+x[I(i-1,j)]+x[I(i+1,j)]+x[I(i,j-1)]+x[I(i,j+1)])/4.0;
}
}
////black G-S
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
if((i+j)%2==1) ////And this line!
x[I(i,j)]=(b[I(i,j)]+x[I(i-1,j)]+x[I(i+1,j)]+x[I(i,j-1)]+x[I(i,j+1)])/4.0;
}
}
////calculate residual
residual=0.0;
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
residual+=pow(4.0*x[I(i,j)]-x[I(i-1,j)]-x[I(i+1,j)]-x[I(i,j-1)]-x[I(i,j+1)]-b[I(i,j)],2);
}
}
if(verbose)std::cout<<"res: "<<residual<<std::endl;
iter_num++;
}while(residual>tolerance&&iter_num<max_num);
std::cout<<"Red-Black Gauss-Seidel solver converges in "<<iter_num<<" iterations, with residual "<<residual<<std::endl;
auto cpu_end = system_clock::now();
duration<double> cpu_time=cpu_end-cpu_start;
std::cout<<"CPU runtime: "<<cpu_time.count()*1000.<<" ms."<<std::endl;
}
//////////////////////////////////////////////////////////////////////////
////In this function, we are solving a Poisson equation -laplace(p)=b, with p=x^2+y^2 and b=4.
////The boundary conditions are set on the one-ring ghost cells of the grid
//////////////////////////////////////////////////////////////////////////
////Exercises the three CPU solvers on the Poisson problem -laplace(p)=b
////with b=4 everywhere and boundary condition p=i^2+j^2 set on the one-ring
////ghost cells. x is reset to the same initial state before each solver.
void Test_CPU_Solvers()
{
	double* x=new double[s];
	double* b=new double[s];
	////right-hand side: constant 4 everywhere (including the ghost ring)
	for(int i=-1;i<=n;i++){
		for(int j=-1;j<=n;j++){
			b[I(i,j)]=4.0;
		}
	}
	////reset x: zero interior, p=i^2+j^2 on the boundary/ghost ring
	auto reset_x=[&](){
		memset(x,0x0000,sizeof(double)*s);
		for(int i=-1;i<=n;i++){
			for(int j=-1;j<=n;j++){
				if(B(i,j))
					x[I(i,j)]=(double)(i*i+j*j);
			}
		}
	};
	////print the interior of x under the given title (verbose mode only)
	auto print_x=[&](const char* title){
		if(!verbose) return;
		std::cout<<title;
		for(int i=0;i<n;i++){
			for(int j=0;j<n;j++){
				std::cout<<x[I(i,j)]<<", ";
			}
			std::cout<<std::endl;
		}
	};
	////test Jacobi
	reset_x();
	Jacobi_Solver(x,b);
	print_x("\n\nx for Jacobi:\n");
	std::cout<<"\n\n";
	////test Gauss-Seidel
	reset_x();
	Gauss_Seidel_Solver(x,b);
	print_x("\n\nx for Gauss-Seidel:\n");
	std::cout<<"\n\n";
	////test Red-Black Gauss-Seidel
	reset_x();
	Red_Black_Gauss_Seidel_Solver(x,b);
	print_x("\n\nx for Red-Black Gauss-Seidel:\n");
	std::cout<<"\n\n";
	delete [] x;
	delete [] b;
}
//////////////////////////////////////////////////////////////////////////
// One launch = 16 Jacobi sub-iterations over a 16x16 tile of the grid.
// Caller launches with grid=(n/16, n/16), block=(16, 18): half-warps
// threadIdx.y = 0..15 own the tile cells, while the two extra rows 16/17
// stage the tile's left/right ghost columns from the packed `ghost` array.
// x/ghost are the read buffers, x_out/ghost_out the write buffers (the
// caller ping-pongs them); b is the right-hand side for the tile.
// res_out is unused here (the residual phase is disabled -- see
// GPU_Residual_Helper).
__global__ void GPU_Jacobi(double* x, double* ghost, double* b,
double* x_out, double* ghost_out, double* res_out)
{
	// shared memory tile, including a one-cell ghost border
	__shared__ double shared_x[18][18];
	// registers prep
	double my_b = 0;
	double my_res = 0;
	int finalwarp_idx = 0;
	int absoluteY = blockIdx.y*blockDim.x+threadIdx.y; // not blockDim.y, to allow for the 2-row overlap
	int thr_per_row = blockDim.x*gridDim.x;
	int absoluteX = blockIdx.x*blockDim.x+threadIdx.x;
	double top = 0;
	double left = 0;
	double right = 0;
	double bottom = 0;
	int adjust_y = 0;
	int adjust_x = 0;
	// PHASE ONE: load the 18x16 middle columns with an aligned, coalesced fetch
	shared_x[threadIdx.y][threadIdx.x+1] = x[absoluteY*thr_per_row + absoluteX];
	__syncthreads();
	// PHASE TWO: half-warps 0-15 fetch their b value from global memory
	// while half-warps 16-17 fetch the left/right ghost columns into
	// shared columns 0 and 17
	if (threadIdx.y < 16) {
		my_b = b[absoluteY*thr_per_row + absoluteX];
	} else {
		finalwarp_idx = threadIdx.y - 16;
		shared_x[threadIdx.x+1][finalwarp_idx*17] = ghost[n*(blockIdx.x*2 + finalwarp_idx*3) +
		threadIdx.x + blockDim.x*blockIdx.y];
	}
	__syncthreads();
	// PHASES THREE/FOUR, repeated 16 times: average the four neighbors into
	// a register, barrier, write the result back to shared, barrier.
	// (Both __syncthreads() sit outside the divergent branch, so every
	// thread in the block reaches them.)
	adjust_x = threadIdx.x+1;
	adjust_y = threadIdx.y+1;
	#pragma unroll
	for (int i = 0; i < 16; i++) {
		if (threadIdx.y < 16) {
			top = shared_x[adjust_y-1][adjust_x];
			left = shared_x[adjust_y][adjust_x-1];
			right = shared_x[adjust_y][adjust_x+1];
			bottom = shared_x[adjust_y+1][adjust_x];
			my_res = my_b + top + left + right + bottom;
			my_res /= 4;
		}
		__syncthreads();
		if (threadIdx.y < 16) {
			shared_x[adjust_y][adjust_x] = my_res;
		}
		__syncthreads();
	}
	// PHASE FIVE: half-warps 0-15 write the tile back to global memory
	// while 16-17 publish this tile's edge columns to the ghost buffer
	// for the neighboring tiles to read next launch
	if (threadIdx.y < 16) {
		x_out[(absoluteY+1)*thr_per_row + absoluteX] = my_res;
	} else {
		finalwarp_idx = threadIdx.y - 16;
		ghost_out[n*(blockIdx.x*2 + 1+finalwarp_idx) + threadIdx.x + blockDim.x*blockIdx.y] =
		shared_x[threadIdx.x+1][1+finalwarp_idx*15];
	}
	__syncthreads();
}
// Computes the per-cell squared residual (4*x - neighbors - b)^2 for one
// 16x16 tile and stores it into res_out for a later sum-reduction on the
// host side (thrust::reduce). Uses the same launch geometry and shared
// staging scheme as GPU_Jacobi: block=(16,18), rows 16/17 only stage the
// packed ghost columns.
__global__ void GPU_Residual_Helper(double* x, double* ghost, double* b, double* res_out)
{
	// shared memory tile, including a one-cell ghost border
	__shared__ double shared_x[18][18];
	// registers prep
	double my_b = 0;
	double my_res = 0;
	int finalwarp_idx = 0;
	int absoluteY = blockIdx.y*blockDim.x+threadIdx.y; // not blockDim.y, to allow for the 2-row overlap
	int thr_per_row = blockDim.x*gridDim.x;
	int absoluteX = blockIdx.x*blockDim.x+threadIdx.x;
	double top = 0;
	double left = 0;
	double right = 0;
	double bottom = 0;
	double me = 0;
	int adjust_y = 0;
	int adjust_x = 0;
	// PHASE ONE: load the 18x16 middle columns with an aligned, coalesced fetch
	shared_x[threadIdx.y][threadIdx.x+1] = x[absoluteY*thr_per_row + absoluteX];
	__syncthreads();
	// PHASE TWO: half-warps 0-15 fetch b while 16-17 fetch the ghost columns
	if (threadIdx.y < 16) {
		my_b = b[absoluteY*thr_per_row + absoluteX];
	} else {
		finalwarp_idx = threadIdx.y - 16;
		shared_x[threadIdx.x+1][finalwarp_idx*17] = ghost[n*(blockIdx.x*2 + finalwarp_idx*3) +
		threadIdx.x + blockDim.x*blockIdx.y];
	}
	__syncthreads();
	// PHASE THREE: squared residual (4*me - top - left - right - bottom - b)^2,
	// written straight to global memory
	if (threadIdx.y < 16) {
		adjust_x = threadIdx.x+1;
		adjust_y = threadIdx.y+1;
		top = shared_x[adjust_y-1][adjust_x];
		left = shared_x[adjust_y][adjust_x-1];
		right = shared_x[adjust_y][adjust_x+1];
		bottom = shared_x[adjust_y+1][adjust_x];
		me = shared_x[adjust_y][adjust_x];
		my_res = 4*me - top - left - right - bottom - my_b;
		res_out[(absoluteY)*thr_per_row + absoluteX] = my_res*my_res;
	}
}
////Your implementations end here
//////////////////////////////////////////////////////////////////////////
std::ofstream out;
//////////////////////////////////////////////////////////////////////////
////GPU test function
////GPU test driver for the Jacobi solver.
////Repacks x and b into GPU-friendly row-major arrays plus a packed
////ghost-column array, runs double-buffered GPU_Jacobi launches (16 Jacobi
////sub-iterations each) until the residual -- sampled every 100 launches --
////drops below tolerance, then unpacks the result into x and reports the
////final residual and GPU runtime.
void Test_GPU_Solver_Jacobi()
{
	double* x=new double[s];
	memset(x,0x0000,sizeof(double)*s);
	double* b=new double[s];
	////initialize b
	////NOTE(review): uses the debug ramp 100*i+j rather than the Poisson
	////right-hand side 4.0 used by the CPU tests -- confirm this is intended
	for(int i=-1;i<=n;i++){
		for(int j=-1;j<=n;j++){
			b[I(i,j)]=100*i+j;
		}
	}
	////initialize x: boundary p=i*i+j*j on the ghost ring, ramp inside
	for(int i=-1;i<=n;i++){
		for(int j=-1;j<=n;j++){
			if(B(i,j))
				x[I(i,j)]=(double)(i*i+j*j);
			else
				x[I(i,j)]=100*i+j;
		}
	}
	////repack x row-major as (n+2) rows of n columns: top/bottom ghost rows
	////are included; left/right ghost columns live in ghost_host instead
	////(avoids strided column accesses on the GPU)
	const int my_s = (n+2)*n;
	double* x_host = new double[my_s];
	for(int i = -1;i <= n;i++){
		for(int j = 0;j < n;j++){
			int this_i = i+1;
			x_host[this_i*n+j] = x[I(i,j)];
		}
	}
	////repack b for the n x n interior only
	const int my_b = n*n;
	double* b_host = new double[my_b];
	for(int i = 0;i < n;i++){
		for(int j = 0;j < n;j++){
			b_host[i*n+j] = b[I(i,j)];
		}
	}
	////packed ghost columns: row 0 = left domain boundary, last row = right
	////domain boundary; the rows in between mirror each 16-wide tile's edge
	////columns so neighboring tiles can exchange them between launches
	const int ghost_cols = n/8+2;
	double* ghost_host = new double[ghost_cols*n];
	for (int j = 0; j < n; j++) {
		ghost_host[j] = x[I(j,-1)];
		ghost_host[(ghost_cols-1)*n+j] = x[I(j,n)];
	}
	for (int j = 0; j < n; j += 16) {
		int col = j/8;
		for (int i = 0; i < n; i++) {
			ghost_host[(col+1)*n+i] = x[I(i,j)];
			ghost_host[(col+2)*n+i] = x[I(i,j+15)];
		}
	}
	////x and ghost are double-buffered (read/write ping-pong per launch)
	double* x_dev[2];
	cudaMalloc((void **)&x_dev[0], my_s*sizeof(double));
	cudaMemcpy(x_dev[0], x_host, my_s*sizeof(double), cudaMemcpyHostToDevice);
	cudaMalloc((void **)&x_dev[1], my_s*sizeof(double));
	cudaMemcpy(x_dev[1], x_host, my_s*sizeof(double), cudaMemcpyHostToDevice);
	double* ghost_dev[2];
	cudaMalloc((void **)&ghost_dev[0], ghost_cols*n*sizeof(double));
	cudaMemcpy(ghost_dev[0], ghost_host, ghost_cols*n*sizeof(double), cudaMemcpyHostToDevice);
	cudaMalloc((void **)&ghost_dev[1], ghost_cols*n*sizeof(double));
	cudaMemcpy(ghost_dev[1], ghost_host, ghost_cols*n*sizeof(double), cudaMemcpyHostToDevice);
	////b never changes: single buffer
	double* b_dev;
	cudaMalloc((void **)&b_dev, my_b*sizeof(double));
	cudaMemcpy(b_dev, b_host, my_b*sizeof(double), cudaMemcpyHostToDevice);
	////per-cell squared residuals, summed on device via thrust::reduce
	thrust::device_vector<double> res_thrust(n*n, 0);
	double* res_raw;
	cudaEvent_t start,end;
	cudaEventCreate(&start);
	cudaEventCreate(&end);
	float gpu_time=0.0f;
	cudaDeviceSynchronize();
	cudaEventRecord(start);
	////16x16 tiles; block y-dimension is 16+2 because two extra half-warps
	////stage the ghost columns (see GPU_Jacobi)
	int block_size = 16;
	int grid_dim = n/block_size;
	int src = 0;
	int dest = 1;
	int iter_num=0; ////launch counter
	int max_num=1e5; ////max number of launches
	double residual=10.0; ////residual, re-sampled every 100 launches
	for (; residual > tolerance && iter_num < max_num; iter_num++) {
		////ping-pong the read/write buffers each launch
		src = iter_num & 1;
		dest = (src + 1) & 1;
		////NOTE(review): res_raw is uninitialized on the very first launch;
		////harmless only because GPU_Jacobi never dereferences res_out
		GPU_Jacobi<<<dim3(grid_dim, grid_dim), dim3(block_size, block_size+2)>>>(x_dev[src], ghost_dev[src], b_dev, x_dev[dest], ghost_dev[dest], res_raw);
		if (iter_num % 100 == 0) {
			res_raw=thrust::raw_pointer_cast(res_thrust.data());
			GPU_Residual_Helper<<<dim3(grid_dim, grid_dim), dim3(block_size, block_size+2)>>>(x_dev[dest], ghost_dev[dest], b_dev, res_raw);
			residual = thrust::reduce(res_thrust.begin(),res_thrust.end(),(double)0,thrust::plus<double>());
			if(verbose)std::cout<<"res: "<<residual<<std::endl;
		}
	}
	////copy the last-written buffers back and unpack into the original x layout
	cudaMemcpy(x_host, x_dev[dest], my_s*sizeof(double), cudaMemcpyDeviceToHost);
	cudaMemcpy(ghost_host, ghost_dev[dest], ghost_cols*n*sizeof(double), cudaMemcpyDeviceToHost);
	for (int j = 0; j < n; j++) {
		x[I(j,-1)] = ghost_host[j];
		x[I(j,n)] = ghost_host[(ghost_cols-1)*n+j];
	}
	for (int j = 0; j < n; j += 16) {
		int col = j/8;
		for (int i = 0; i < n; i++) {
			x[I(i,j)] = ghost_host[(col+1)*n+i];
			x[I(i,j+15)] = ghost_host[(col+2)*n+i];
		}
	}
	for(int i = -1;i <= n;i++){
		for(int j = 0;j < n;j++){
			int this_i = i+1;
			x[I(i,j)] = x_host[this_i*n+j];
		}
	}
	cudaEventRecord(end);
	cudaEventSynchronize(end);
	cudaEventElapsedTime(&gpu_time,start,end);
	printf("\nGPU runtime: %.4f ms\n",gpu_time);
	cudaEventDestroy(start);
	cudaEventDestroy(end);
	////output x
	if(verbose){
		std::cout<<"\n\nx for your GPU solver:\n";
		for(int i=-1;i<=n;i++){
			for(int j=-1;j<=n;j++){
				printf("%.0lf, \t", x[I(i,j)]);
			}
			std::cout<<std::endl;
		}
	}
	std::cout<<std::endl;
	std::cout<<std::endl;
	////recompute the exact residual on the CPU for reporting
	residual=0.0;
	for(int i=0;i<n;i++){
		for(int j=0;j<n;j++){
			residual+=pow(4.0*x[I(i,j)]-x[I(i-1,j)]-x[I(i+1,j)]-x[I(i,j-1)]-x[I(i,j+1)]-b[I(i,j)],2);
		}
	}
	std::cout<<"\n\nresidual for your GPU solver: "<<residual<<std::endl;
	std::cout<<"GPU Jacobi solver converges in "<<iter_num<<" iterations, with residual "<<residual<<std::endl;
	out<<"R0: "<<residual<<std::endl;
	out<<"T1: "<<gpu_time<<std::endl;
	////NOTE(review): x_host, b_host, ghost_host and all device buffers are
	////never freed -- acceptable for a one-shot benchmark, but worth fixing
	delete [] x;
	delete [] b;
}
// Test_GPU_Solver_RB_GaussSeidel()
// {
// double* x=new double[s];
// memset(x,0x0000,sizeof(double)*s);
// double* b=new double[s];
// //////////////////////////////////////////////////////////////////////////
// ////initialize x and b
// for(int i=-1;i<=n;i++){
// for(int j=-1;j<=n;j++){
// // b[I(i,j)]=4.0; ////set the values for the right-hand side
// b[I(i,j)]=100*i+j;
// }
// }
// for(int i=-1;i<=n;i++){
// for(int j=-1;j<=n;j++){
// if(B(i,j))
// x[I(i,j)]=(double)(i*i+j*j); ////set boundary condition for x
// else
// x[I(i,j)]=100*i+j;
// }
// }
// // reformat memory to avoid column access
// const int my_s = (n+2)*n;
// double* x_host = new double[my_s];
// for(int i = -1;i <= n;i++){
// for(int j = 0;j < n;j++){
// int this_i = i+1;
// x_host[this_i*n+j] = x[I(i,j)];
// // std::cout<<x_host[this_i*n+j]<<", \t";
// }
// // std::cout<<std::endl;
// }
// // reformat memory to avoid column access
// const int my_b = n*n;
// double* b_host = new double[my_b];
// for(int i = 0;i < n;i++){
// for(int j = 0;j < n;j++){
// b_host[i*n+j] = b[I(i,j)];
// // std::cout<<x_host[this_i*n+j]<<", \t";
// }
// // std::cout<<std::endl;
// }
// const int ghost_cols = n/8+2;
// double* ghost_host = new double[ghost_cols*n];
// for (int j = 0; j < n; j++) {
// ghost_host[j] = x[I(j,-1)];
// ghost_host[(ghost_cols-1)*n+j] = x[I(j,n)];
// }
// for (int j = 0; j < n; j += 16) {
// int col = j/8;
// for (int i = 0; i < n; i++) {
// // std::cout<<col+1<<" "<<col+2<<" "<<i<<", ";
// ghost_host[(col+1)*n+i] = x[I(i,j)];
// ghost_host[(col+2)*n+i] = x[I(i,j+15)];
// }
// // std::cout<<std::endl;
// }
// // single buffer
// double* x_dev;
// cudaMalloc((void **)&x_dev, my_s*sizeof(double));
// cudaMemcpy(x_dev, x_host, my_s*sizeof(double), cudaMemcpyHostToDevice);
// double* ghost_dev;
// cudaMalloc((void **)&ghost_dev, ghost_cols*n*sizeof(double));
// cudaMemcpy(ghost_dev, ghost_host, ghost_cols*n*sizeof(double), cudaMemcpyHostToDevice);
// // single buffer
// double* b_dev;
// cudaMalloc((void **)&b_dev, my_b*sizeof(double));
// cudaMemcpy(b_dev, b_host, my_b*sizeof(double), cudaMemcpyHostToDevice);
// // thrust::device_ptr<double> res_dev=thrust::device_malloc<double>(n*n);
// thrust::device_vector<double> res_thrust(n*n, 0);
// double* res_raw;
// // cudaMalloc((void **)&res_dev, n*n*sizeof(double));
// // double* res_host;
// cudaEvent_t start,end;
// cudaEventCreate(&start);
// cudaEventCreate(&end);
// float gpu_time=0.0f;
// cudaDeviceSynchronize();
// cudaEventRecord(start);
// int iter_num=0; ////iteration number
// int max_num=1e5; ////max iteration number
// // int max_num = 10;
// double residual=10.0;
// cudaMemcpy(x_host, x_dev[dest], my_s*sizeof(double), cudaMemcpyDeviceToHost);
// cudaMemcpy(ghost_host, ghost_dev[dest], ghost_cols*n*sizeof(double), cudaMemcpyDeviceToHost);
// for (int j = 0; j < n; j++) {
// x[I(j,-1)] = ghost_host[j];
// x[I(j,n)] = ghost_host[(ghost_cols-1)*n+j];
// }
// for (int j = 0; j < n; j += 16) {
// int col = j/8;
// for (int i = 0; i < n; i++) {
// // std::cout<<col+1<<" "<<col+2<<" "<<i<<", ";
// x[I(i,j)] = ghost_host[(col+1)*n+i];
// x[I(i,j+15)] = ghost_host[(col+2)*n+i];
// }
// // std::cout<<std::endl;
// }
// for(int i = -1;i <= n;i++){
// for(int j = 0;j < n;j++){
// int this_i = i+1;
// x[I(i,j)] = x_host[this_i*n+j];
// // std::cout<<x_host[this_i*n+j]<<", \t";
// }
// // std::cout<<std::endl;
// }
// cudaEventRecord(end);
// cudaEventSynchronize(end);
// cudaEventElapsedTime(&gpu_time,start,end);
// printf("\nGPU runtime: %.4f ms\n",gpu_time);
// cudaEventDestroy(start);
// cudaEventDestroy(end);
// //////////////////////////////////////////////////////////////////////////
// //output x
// if(verbose){
// std::cout<<"\n\nx for your GPU solver:\n";
// for(int i=-1;i<=n;i++){
// for(int j=-1;j<=n;j++){
// // std::cout<<x[I(i,j)]<<", ";
// printf("%.0lf, \t", x[I(i,j)]);
// }
// std::cout<<std::endl;
// }
// }
// std::cout<<std::endl;
// std::cout<<std::endl;
// ////calculate residual
// residual=0.0;
// for(int i=0;i<n;i++){
// for(int j=0;j<n;j++){
// residual+=pow(4.0*x[I(i,j)]-x[I(i-1,j)]-x[I(i+1,j)]-x[I(i,j-1)]-x[I(i,j+1)]-b[I(i,j)],2);
// }
// }
// std::cout<<"\n\nresidual for your GPU solver: "<<residual<<std::endl;
// std::cout<<"GPU Red-Black Gauss-Seidel solver converges in "<<iter_num<<" iterations, with residual "<<residual<<std::endl;
// // std::out<<"R0: "<<residual<<std::endl;
// // std::out<<"T1: "<<gpu_time<<std::endl;
// //////////////////////////////////////////////////////////////////////////
// delete [] x;
// delete [] b;
// }
// Entry point: refuses to run with the placeholder team name, opens the
// per-team result file on the global `out` stream, then dispatches to the
// CPU or GPU solver test driver depending on the CPU macro.
int main()
{
    // The placeholder must be replaced before results can be recorded.
    if(name::team=="Team_X"){
        printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
        return 0;
    }

    const std::string file_name=name::team+"_competition_3_linear_solver.dat";
    out.open(file_name.c_str());
    if(out.fail()){
        printf("\ncannot open file %s to record results\n",file_name.c_str());
        return 0;
    }

#ifdef CPU
    Test_CPU_Solvers(); ////You may comment out this line to run your GPU solver only
#else
    Test_GPU_Solver_Jacobi(); ////Test function for your own GPU implementation
    //Test_GPU_Solver_RB_GaussSeidel();
#endif
    return 0;
}
|
13,012 | // User define constants
// Compile-time solver configuration for the Poisson test driver.
const int method = 1; // solver choice: 0 = SOR (successive over-relaxation); 1 = CG (conjugate gradient)
const int N_ln = 1024; // grid size (number of cells per side)
const int bc = 0; // boundary condition: 0: const_bc, 1: one_bc, 2: four_bc, 3: sin_bc
const int source = 1; // source term: 0: background density, 1: point source middle, 2: point source 4th quadrant
const double criteria = 1.0e-14; // convergence criteria (iteration stops once the residual drops below this)
|
13,013 | #include "includes.h"
// Scalar device addition: stores *num1 + *num2 into *result.
// Intended for a <<<1,1>>> launch; with more threads every thread performs
// the identical store, so the race is benign.
__global__ void add(int *result, int *num1, int *num2){
    const int lhs = *num1;
    const int rhs = *num2;
    *result = lhs + rhs;
}
13,014 | #include <utility>
// quicksort
// In-place recursive quicksort over values[0..length), ascending.
// The first element serves as the pivot; arrays of size <= 1 are already sorted.
void sort(int * values, int length) {
    if(length <= 1) {
        return;
    }
    const int pivot = values[0];
    // Partition: gather every element strictly smaller than the pivot into
    // the prefix [1, boundary).
    int boundary = 1;
    for(int idx = 1; idx < length; idx++) {
        if(values[idx] < pivot) {
            std::swap(values[idx], values[boundary]);
            boundary++;
        }
    }
    // Put the pivot at the end of the "smaller" prefix — its final position.
    values[0] = values[boundary - 1];
    values[boundary - 1] = pivot;
    // Recurse into both halves, excluding the pivot itself.
    sort(values, boundary - 1);
    sort(values + boundary, length - boundary);
}
// fisher-yates shuffle
// Fisher-Yates shuffle: uniformly permutes values[0..length) in place.
// Uses rand(), so call srand() first for a reproducible permutation.
void shuffle(int * values, int length) {
    for(int pos = length - 1; pos > 0; pos--) {
        const int other = rand() % (pos + 1);   // partner drawn from [0, pos]
        std::swap(values[pos], values[other]);
    }
}
|
13,015 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
cudaError_t forwardPass(double *x1, double *y1, double *W1,
double *x2, double *y2, double *W2,
int row, int column);
// Matrix-vector product, one row per block: y[tid] = sum_j W[tid*column + j] * x[j].
// Launched as <<<row, 1>>> (see forwardPass), so blockIdx.x alone identifies the row.
__global__ void vectorMultiplicationKernel(double *x, double *y, double *W,
int row, int column)
{
int tid = blockIdx.x;
// Guard against a grid larger than the number of rows.
if (tid >= row) {
return;
}
double result = 0;
// Dot product of row `tid` of W with x.
for (int j = 0; j < column; j++) {
result += W[tid * column + j] * x[j];
}
// NOTE(review): this nested loop adds and then removes 10^8 increments — a
// net no-op on `result`. Presumably an artificial delay so the two stream
// launches in forwardPass() visibly overlap; confirm before removing.
for (int j = 0; j < 10000; j++) {
for (int k = 0; k < 10000; k++) {
result++;
result--;
}
}
y[tid] = result;
}
// Driver: builds two (row x column) systems filled with the constant 10,
// runs both forward passes on the GPU (see forwardPass), and prints the
// resulting vectors. Usage: <prog> <row> <column>
int main(int argc, char *argv[])
{
    // Fix: validate the argument count before use — the previous version
    // dereferenced argv[1]/argv[2] unconditionally.
    if (argc < 3) {
        fprintf(stderr, "usage: %s <row> <column>\n", argv[0]);
        return 1;
    }
    int row = atoi(argv[1]);
    int column = atoi(argv[2]);

    // Two independent systems so forwardPass can overlap them on two streams.
    double *W1 = (double*)malloc(row * column * sizeof(double));
    double *x1 = (double*)malloc(column * sizeof(double));
    double *y1 = (double*)malloc(row * sizeof(double));
    double *W2 = (double*)malloc(row * column * sizeof(double));
    double *x2 = (double*)malloc(column * sizeof(double));
    double *y2 = (double*)malloc(row * sizeof(double));

    // Fill all inputs with the constant 10.
    for (int i = 0; i < column; i++) {
        x1[i] = 10;
    }
    for (int i = 0; i < row * column; i++) {
        W1[i] = 10;
    }
    for (int i = 0; i < column; i++) {
        x2[i] = 10;
    }
    for (int i = 0; i < row * column; i++) {
        W2[i] = 10;
    }

    cudaError_t cudaStatus = forwardPass(x1, y1, W1, x2, y2, W2, row, column);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "vectorMultiplicationWithCuda failed!");
        return 1;
    }

    // Print both result vectors.
    for (int i = 0; i < row; i++) {
        printf("%.2f ", y1[i]);
    }
    printf("\n");
    for (int i = 0; i < row; i++) {
        printf("%.2f ", y2[i]);
    }
    printf("\n");

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }

    // Fix: release the host buffers (previously leaked).
    free(W1); free(x1); free(y1);
    free(W2); free(x2); free(y2);
    return 0;
}
// Runs two independent forward passes y = W*x on the GPU, one per CUDA
// stream, so their kernels can overlap. x*/W* are host inputs; results are
// copied back into the host buffers y1 and y2.
// Returns the first failing CUDA status, or cudaSuccess on a clean run.
cudaError_t forwardPass(double *x1, double *y1, double *W1,
    double *x2, double *y2, double *W2,
    int row, int column)
{
    double *dev_x1 = 0;
    double *dev_y1 = 0;
    double *dev_W1 = 0;
    double *dev_x2 = 0;
    double *dev_y2 = 0;
    double *dev_W2 = 0;
    cudaError_t cudaStatus = cudaSuccess;

    // Allocate and populate device buffers for the first system.
    cudaMalloc((void**)&dev_x1, column * sizeof(double));
    cudaMalloc((void**)&dev_y1, row * sizeof(double));
    cudaMalloc((void**)&dev_W1, row * column * sizeof(double));
    cudaMemcpy(dev_x1, x1, column * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_W1, W1, row * column * sizeof(double), cudaMemcpyHostToDevice);

    // ... and the second system.
    cudaMalloc((void**)&dev_x2, column * sizeof(double));
    cudaMalloc((void**)&dev_y2, row * sizeof(double));
    cudaMalloc((void**)&dev_W2, row * column * sizeof(double));
    cudaMemcpy(dev_x2, x2, column * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_W2, W2, row * column * sizeof(double), cudaMemcpyHostToDevice);

    // Launch one kernel per stream (one thread-block per row each) so the
    // two passes can execute concurrently.
    cudaStream_t s1;
    cudaStream_t s2;
    cudaStreamCreate(&s1);
    vectorMultiplicationKernel<<<row, 1, 0, s1>>>(dev_x1, dev_y1, dev_W1, row, column);
    cudaStreamCreate(&s2);
    vectorMultiplicationKernel<<<row, 1, 0, s2>>>(dev_x2, dev_y2, dev_W2, row, column);

    // Fix: surface launch-configuration errors — previously only the final
    // memcpy status was ever reported.
    cudaStatus = cudaGetLastError();

    // Blocking copies on the legacy default stream; these wait for both
    // kernels to finish before copying the results back to the host.
    if (cudaStatus == cudaSuccess)
        cudaStatus = cudaMemcpy(y1, dev_y1, row * sizeof(double), cudaMemcpyDeviceToHost);
    if (cudaStatus == cudaSuccess)
        cudaStatus = cudaMemcpy(y2, dev_y2, row * sizeof(double), cudaMemcpyDeviceToHost);

    // Fix: destroy the streams (previously leaked) and free device memory.
    cudaStreamDestroy(s1);
    cudaStreamDestroy(s2);
    cudaFree(dev_x1);
    cudaFree(dev_y1);
    cudaFree(dev_W1);
    cudaFree(dev_x2);
    cudaFree(dev_y2);
    cudaFree(dev_W2);
    return cudaStatus;
}
|
13,016 | #include "includes.h"
// Temporal average of L stacked RGB frames, one thread per pixel:
// out[p] = (1/L) * sum_i in[p + i*M*N] for each of the R, G, B planes.
__global__ void kernelGPU(float *R,float* G,float* B,float* Rin,float*Gin,float*Bin,int M,int N,int L){
    const int pixel = threadIdx.x + blockIdx.x * blockDim.x;
    const int frameSize = M * N;
    if (pixel >= frameSize)
        return;
    // Accumulate across frames (same add order as a direct in-place sum).
    float r = 0;
    float g = 0;
    float b = 0;
    for (int frame = 0; frame < L; ++frame) {
        const int off = pixel + frame * frameSize;
        r += Rin[off];
        g += Gin[off];
        b += Bin[off];
    }
    R[pixel] = r / L;
    G[pixel] = g / L;
    B[pixel] = b / L;
}
13,017 | #include "includes.h"
// Block-level sum reduction: each block sums its slice of g_idata in shared
// memory and atomically adds the block total into *g_odata.
// Preconditions: blockDim.x is a power of two and <= 256; the caller zeroes
// *g_odata before launch; the grid covers g_idata exactly.
__global__ void reduce(int *g_idata, int *g_odata) {
    __shared__ int sdata[256];
    // each thread loads one element from global to shared mem
    // note use of 1D thread indices (only) in this kernel
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[threadIdx.x] = g_idata[i];
    __syncthreads();
    // Fix: sequential addressing (stride halves each step) replaces the
    // original interleaved indexing, which caused warp divergence and
    // shared-memory bank conflicts; the computed block sum is unchanged.
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (threadIdx.x < s)
        {
            sdata[threadIdx.x] += sdata[threadIdx.x + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (threadIdx.x == 0)
        atomicAdd(g_odata, sdata[0]);
}
13,018 |
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;
#include <cuda.h>
// Allocates 25 device buffers of increasing size via the CUDA driver API,
// then frees them.
// Fixes: the driver API requires cuInit() and a current context before any
// cuMemAlloc() call (previously missing, so every allocation failed with
// CUDA_ERROR_NOT_INITIALIZED); a 0-byte allocation (the old i == 0 case) is
// also invalid, so sizes are now i + 1 bytes; statuses are checked.
int main(int argc, char *argv[]) {
    int num=25;
    CUdeviceptr loc[num];

    // Driver-API bootstrap.
    CUdevice dev;
    CUcontext ctx;
    CUresult rc = cuInit(0);
    if (rc == CUDA_SUCCESS) rc = cuDeviceGet(&dev, 0);
    if (rc == CUDA_SUCCESS) rc = cuCtxCreate(&ctx, 0, dev);
    if (rc != CUDA_SUCCESS) {
        std::cout << "failed to initialise CUDA driver API, error " << rc << std::endl;
        return 1;
    }

    for(int i=0; i<num; i++){
        std::cout << "i: " << i << std::endl;
        rc = cuMemAlloc(&loc[i], i + 1);   // i + 1 bytes: zero-byte allocs are an error
        assert(rc == CUDA_SUCCESS);
    }
    for(int i=0; i<num; i++){
        cuMemFree(loc[i]);
    }
    cuCtxDestroy(ctx);
    return 0;
}
|
13,019 | #include <iostream>
#include <time.h>
#include <stdio.h>
#define gpuErrchck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error (message string, file, line) to stderr and, when
// `abort` is true, exits the process with the error code. Invoked through
// the gpuErrchck() macro, which supplies __FILE__ and __LINE__.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace std;
// GPU element-wise vector addition: c[i] = a[i] + b[i] for every i < n.
__global__ void gpu_add(int n, int *a_device, int *b_device, int *c_device)
{
    // Flat global index across the 1-D grid.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if( idx < n )
        c_device[idx] = a_device[idx] + b_device[idx];
}
// CPU reference implementation: c[i] = a[i] + b[i] for every i in [0, n).
void cpu_add(int n, int *a_host, int *b_host, int *c_host)
{
    for (int idx = 0; idx < n; ++idx)
        c_host[idx] = a_host[idx] + b_host[idx];
}
// Compares GPU and CPU vector addition over N elements and prints both
// results and timings.
int main()
{
    int N = 100000;
    int *a_host = new int[N];
    int *b_host = new int[N];
    int *c_host = new int[N];
    int *a_device = NULL;
    int *b_device = NULL;
    int *c_device = NULL;
    gpuErrchck( cudaMalloc((void**)&a_device,N*sizeof(int)) );
    gpuErrchck( cudaMalloc((void**)&b_device,N*sizeof(int)) );
    gpuErrchck( cudaMalloc((void**)&c_device,N*sizeof(int)) );

    // a = all 1s, b = all 2s, so every sum is 3.
    int i ;
    for(i=0;i<N;++i){
        a_host[i] = 1;
        b_host[i] = 2;
    }

    clock_t start, end;
    double cpu_time_used;
    gpuErrchck( cudaMemcpy((void*)a_device, (void*)a_host, N*sizeof(int), cudaMemcpyHostToDevice) );
    gpuErrchck( cudaMemcpy((void*)b_device, (void*)b_host, N*sizeof(int), cudaMemcpyHostToDevice) );
    gpuErrchck( cudaDeviceSynchronize() );

    // Time only the kernel (N divides evenly: 100 blocks of 1000 threads).
    start = clock();
    gpu_add<<< N/1000,1000 >>>(N,a_device,b_device,c_device);
    gpuErrchck( cudaDeviceSynchronize() );
    end = clock();
    gpuErrchck( cudaMemcpy((void*)c_host, (void*)c_device, N*sizeof(int), cudaMemcpyDeviceToHost) );
    gpuErrchck( cudaDeviceSynchronize() );
    cout<<"GPU result: "<<c_host[1]<<endl;
    cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
    cout<<"GPU Time used: "<<cpu_time_used<<endl;

    // CPU pass overwrites c_host with the same values.
    start = clock();
    cpu_add(N,a_host,b_host,c_host);
    end = clock();
    cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
    cout<<"CPU Time used: "<<cpu_time_used<<endl;
    cout<<"CPU result: "<<c_host[1]<<endl;

    // Fix: the original `delete[] a_host, b_host, c_host;` used the comma
    // operator, so only a_host was actually freed — each array needs its
    // own delete[].
    delete[] a_host;
    delete[] b_host;
    delete[] c_host;
    cudaFree((void*)a_device);
    cudaFree((void*)b_device);
    cudaFree((void*)c_device);
    return 0;
}
|
13,020 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define BLOCKS 4
#define THREADS 4
// Scalar device addition: *result = *x + *y.
// Every launched thread performs the identical store, so launching with
// multiple blocks/threads is a benign race.
__global__ void add(int *x, int *y, int *result) {
    const int lhs = *x;
    const int rhs = *y;
    *result = lhs + rhs;
};
// Reads two integers from the command line, adds them on the GPU, and
// prints the sum.
int main(int argc, char *argv[]) {
    // Fix: the program reads argv[1] AND argv[2], so argc must be >= 3 —
    // the old `argc < 2` check allowed an out-of-bounds read of argv[2].
    // The error message is also corrected as part of the same fix.
    if(argc < 3)
    {
        printf("usage: %s <x> <y>\n", argv[0]);
        return 0;
    }
    int x = atoi(argv[1]);
    int y = atoi(argv[2]);

    // Copy both operands to the device.
    int *x_d, *y_d, *sum_d;
    cudaMalloc((void**) &x_d, sizeof(int));
    cudaMemcpy(x_d, &x, sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void**) &y_d, sizeof(int));
    cudaMemcpy(y_d, &y, sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void**) &sum_d, sizeof(int));

    // All BLOCKS*THREADS threads store the same value — benign.
    add<<<BLOCKS,THREADS>>>(x_d, y_d, sum_d);

    // Blocking copy also synchronizes with the kernel.
    int sum;
    cudaMemcpy(&sum, sum_d, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n", sum);

    cudaFree(x_d);
    cudaFree(y_d);
    cudaFree(sum_d);
};
|
13,021 | #include "includes.h"
// Memtest-style address-line write pass: each grid block owns one
// BLOCKSIZE-byte region and writes a shifting one-bit pattern at offsets
// that are powers of two from the region base, exercising each address
// line once. A companion read kernel presumably verifies the patterns.
// NOTE(review): BLOCKSIZE comes from includes.h — the block_end arithmetic
// assumes it is a multiple of sizeof(unsigned int); confirm.
__global__ void kernel_test0_write(char* _ptr, char* end_ptr)
{
// Base of this grid-block's region (the stray second ';' is harmless).
unsigned int* orig_ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);;
unsigned int* ptr = orig_ptr;
// Skip blocks whose region starts past the end of the buffer.
if (ptr >= (unsigned int*) end_ptr) {
return;
}
unsigned int* block_end = orig_ptr + BLOCKSIZE/sizeof(unsigned int);
unsigned int pattern = 1;
unsigned long mask = 4;
// Seed the base word with the first pattern.
*ptr = pattern;
while(ptr < block_end){
// Candidate address: region base with one extra address bit set.
ptr = (unsigned int*) ( ((unsigned long)orig_ptr) | mask);
if (ptr == orig_ptr){
// This mask bit is already set in the base address; try the next bit.
mask = mask <<1;
continue;
}
if (ptr >= block_end){
break;
}
// Write the current walking-one pattern, then advance pattern and mask.
*ptr = pattern;
pattern = pattern << 1;
mask = mask << 1;
}
return;
}
13,022 | #include "includes.h"
#define FALSE 0
#define TRUE 1
// returns random integer from 1 to lim
// Seeds one slot per thread: a[tid] = seed + tid, giving each thread a
// distinct starting value. No bounds guard — the launch configuration must
// match the length of `a` exactly.
__global__ void rand_init(long *a,long seed)
{
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    a[tid] = seed + tid;
}
13,023 | #include "includes.h"
// Scales both components of each complex FFT bin by vector[idx] / sqV.
// NOTE(review): `sqV` is an external symbol (from includes.h) — presumably a
// normalisation constant (e.g. sqrt of the transform volume); confirm.
// No bounds guard: the launch must cover exactly the length of `vector`/`FFT`.
__global__ void LoadAddVecSecond(float *vector , float2 *FFT) {
int idx = threadIdx.x + blockIdx.x*blockDim.x; // this should span the full range of the vector
FFT[idx].x *= vector[idx]/sqV;
FFT[idx].y *= vector[idx]/sqV;
}
13,024 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
/**
* Element-wise Vector Multiplication: C[i] = A[i] * B[i].
* This sample is a very basic sample that implements element by element vector multiplication.
*/
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
/**
* CUDA Kernel Device code
* Computes the element-wise vector multiplication of A and B into C. The 3 vectors have the same number of
elements numElements.
*/
/**
 * Element-wise product: C[i] = A[i] * B[i] for every i < numElements.
 * One thread per element; launch with ceil(numElements / blockDim.x) blocks.
 *
 * Fixes relative to the previous version:
 *  - removed unused locals (size, d_A/d_B/d_C) left over from host code;
 *  - removed the stride-2 indexing and the second store, which (a) performed
 *    an ADDITION instead of a multiplication and (b) wrote C[i + blockDim.x]
 *    past the end of the allocation for tail blocks — both of which made the
 *    host-side verification (A[i] * B[i]) fail;
 *  - plain one-thread-one-element indexing now matches the host launch
 *    configuration.
 */
__global__ void vectorMultiply(float *A, float *B, float *C, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements)
        C[i] = A[i] * B[i];
}
//Host main routine
// Host main routine: builds A[i] = i and B[i] ~ 1/i, multiplies them
// element-wise on the GPU, and verifies A[i]*B[i] against the device result.
int main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    // Vector length and byte size.
    float EPS = 0.00001;
    int numElements = 50000;
    size_t size = numElements * sizeof(float);
    printf("[Vector multiplication of %d elements]\n", numElements);

    // Host allocations.
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize inputs: A[i] = i, B[i] = 1/(EPS+i), so A[i]*B[i] ~ 1.
    for (int i = 0; i < numElements; i++)
    {
        *(h_A + i) = (float)i;
    }
    for (int i = 0; i < numElements; i++)
        *(h_B + i) = (1 / (EPS + i));

    // Device allocations.
    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the inputs to the device.
    printf("Copy input data from the host memory to the CUDA device\n");
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    // Launch the vectorMultiply CUDA kernel (one thread per element).
    int threadsPerBlock = 256;
    int blocksPerGrid = ceil(numElements / (float) threadsPerBlock);
    vectorMultiply <<< blocksPerGrid, threadsPerBlock >>>(d_A, d_B, d_C, numElements);
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        // Fix: the message previously named the wrong kernel ("vectorAdd").
        fprintf(stderr, "Failed to launch vectorMultiply kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the device result vector back to host memory.
    printf("Copy output data from the CUDA device to the host memory\n");
    // Fix: check the result copy — a silent failure here made the later
    // verification report garbage.
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Verify that the result vector is correct.
    for (int i = 0; i < numElements; ++i)
    {
        if (fabs((h_A[i] * h_B[i]) - h_C[i]) > 1e-5)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    printf("Test PASSED\n");

    // Free device global memory
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    printf("Done\n");
    return 0;
}
13,025 | #include <stdio.h>
#include <ctime>
#include <stdlib.h>
// Assigns each 2-D point (x[i], y[i]) to the nearest of `nofc` cluster
// centres and writes the winning centre index into label[i].
// model layout: model[0..nofc) are centre x-coordinates,
// model[nofc..2*nofc) are the matching y-coordinates.
__global__
void kmeanspredict(int n, int nofc, float *x, float *y, float* model, float* label)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= n)
return;
int iter;
double dist = 9999;  // best (smallest) distance so far; 9999 acts as +infinity
double tmp = 0;
double res = 2;      // index of the best centre so far
                     // NOTE(review): initialised to 2 rather than 0 — if no
                     // centre beats 9999 the label stays 2; confirm intended.
for (iter = 0; iter < nofc; iter++) {
// Euclidean distance from point i to centre `iter`.
tmp = sqrt((x[i] - model[iter]) * (x[i] - model[iter]) +
(y[i] - model[iter + nofc]) * (y[i] - model[iter + nofc]));
// Keep the closer centre; res must be updated before dist is overwritten.
res = tmp < dist ? iter : res;
dist = tmp < dist ? tmp : dist;
}
label[i] = res;
}
// Generates N random 2-D points, assigns each to the nearest of 3 fixed
// cluster centres on the GPU, and reports the elapsed time.
int main(void)
{
    int N = 100000000;
    float *feature1, *feature2, *d_feature1, *d_feature2, *d_model;
    float *label, *d_label;
    feature1 = (float*)malloc(N*sizeof(float));
    feature2 = (float*)malloc(N*sizeof(float));
    // Fix: label is a float array; it was allocated with sizeof(int).
    label = (float*)malloc(N*sizeof(float));
    cudaMalloc(&d_feature1, N*sizeof(float));
    cudaMalloc(&d_feature2, N*sizeof(float));
    cudaMalloc(&d_label, N*sizeof(float));

    // 3 centres: xs in model[0..3), ys in model[3..6).
    float model[] = {0.79314066 , 0.40563098, 0.27847279,
                     0.27847279, 0.8073302, 0.28528738};
    cudaMalloc(&d_model, 6*sizeof(float));

    std::srand(time(NULL));
    for (int i = 0; i < N; i++) {
        feature1[i] = rand();
        feature2[i] = rand();
    }

    clock_t begin = std::clock();
    cudaMemcpy(d_feature1, feature1, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_feature2, feature2, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_model, model, 6*sizeof(float), cudaMemcpyHostToDevice);
    kmeanspredict<<<(N+255)/256, 256>>>(N, 3, d_feature1, d_feature2, d_model, d_label);
    cudaMemcpy(feature2, d_feature2, N*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(label, d_label, N*sizeof(float), cudaMemcpyDeviceToHost);
    clock_t end = std::clock();
    double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    printf("measured time: %lf\n", elapsed_secs);

    cudaFree(d_feature1);
    cudaFree(d_feature2);
    cudaFree(d_label);
    cudaFree(d_model);   // Fix: d_model was never freed.
    free(feature1);
    free(feature2);
    free(label);
}
|
13,026 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <iostream>
// Thrust demo: computes z = x + y element-wise on the device for three
// floats and prints the result.
int main() {
    // Fill-constructors give the same contents as the original per-element
    // assignments: x = {22,22,22}, y = {44,44,44}.
    thrust::host_vector<float> h_x(3, 22.0f);
    thrust::host_vector<float> h_y(3, 44.0f);

    // Transfer inputs to the device.
    thrust::device_vector<float> d_x = h_x;
    thrust::device_vector<float> d_y = h_y;
    thrust::device_vector<float> d_z(3);

    // z = x + y
    thrust::transform(d_x.begin(), d_x.end(), d_y.begin(), d_z.begin(), thrust::plus<float>());

    // Bring the result back and print it.
    thrust::host_vector<float> h_z = d_z;
    std::cout << h_z[0] << ", " << h_z[1] << ", " << h_z[2] << std::endl;
    return 0;
}
13,027 | #include "includes.h"
// Tiled matrix multiply C = A x B using 16x16 thread blocks and shared-memory
// tiles. a is (hA x wA), b is (wA x wB), c is (hA x wB).
// Preconditions: wA, wB and hA are multiples of 16 (there are no edge guards).
__global__ void d_MM_OPT( float *a, float *b, float *c, int wA, int wB, int hA)
{
#define blockTile 16
/* Blocksize is 16x16 */
/* Allocate shared memory */
__shared__ float aBlock[blockTile][blockTile];
__shared__ float bBlock[blockTile][blockTile];
/* Calculate global index X, Y */
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int gx = blockDim.x * bx + tx; // column
int gy = blockDim.y * by + ty; // row
/* Compute offset idx for A & B */
// First A index (row shift) Block.row * Block.width * A.width
int a0 = wA * 16 * by;
// aBegin -> last element in row -> + width - 1
int aZ = a0 + wA - 1;
// Column block iteration = blockDim.x
int aD = 16;
// b_0 -> Column Shift
int b0 = 16 * bx;
// Row block iteration = blockDim.y * width B
int bD = 16 * wB;
float sum = 0.f;
// March the A tile across its row and the B tile down its column.
for(int aI = a0, bI = b0; aI <= aZ; aI += aD, bI += bD)
{
/* Stage one 16x16 tile of each operand into shared memory */
aBlock[ty][tx] = a[aI + ty*wA + tx];
bBlock[ty][tx] = b[bI + ty*wB + tx];
/* Make sure all of the threads have updated the memory cache */
__syncthreads();
/* Partial dot product over this tile */
for(int k=0; k < 16; k++)
{
sum += aBlock[ty][k] * bBlock[k][tx];
}
/* Fix: barrier before the next iteration reloads the tiles — without it,
   fast threads overwrite aBlock/bBlock while slower threads are still
   reading them, a data race producing wrong products. */
__syncthreads();
}
c[gy*wB + gx] = sum;
}
13,028 | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
#define WIDTH 6
#define HEIGHT 2
#define CHECK_CUDART(x) do { \
cudaError_t res = (x); \
if(res != cudaSuccess) { \
fprintf(stderr, "CUDART: %s = %d (%s) at (%s:%d)\n", #x, res, cudaGetErrorString(res),__FILE__,__LINE__); \
exit(1); \
} \
} while(0)
// Prints every texel of the WIDTH x HEIGHT texture via a texture fetch.
// Uses integer coordinates; with default (unfiltered) sampling these address
// the texels directly.
__global__ void printGpu_tex(cudaTextureObject_t tex) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= WIDTH || row >= HEIGHT)
        return;
    const float x = tex2D<float>(tex, col, row);
    printf("tex2D<float>(tex, %d, %d) = %f \n", col, row, x);
}
// Prints every element of the pitched buffer by direct indexing.
// `pitch` is given in ELEMENTS (the caller divides the byte pitch by
// sizeof(float)).
__global__ void printGpu_vanilla(float* d_buffer, int pitch) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= WIDTH || row >= HEIGHT)
        return;
    const float x = d_buffer[row * pitch + col];
    printf("d_buffer[%d][%d] = %f \n", row, col, x);
}
// Uploads a 6x2 float grid into pitched device memory, wraps it in a CUDA 5
// texture object, and prints the contents both through the texture path and
// through direct pitched indexing.
int main() {
    int width = WIDTH;
    int height = HEIGHT;
    float h_buffer[12] = {1,2,3,4,5,6,7,8,9,10,11,12};
    float* d_buffer;
    size_t pitch;
    CHECK_CUDART(cudaMallocPitch(&d_buffer, &pitch, sizeof(float)*width, height));
    CHECK_CUDART(cudaMemset2D(d_buffer, pitch, 0, pitch, height));
    CHECK_CUDART(cudaMemcpy2D(d_buffer, pitch, &h_buffer, sizeof(float)*width, sizeof(float)*width, height, cudaMemcpyHostToDevice));
    // Fix: pitch is a size_t — %d is undefined behaviour on LP64; use %zu.
    printf("pitch = %zu \n", pitch);

    //CUDA 5 texture objects: https://developer.nvidia.com/content/cuda-pro-tip-kepler-texture-objects-improve-performance-and-flexibility
    cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypePitch2D;
    resDesc.res.pitch2D.devPtr = d_buffer;
    resDesc.res.pitch2D.pitchInBytes = pitch;
    resDesc.res.pitch2D.width = width;
    resDesc.res.pitch2D.height = height;
    resDesc.res.pitch2D.desc.f = cudaChannelFormatKindFloat;
    resDesc.res.pitch2D.desc.x = 32; // bits per channel
    resDesc.res.pitch2D.desc.y = 0;
    cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.readMode = cudaReadModeElementType;
    cudaTextureObject_t tex;
    CHECK_CUDART(cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL));

    dim3 grid(1, 1, 1); //assume one small block
    dim3 block(WIDTH, HEIGHT, 1);
    printGpu_tex<<<grid, block>>>(tex);
    CHECK_CUDART(cudaGetLastError());
    printGpu_vanilla<<<grid, block>>>(d_buffer, pitch/sizeof(float));
    CHECK_CUDART(cudaGetLastError());
    // Fix: flush device-side printf before tearing down — without a sync the
    // process could exit with kernel output still buffered.
    CHECK_CUDART(cudaDeviceSynchronize());

    cudaDestroyTextureObject(tex);
    cudaFree(d_buffer);
}
|
13,029 | #include <stdio.h>
// Stub: intended to be a single-warp SAXPY (y = a*x + y) with a
// thread-strided loop, but the entire body — including the unboxing of the
// packed arguments from `input` — is commented out, so this device function
// is currently a no-op.
__device__
void saxpy(void *input){
//void saxpy(int num_threads, int n, float a, float *d_x, float *d_y){
// unbox input
/*
int num_threads = 32;
int i = threadIdx.x;
// This loop performs 3 floating point ops per iteration.
while(i<n){
d_y[i] = a*d_x[i] + d_y[i];
i = i+num_threads;
}
*/
}
|
13,030 | #include <stdio.h>
#define N 10
// Element-wise vector addition: c[i] = a[i] + b[i], one thread per element.
// Designed for a single-block <<<1, N>>> launch.
__global__ void addvec(int *a, int *b, int *c)
{
    const int tid = threadIdx.x; // this thread handles exactly one element
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}
// Adds two N-element integer vectors on the GPU and prints each sum.
int main(void)
{
    int a[N], b[N], c[N], i;
    int *dev_a, *dev_b, *dev_c;

    // Allocate space in GPU memory.
    cudaMalloc((void**)&dev_a, N * sizeof(int));
    cudaMalloc((void**)&dev_b, N * sizeof(int));
    cudaMalloc((void**)&dev_c, N * sizeof(int));

    // Initialize the inputs on the CPU: a holds evens, b holds odds.
    for (i = 0; i < N; i++)
    {
        a[i] = i * 2;
        b[i] = i * 2 + 1;
    }

    // Copy the input vectors to the GPU.
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);

    // Launch the kernel: one block, one thread per element.
    addvec<<<1, N>>>(dev_a, dev_b, dev_c);

    // Retrieve the result from the GPU (blocking copy also synchronizes).
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);

    // Show the result.
    for (i = 0; i < N; i++)
    {
        printf("%d+%d=%d\n", a[i], b[i], c[i]);
    }

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
13,031 | #include "includes.h"
// Sets the interior cells of the bottom ghost row of a
// (block_size+2) x (block_size+2) temperature grid to the boundary value 1.0.
__global__ void bottomBoundaryKernel(double* temperature, int block_size) {
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (col >= block_size)
        return;
    const int width = block_size + 2;             // row stride incl. ghost cells
    temperature[width * (block_size + 1) + (1 + col)] = 1.0;
}
13,032 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <cmath>
#include <ctime>
const int chunkCount = 1 << 20; // 2^20 ~ 10^6
const int totalCount = chunkCount << 3; // 2^23 ~ 8*10^6
// add two numbers together and take error function of result, store in array
// Adds two numbers and takes the error function of the result:
// c[i] = erf(a[i] + b[i]) for the first chunkCount elements.
__global__ void kernel(float* a, float* b, float* c) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= chunkCount)
        return;
    c[idx] = erff(a[idx] + b[idx]);
}
// Two-stream copy/compute overlap demo: processes totalCount elements in
// chunkCount pieces, interleaving H2D copies, kernels, and D2H copies across
// two streams.
int main() {
    // get device properties
    cudaDeviceProp prop;
    int device;
    cudaGetDevice(&device);
    cudaGetDeviceProperties(&prop, device);
    // if device overlap is not possible, we can't do this demo
    if (!prop.deviceOverlap) {
        printf("Device does not have GPU_OVERLAP\n");
        exit(0);
    }

    // initialize events
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);

    // two streams so the two in-flight chunks can interleave
    cudaStream_t stream1;
    cudaStreamCreate(&stream1);
    cudaStream_t stream2;
    cudaStreamCreate(&stream2);

    // host arrays plus one device buffer set per stream
    float *ha, *hb, *hc, *d1a, *d1b, *d1c, *d2a, *d2b, *d2c;
    const int totalSize = totalCount * sizeof(float);
    const int chunkSize = chunkCount * sizeof(float);
    cudaMalloc(&d1a, chunkSize);
    cudaMalloc(&d1b, chunkSize);
    cudaMalloc(&d1c, chunkSize);
    cudaMalloc(&d2a, chunkSize);
    cudaMalloc(&d2b, chunkSize);
    cudaMalloc(&d2c, chunkSize);
    // pinned host memory is required for cudaMemcpyAsync to be truly async
    cudaHostAlloc(&ha, totalSize, cudaHostAllocDefault);
    cudaHostAlloc(&hb, totalSize, cudaHostAllocDefault);
    cudaHostAlloc(&hc, totalSize, cudaHostAllocDefault);

    // fill a and b with some random values
    srand((unsigned) time(0));
    for (int i=0; i < totalCount; i++) {
        // Fix: rand()/RAND_MAX is integer division (almost always 0);
        // divide as float to actually land in [0,1].
        ha[i] = rand()/(float)RAND_MAX;
        hb[i] = rand()/(float)RAND_MAX;
    }

    // start recording on stream1
    cudaEventRecord(start, stream1);

    // two chunks per iteration, one per stream
    for (int i=0; i<totalCount; i+=2*chunkCount) {
        int i1 = i;
        int i2 = i + chunkCount;
        cudaMemcpyAsync(d1a, ha+i1, chunkSize, cudaMemcpyHostToDevice, stream1);
        cudaMemcpyAsync(d2a, ha+i2, chunkSize, cudaMemcpyHostToDevice, stream2);
        // Fix: the b-buffers were copied from ha, so hb never reached the device.
        cudaMemcpyAsync(d1b, hb+i1, chunkSize, cudaMemcpyHostToDevice, stream1);
        cudaMemcpyAsync(d2b, hb+i2, chunkSize, cudaMemcpyHostToDevice, stream2);
        kernel<<<chunkCount/64,64,0,stream1>>>(d1a, d1b, d1c);
        kernel<<<chunkCount/64,64,0,stream2>>>(d2a, d2b, d2c);
        // Fix: D2H copies must be issued on their stream — without the stream
        // argument they ran on the default stream and serialized everything.
        cudaMemcpyAsync(hc+i1, d1c, chunkSize, cudaMemcpyDeviceToHost, stream1);
        cudaMemcpyAsync(hc+i2, d2c, chunkSize, cudaMemcpyDeviceToHost, stream2);
    }

    // wait until both streams drain, then record the end event
    cudaStreamSynchronize(stream1);
    cudaStreamSynchronize(stream2);
    cudaEventRecord(end);
    cudaEventSynchronize(end);

    // get total elapsed time
    float elapsed;
    cudaEventElapsedTime(&elapsed, start, end);
    printf("This took %f ms\n", elapsed);

    // free memory
    cudaFreeHost(ha);
    cudaFreeHost(hb);
    cudaFreeHost(hc);
    // Fix: free each device buffer exactly once — previously d1a and d2c
    // were freed twice while d1c and d2a leaked.
    cudaFree(d1a);
    cudaFree(d1b);
    cudaFree(d1c);
    cudaFree(d2a);
    cudaFree(d2b);
    cudaFree(d2c);
    // Fix: destroy the timing events (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaStreamDestroy(stream1);
    cudaStreamDestroy(stream2);
}
// executing this on my device results in:
// This took 8.091616 ms
// compare this with the single-stream application, which had an
// elapsed time of 8.373248 ms (3.4% reduction). it isn't a super
// crazy speedup, but it is a speedup nonetheless thanks to streams |
13,033 | #include "includes.h"
// Element-wise vector addition: c[idx] = a[idx] + b[idx] for idx < N.
__global__ void vevAdd(int N, float *a, float *b, float *c)
{
    // Work index; the dimensions are chosen at kernel-launch time.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    c[idx] = a[idx] + b[idx];
}
13,034 | #include "includes.h"
// Fused embedding gather-backward + SGD step: for each lookup in this
// block's query, applies params[row] -= lr * grad for the corresponding
// gradient slice, accumulating via atomicAdd since rows can repeat.
// Launch assumptions (inferred from the indexing — confirm at call site):
//  - one block per query (bid indexes queries; batch_size presumably equals gridDim.x);
//  - blockDim.x == embed_size, since `tid` is used directly as the feature
//    offset with no bounds guard;
//  - dynamic shared memory of at least query_nnz * sizeof(int) bytes.
// NOTE(review): indices are narrowed from int64_t to int in shared memory,
// then widened back for the params offset — rows above INT_MAX would be
// corrupted; confirm num_features fits in 32 bits.
__global__ void GatherBackwardFuseSgdKernel(const float* grads, int64_t num_features, int embed_size, int batch_size, int query_nnz, const int64_t* indices, float lr, float* params) {
int tid = threadIdx.x, bid = blockIdx.x;
extern __shared__ int shmem_indices[];
// Stage this query's row indices into shared memory (strided cooperative load).
for (int i = tid; i < query_nnz; i += blockDim.x) {
shmem_indices[i] = indices[query_nnz * bid + i];
}
__syncthreads();
#pragma unroll
for (int i = 0; i < query_nnz; ++i) {
// atomicAdd: the same embedding row may appear in several queries/slots.
atomicAdd(&params[(int64_t)shmem_indices[i] * embed_size + tid],
-lr * grads[(bid * query_nnz + i) * embed_size + tid]);
}
}
13,035 | // * http://llpanorama.wordpress.com/2008/05/21/my-first-cuda-program/
/*square array on GPU Written by me */
#include<stdio.h>
#include<cuda.h>
#include<time.h>
/******************************************************************/
//kernel that execute on the CUDA device "Tesla" for example
/******************************************************************/
// Squares each element of `a` in place: a[i] = a[i]^2 for every i < N.
__global__ void square_array(float *a, int N) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        a[i] = a[i] * a[i];
}
/******************************************************************/
//main function that executes the host : the CPU
/******************************************************************/
// Host driver: squares N floats on the GPU, prints them, and reports the
// total wall-clock time of the run.
int main(void) {
    // Time the entire run, allocation to teardown.
    clock_t start = clock();

    const int N = 10000;                 // number of elements in the array
    size_t size = N * sizeof (float);

    float *a_h = (float *)malloc(size);  // host-side array (CPU allocation)
    float *a_d;                          // device-side array (GPU allocation)
    cudaMalloc((void **)&a_d, size);

    // Initialize on the CPU: a[i] = i.
    for (int i = 0; i < N; i++)
        a_h[i] = (float)i;

    // Ship the data to the GPU, square it there, and bring it back.
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    int block_size = 4;
    int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);  // ceiling division
    square_array <<< n_blocks, block_size >>> (a_d, N);
    cudaMemcpy(a_h, a_d, size, cudaMemcpyDeviceToHost);

    // Print the result array.
    for (int i = 0; i < N; i++)
        printf("%d %f\n ", i, a_h[i] );

    // Free host and device arrays.
    free(a_h);
    cudaFree(a_d);

    // Report elapsed time.
    clock_t end = clock();
    double elapsed = ((double) end - start) / CLOCKS_PER_SEC;
    printf("%.2f secondes entre start et end. \n", elapsed);
}
|
13,036 | #include "knn.cuh"
#include <iostream>
// For each tile (one block per tile, 256 threads = one per histogram bin),
// computes the squared Euclidean distance from the tile's 256-bin histogram
// to each of the 16 cluster centroids, then writes the index of the nearest
// centroid into results[tile_index].
// histo_pitch / cluster_pitch are row pitches in BYTES (from cudaMallocPitch).
__global__ void compute_nearest_neighbors(const int* histo_tab,
const size_t histo_pitch,
const float* clusters,
const size_t cluster_pitch,
int* results,
const size_t tiles_number)
{
size_t x = threadIdx.x;
size_t tile_index = blockIdx.x;
if (x >= 256 || tile_index >= tiles_number)
return;
// Per-block accumulators, one per cluster; zeroed by thread 0 only.
__shared__ float cluster_distances[16];
for (auto y = 0; x == 0 && y < 16; y++)
cluster_distances[y] = 0;
__syncthreads();
// This thread's histogram bin for the current tile.
int value = *(histo_tab + tile_index * histo_pitch / sizeof(int) + x);
for (auto y = 0; y < 16; y++)
{
float cluster_value = *(clusters + y * cluster_pitch / sizeof(float) + x);
// Euclidean distance (squared term for this bin; sqrt is unnecessary
// for an argmin, so only squared distances are accumulated)
float local_distance = (cluster_value - value) * (cluster_value - value);
atomicAdd(cluster_distances + y, local_distance);
}
__syncthreads();
// Thread 0 performs the argmin over the 16 accumulated distances.
if (x != 0)
return;
auto result_ptr = results + tile_index;
*result_ptr = 0;
for (int i = 1; i < 16; i++)
{
if (cluster_distances[*result_ptr] > cluster_distances[i])
*result_ptr = i;
}
}
// Host wrapper: uploads the per-tile histograms (256 bins each) and the 16
// cluster centroids to pitched device memory, launches the nearest-neighbor
// kernel (one block per tile, 256 threads), and returns a malloc'd array of
// tiles_number centroid indices. Caller owns (and must free) the result.
int* k_nearest_neighbors(const int* histo_tab, const float* clusters, const size_t tiles_number)
{
    cudaError_t rc = cudaSuccess;

    // Pitched upload of the tile histograms.
    int* cuda_histo_tab;
    size_t cuda_histo_tab_pitch;
    auto tile_byte_size = 256 * sizeof(int);
    rc = cudaMallocPitch(&cuda_histo_tab, &cuda_histo_tab_pitch, tile_byte_size, tiles_number);
    if (rc)
    {
        std::cout << "Could not allocate memory for tiles histogram"
                  << "on the device when computing nearest_neighbors\n";
        exit(EXIT_FAILURE);
    }
    rc = cudaMemcpy2D(cuda_histo_tab, cuda_histo_tab_pitch, histo_tab, tile_byte_size,
                      tile_byte_size, tiles_number, cudaMemcpyHostToDevice);
    if (rc)
    {
        std::cout << "Could not copy memory for tiles histogram"
                  << "on the device when computing nearest_neighbors\n";
        exit(EXIT_FAILURE);
    }

    // Pitched upload of the 16 centroids.
    float* cuda_clusters;
    size_t cuda_clusters_pitch;
    auto cluster_byte_size = 256 * sizeof(float);
    rc = cudaMallocPitch(&cuda_clusters, &cuda_clusters_pitch, cluster_byte_size, 16);
    if (rc)
    {
        std::cout << "Could not allocate memory for clusters"
                  << "on the device when computing nearest neighbors\n";
        exit(EXIT_FAILURE);
    }
    rc = cudaMemcpy2D(cuda_clusters, cuda_clusters_pitch, clusters, cluster_byte_size,
                      cluster_byte_size, 16, cudaMemcpyHostToDevice);
    if (rc)
    {
        std::cout << "Could not copy memory for clusters"
                  << "on the device when computing nearest neighbors\n";
        exit(EXIT_FAILURE);
    }

    // Output buffer: one centroid index per tile.
    int* result;
    rc = cudaMalloc(&result, sizeof(int) * tiles_number);
    if (rc)
    {
        std::cout << "Could not allocate memory for nearest neighbors result"
                  << "on the device when computing nearest neighbors\n";
        exit(EXIT_FAILURE);
    }

    // One block per tile, one thread per histogram bin.
    // (Removed the unused 256x16 dim3 — the launch only ever used <<<tiles, 256>>>.)
    compute_nearest_neighbors<<<tiles_number, 256>>>(cuda_histo_tab, cuda_histo_tab_pitch,
        cuda_clusters, cuda_clusters_pitch, result, tiles_number);

    // Blocking copy back (also synchronizes with the kernel).
    int* output = (int*) malloc(sizeof(int) * tiles_number);
    rc = cudaMemcpy(output, result, sizeof(int) * tiles_number, cudaMemcpyDeviceToHost);

    // Fix: cuda_histo_tab was never freed (device memory leak per call).
    cudaFree(cuda_histo_tab);
    cudaFree(cuda_clusters);
    cudaFree(result);
    return output;
}
|
13,037 | #include "includes.h"
// One Floyd-Warshall relaxation step for the fixed intermediate vertex k:
// D[i][j] = min(D[i][j], D[i][k] + D[k][j]); when the path through k is
// shorter, k is recorded in the predecessor matrix q for path reconstruction.
// N is defined externally (includes.h).
// NOTE(review): there is no i/j bounds guard — the launch grid must cover
// exactly N x N threads in both dimensions; confirm at the call site.
__global__ void kernel(int* D, int* q, int k){
// Find index of i row and j column of the distance array
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(D[i * N + j] > D[i * N + k] + D[k * N + j])
{
D[i * N + j] = D[i * N + k] + D[k * N + j];
q[i * N + j] = k;
}
}
13,038 | // Matrix multiplication kernel thread specification
// Computes P = M * N for row-major matrices.
// M is Mat1_Height x Mat1_Width, N is Mat2_Height x Mat2_Width
// (caller must ensure Mat1_Width == Mat2_Height), and P is
// Mat1_Height x Mat3_Width with Mat3_Width == Mat2_Width.
// One thread computes one element of P; out-of-range threads exit.
__global__ void MatrixMulKernel( double * P ,const double * M, const double * N , const int Mat1_Width, const int Mat1_Height, const int Mat2_Width, const int Mat2_Height , const int Mat3_Width)
{
int Row = blockIdx.y*blockDim.y+threadIdx.y;
int Col = blockIdx.x*blockDim.x+threadIdx.x;
double Pvalue = 0.0;
if((Row < Mat1_Height) && (Col < Mat2_Width))
{
// Dot product of row `Row` of M with column `Col` of N.
for (int k = 0; k < Mat1_Width ; ++k)
{
Pvalue += M[Row*Mat1_Width + k] * N[k*Mat2_Width + Col];
}
// BUG FIX: the result was stored at P[Col*Mat3_Width + Row], a
// transposed index that is also out of bounds for non-square outputs
// (Col*Mat3_Width + Row can exceed Mat1_Height*Mat3_Width). Row-major
// element (Row, Col) lives at Row*Mat3_Width + Col.
P[Row*Mat3_Width + Col] = Pvalue;
}
}
13,039 | #include "cooktorrance_implement.h"
#include "brdf_common.h"
// Evaluates a Cook-Torrance specular BRDF term per grid point and scales
// the light direction L by it, writing the result back into pos.
// Helpers (calculateL, add, normalize, dot, scale, beckmann, fresnel)
// come from brdf_common.h / cooktorrance_implement.h.
// NOTE(review): no bounds check on x/y — assumes the launch exactly
// covers the pos grid (width divides evenly); confirm at call sites.
__global__ void
cooktorrance_kernel(float3* pos, unsigned int width, float3 V, float3 N, float m, float f0)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// L: light direction derived from the position buffer; H: half vector.
float3 L = calculateL(pos, width, x, y);
float3 H = normalize(add(L, V));
float NdotH = dot(N, H);
float VdotH = dot(V, H);
float NdotL = dot(N, L);
float NdotV = dot(N, V);
// D: Beckmann microfacet distribution; F: Fresnel (Schlick-style) term.
float D = beckmann(m, NdotH);
float F = fresnel(f0, VdotH);
// Geometric attenuation G = min(1, 2(N.H)(N.V)/(V.H), 2(N.H)(N.L)/(V.H));
// the doubling of NdotH folds the factor 2 into both candidates.
NdotH = NdotH + NdotH;
float G = min(2.f * NdotH * NdotL / VdotH, 2.f * NdotH * NdotV / VdotH);
G = min(1.f, G);
// Cook-Torrance: D*G*F / (pi * (N.L) * (N.V)).
// NOTE(review): no guard against NdotL/NdotV == 0 — division may
// produce inf/NaN at grazing angles; confirm inputs avoid this.
float val = D * G * F / NdotL / NdotV / 3.1415927f;
pos[y*width+x] = scale(L, val);
}
// Host launcher for cooktorrance_kernel: one thread per vertex, laid out
// as a width x (numVertices/width) grid of 8x8 blocks. Assumes width and
// the derived height are multiples of 8 (no remainder blocks are added).
extern "C" void cooktorrance_brdf(float3 *pos, unsigned numVertices, unsigned width, float3 V, float3 N, float m, float f0)
{
const dim3 threads(8, 8, 1);
const unsigned height = numVertices / width;
const dim3 blocks(width / threads.x, height / threads.y, 1);
cooktorrance_kernel<<< blocks, threads>>>(pos, width, V, N, m, f0);
}
|
13,040 | #include "includes.h"
// Writes the transpose of src into dest: element (i, j) of src (leading
// dimension ld_src) lands at (j, i) of dest (leading dimension ld_dest).
// One thread per source element; threads with index >= sz do nothing.
__global__ void transpose_kernel(size_t sz, float_t* src, float_t* dest, size_t ld_src, size_t ld_dest)
{
const size_t src_idx = blockIdx.x*blockDim.x + threadIdx.x;
if(src_idx < sz)
{
const size_t row = src_idx / ld_src;
const size_t col = src_idx % ld_src;
dest[col*ld_dest + row] = src[src_idx];
}
}
13,041 | #include <cstdlib>
#include <ctime>
#include <cstdio>
#include <vector>
#include <chrono>
using namespace std;
// Problem size: n vertices, n_feature floats of features per vertex.
const int n = 10000;
const int n_feature = 128;
// Edge probability, and a generous upper bound on the total edge count
// (expected edges = n*n*p; the factor 3 is headroom for random variance).
const double p = .1;
const int max_e = (int)(n * n * p * 3);
size_t f_size = n * n_feature * sizeof(float);
// NOTE(review): adjacent_matrix is never used in this file — candidate
// for removal.
vector<int> adjacent_matrix[n];
// Host input/output feature buffers and their device mirrors.
float f1[n * n_feature], f2[n * n_feature];
float *d_f1, *d_f2;
// CSR graph: indices[v] is the offset of vertex v's neighbor slice in
// values (indices[n] is the sentinel). Host and device copies.
int indices[max_e], values[max_e];
int *d_indices, *d_values;
// Builds a random sparse graph in CSR form (indices/values), fills f1
// with random features in [0, 1), zeroes f2, and uploads everything to
// the device. rand() call order matches the data layout exactly.
void prepareData() {
srand(time(0));
const int k = 1e6;
int edge_count(0);
for (int src = 0; src < n; ++src) {
indices[src] = edge_count; // CSR row offset for vertex src
// Keep each candidate edge (src, dst) with probability p.
for (int dst = 0; dst < n; ++dst) {
if (rand() % k < p * k) {
values[edge_count++] = dst;
}
}
// Random input features; output features start at zero.
for (int f = 0; f < n_feature; ++f) {
f1[src * n_feature + f] = rand() % k / (float)k;
f2[src * n_feature + f] = 0;
}
}
indices[n] = edge_count; // CSR sentinel
printf("Graph edges %d\n", edge_count);
cudaMalloc(&d_indices, (n + 1)* sizeof(int));
cudaMemcpy(d_indices, indices, (n + 1)* sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc(&d_values, edge_count * sizeof(int));
cudaMemcpy(d_values, values, edge_count * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc(&d_f1, f_size);
cudaMemcpy(d_f1, f1, f_size, cudaMemcpyHostToDevice);
cudaMalloc(&d_f2, f_size);
cudaMemset(d_f2, 0, f_size);
cudaDeviceSynchronize();
}
// Elapsed wall-clock time from a to b, expressed in seconds.
inline double getDuration(std::chrono::time_point<std::chrono::system_clock> a,
                          std::chrono::time_point<std::chrono::system_clock> b) {
  const std::chrono::duration<double> elapsed = b - a;
  return elapsed.count();
}
#define timestamp(__var__) auto __var__ = std::chrono::system_clock::now();
// Sums neighbor features: for each vertex i, f2[i] = sum of f1[j] over
// CSR neighbors j of i. One thread per vertex; accumulation happens in a
// per-thread local array to avoid repeated global writes.
// NOTE: the launch in runOnce uses 128*128 = 16384 threads while n = 10000,
// so the whole graph is covered in a single pass.
__global__
void graph_add(int n_feature, int n, int* indices, int* values, float* f1, float* f2) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// The parameter n_feature shadows the file-scope constant; the array
// size must be a compile-time constant, hence the qualified ::n_feature
// (128). Callers must pass n_feature <= ::n_feature or l_f2 overflows.
float l_f2[::n_feature];
if (i < n) {
for (int k = 0; k < n_feature; ++k) {
l_f2[k] = 0;
}
// Accumulate features of every CSR neighbor of vertex i.
for (int j = indices[i]; j < indices[i + 1]; ++j) {
for (int k = 0; k < n_feature; ++k) {
l_f2[k] += f1[values[j] * n_feature + k];
}
}
// Single coalesced-ish write-back of the finished row.
for (int k = 0; k < n_feature; ++k) {
f2[i * n_feature + k] = l_f2[k];
}
}
}
// Runs one aggregation pass: launches graph_add, times the kernel,
// copies the result back, prints a checksum for sanity, and returns the
// kernel wall time in seconds.
double runOnce() {
timestamp(kernel_begin);
graph_add<<<128, 128>>>(n_feature, n, d_indices, d_values, d_f1, d_f2);
cudaDeviceSynchronize();
timestamp(kernel_end);
cudaMemcpy(f2, d_f2, f_size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Fold every output feature into a single checksum.
float sum(0);
for (int v = 0; v < n; ++v)
for (int f = 0; f < n_feature; ++f)
sum += f2[v * n_feature + f];
fprintf(stderr, "F20 %f Check sum %f\n", f2[0], sum);
return getDuration(kernel_begin, kernel_end);
}
// Benchmark driver: prepare data once, run the kernel `times` times, and
// report the average kernel-only duration.
int main() {
prepareData();
fprintf(stderr, "Ready\n");
const int times = 10;
double total_time = 0;
for (int run = 0; run < times; ++run)
total_time += runOnce();
fprintf(stderr, "Avg time %.9lf s\n", total_time / times);
}
|
#include <stdio.h>
#include <stdlib.h>
#include <limits.h> /* BUG FIX: UINT_MAX requires <limits.h>, which was missing */

/*
 * Deliberately requests UINT_MAX (~4 GiB) of device memory so the
 * cudaMalloc failure path can be exercised: prints the CUDA error string
 * and exits non-zero if the allocation fails.
 */
int main(void) {
  int *ptr = 0;
  // gimme!
  cudaError_t error = cudaMalloc((void **)&ptr, UINT_MAX);
  if (error != cudaSuccess) {
    // print the CUDA error message and exit
    printf("CUDA error: %s\n", cudaGetErrorString(error));
    exit(-1);
  }
  cudaFree(ptr); /* release the memory if the allocation actually succeeded */
  return 0;
}
|
13,043 | #include <stdio.h>
// N bytes of int8 data, reinterpreted as N_p = N/4 packed 32-bit words.
unsigned int N = 1 << 12;
unsigned int N_p = N/4;
// Element-wise in-place product: y[i] *= x[i] for every i in [0, n).
__global__
void mul(unsigned int n, unsigned int *x, unsigned int *y)
{
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= n) return; // guard the grid tail
y[idx] = x[idx] * y[idx];
}
// Packs two int8 arrays into N_p uint32 words, multiplies word-wise on the
// GPU, then checks each byte against (i%16)^2. Note a 32-bit multiply of
// packed bytes is NOT a per-byte multiply (cross-byte carries leak), which
// is what the mismatch report demonstrates.
int main(void)
{
  unsigned int *d_x, *d_y;
  int8_t *x, *y;
  x = (int8_t*)malloc(N*sizeof(int8_t));
  y = (int8_t*)malloc(N*sizeof(int8_t));
  // Device buffers hold the same N bytes viewed as N_p 32-bit words.
  cudaMalloc(&d_x, N_p*sizeof(unsigned int));
  cudaMalloc(&d_y, N_p*sizeof(unsigned int));
  for (unsigned int i = 0; i < N; i++) {
    x[i] = i%16;
    y[i] = i%16;
  }
  cudaMemcpy(d_x, (unsigned int*) x, N_p*sizeof(unsigned int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, (unsigned int*) y, N_p*sizeof(unsigned int), cudaMemcpyHostToDevice);
  cudaDeviceSynchronize();
  // Multiply the N_p packed words element-wise.
  mul<<<(N_p+255)/256, 256>>>(N_p, d_x, d_y);
  cudaDeviceSynchronize();
  cudaMemcpy(y, d_y, N*sizeof(int8_t), cudaMemcpyDeviceToHost);
  cudaDeviceSynchronize();
  int8_t maxError = 0;
  for (unsigned int i = 0; i < N; i++) {
    //maxError = max(maxError, (y[i]*y[i]-(int8_t)(((i%256*i%256)%256))));
    if (y[i] != (int8_t)((i%16)*(i%16))%256)
      printf("Elements at pos %d not matching: y[i]=%x, expected = %x, i*i=%x\n", i,(int8_t)y[i], (int8_t)(x[i]*x[i]), ((i%16)*(i%16))%256);
  }
  printf("Max error: %d\n", maxError);
  // BUG FIX: device and host buffers were leaked, and a no-op self cast
  // (`y = (int8_t*) y;`) was removed.
  cudaFree(d_x);
  cudaFree(d_y);
  free(x);
  free(y);
  return 0;
}
|
13,044 | /*
============================================================================
Name : binconnected_components.cu
Author : Diptanshu, Gaurav
Version :
Copyright : (c) 2018
Description : CUDA compute reciprocals
============================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define SWAP(a, b) {int swp=a; a=b; b=swp;}
#define MAX_HEIGHT 10
int numIterations;
/*
* */
// One level of parallel BFS. Each thread takes one vertex v from the input
// frontier (size *s1), claims unvisited neighbors via atomicCAS on parent[],
// appends them to the output frontier (size counter *s2), and classifies
// each incident edge in treeEdges:
//   -1 = tree edge (v -> newly claimed child)
//   -2 = back edge to parent, or non-tree edge recorded from the other side
//    v = non-tree edge, recorded only in the direction with v < neighbor
// Side effect: the edge to v's parent is swapped to the front of v's
// adjacency slice so adjList[offset[v]] is always the parent edge.
__global__ void bfs(int *adjList, int *offset, int *inpFrontier, int *outFrontier,
int *parent, int *visited, int *treeEdges, int *s1, int *s2)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < *s1 && visited[inpFrontier[tid]] == 0) {
int v = inpFrontier[tid]; // Current vertex
// Put all the unvisited neighbors into outFrontier
for (int i = offset[v]; i < offset[v + 1]; i++) {
// atomicCAS ensures exactly one thread claims each child.
if (!visited[adjList[i]] && atomicCAS(&parent[adjList[i]], -1, v) == -1) {
int old = atomicAdd(s2, 1);
outFrontier[old] = adjList[i];
treeEdges[i] = -1;
}
else if (adjList[i] == parent[v]) {
treeEdges[i] = -2;
// Place the parent as the first element in adjList
if (i != offset[v]) {
SWAP(adjList[offset[v]], adjList[i]);
SWAP(treeEdges[offset[v]], treeEdges[i]);
}
}
else if (v < adjList[i]) {
// Non tree edge, mark only in one direction such that a < b for any non-tree edge a->b.
treeEdges[i] = v;
}
else {
treeEdges[i] = -2;
}
}
visited[v] = 1;
}
}
/*
*
* */
// One thread per non-tree edge (a, b): walks both endpoints up the BFS tree
// to the root, finds their lowest common ancestor, marks every tree edge on
// both paths with this thread's id (threadEdge), flags interior path
// vertices as "unfinished", and registers an auxiliary vertex for the LCA
// in auxAdjList (pairs of [grandparent, vertex]).
// NOTE(review): paths are stored in fixed MAX_HEIGHT (=10) local arrays —
// trees deeper than 10 levels overflow path_a/path_b; confirm inputs.
// NOTE(review): no tid < nonTreeEdgeCount guard — relies on the launch
// configuration matching the non-tree edge count exactly.
__global__ void lca(int *adjList, int *offset, int *parent, int *nonTreeEdges,
int *unfinished, int *threadEdge, int *lcaThread, int *auxAdjList,
int vertexCount)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x, i = 0, len1, len2;
int a = nonTreeEdges[3 * tid];
int b = nonTreeEdges[3 * tid + 1];
int eid = nonTreeEdges[3 * tid + 2];
int path_a[MAX_HEIGHT], path_b[MAX_HEIGHT];
// Record the root-ward path from a (inclusive) to the root (vertex 0).
while (a != 0)
{
path_a[i++] = a;
a = parent[a];
}
path_a[i++] = 0;
len1 = i;
i = 0;
// Same for b.
while (b != 0)
{
path_b[i++] = b;
b = parent[b];
}
path_b[i++] = 0;
len2 = i;
i = 0;
// Walk down from the root while the two paths agree; the last common
// vertex is the LCA.
while (i < len1 && i < len2 && path_a[len1 - i - 1] == path_b[len2 - i - 1])
i++;
int lcaVertex = path_a[len1 - i];
//printf("Edge %d: %d %d LCA %d\n", eid, nonTreeEdges[3 * tid], nonTreeEdges[3 * tid + 1], lcaVertex);
len1 -= i;
len2 -= i;
lcaThread[tid] = lcaVertex;
// Mark the non-tree edge visited
threadEdge[eid] = tid;
// Mark the rest of the edges visited and the vertices as part of unfinished traversal
for (i = 0; i < len1; i++) {
threadEdge[offset[path_a[i]]] = tid;
if (i != len1 - 1)
unfinished[path_a[i]] = 1;
}
for (i = 0; i < len2; i++) {
threadEdge[offset[path_b[i]]] = tid;
if (i != len2 - 1)
unfinished[path_b[i]] = 1;
}
__syncthreads();
// Create auxiliary vertex
// Special case for root vertex
// As root vertex doesn't have any parent, we don't set its parent.
if (lcaVertex != 0)
auxAdjList[2 * lcaVertex] = adjList[offset[lcaVertex]];
auxAdjList[2 * lcaVertex + 1] = lcaVertex;
}
// Second LCA pass over the tree augmented with auxiliary vertices.
// Auxiliary vertex for v is addressed as v + vertexCount; its presence is
// signalled by auxAdjList[2*v+1] != -1. Path walks detour through the
// auxiliary node before continuing to the original parent, and visited
// edges are recorded in threadEdge (auxiliary edges live in the
// [edgeCount, edgeCount + vertexCount) tail of the array).
// NOTE(review): same MAX_HEIGHT and launch-size caveats as lca().
__global__ void lca1(int *adjList, int *offset, int *parent, int *nonTreeEdges,
int *unfinished, int *threadEdge, int *lcaThread, int *auxAdjList,
int vertexCount, int edgeCount)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x, i = 0, len1, len2;
int a = nonTreeEdges[3 * tid];
int b = nonTreeEdges[3 * tid + 1];
// Start from the auxiliary twin when one exists.
if (auxAdjList[2 * a + 1] != -1)
a += vertexCount;
if (auxAdjList[2 * b + 1] != -1)
b += vertexCount;
int eid = nonTreeEdges[3 * tid + 2];
int path_a[MAX_HEIGHT], path_b[MAX_HEIGHT];
// Root-ward walk from a, detouring real vertex -> auxiliary -> parent.
while (a != 0)
{
path_a[i++] = a;
if (a < vertexCount && auxAdjList[2 * a + 1] != -1)
a = vertexCount + a;
else if (a >= vertexCount)
a = parent[a - vertexCount];
else
a = parent[a];
}
path_a[i++] = 0;
len1 = i;
i = 0;
// Same walk from b.
while (b != 0)
{
path_b[i++] = b;
if (b < vertexCount && auxAdjList[2 * b + 1] != -1)
b = vertexCount + b;
else if (b >= vertexCount)
b = parent[b - vertexCount];
else
b = parent[b];
}
path_b[i++] = 0;
len2 = i;
i = 0;
// Trim the shared root-side prefix; what remains are the two LCA paths.
while (i < len1 && i < len2 && path_a[len1 - i - 1] == path_b[len2 - i - 1])
i++;
//int lcaVertex = path_a[len1 - i];
//printf("Edge %d: %d %d LCA %d\n", eid, nonTreeEdges[3 * tid], nonTreeEdges[3 * tid + 1], lcaVertex);
len1 -= i;
len2 -= i;
// Mark the non-tree edge visited
threadEdge[eid] = tid;
for (i = 0; i < len1; i++) {
if (path_a[i] >= vertexCount) {
threadEdge[edgeCount + path_a[i] - vertexCount] = tid;
}
else {
threadEdge[offset[path_a[i]]] = tid;
}
}
for (i = 0; i < len2; i++) {
if (path_b[i] >= vertexCount) {
threadEdge[edgeCount + path_b[i] - vertexCount] = tid;
}
else {
threadEdge[offset[path_b[i]]] = tid;
}
}
}
// Splices the auxiliary vertices (created in lca()) into the tree: the LCA
// vertex's parent edge is redirected to its auxiliary twin
// (vertexCount + lcaVertex), and the grandparent's child pointer is updated
// to match. Root LCAs are only counted (the root has no parent edge).
// One thread per non-tree edge, same launch shape as lca().
__global__ void auxGraph(int *adjList, int *offset, int *lcaThread, int vertexCount,
int *rootLCACount, int *auxAdjList) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int lcaVertex = lcaThread[tid];
if (lcaVertex != 0)
adjList[offset[lcaVertex]] = vertexCount + lcaVertex;
else
atomicAdd(rootLCACount, 1);
// Update grandParent's child
int grandParent = auxAdjList[2 * lcaVertex];
for (int i = offset[grandParent]; i < offset[grandParent + 1]; i++) {
if (adjList[i] == lcaVertex) {
adjList[i] = vertexCount + lcaVertex;
break;
}
}
}
// Flags articulation points: an LCA vertex is an articulation point when at
// least one of its tree children was never touched by an LCA path
// ("finished" neighbor => removing the vertex disconnects that subtree).
// The root is special: it is an articulation point only if it is the LCA of
// more than one non-tree edge (*rootLCACount > 1).
// NOTE(review): the printf below is debug output left enabled — consider
// removing it for production runs.
__global__ void markArtPoint(int *adjList, int *offset, int *lcaThread, int *artPoint,
int *unfinished, int *rootLCACount) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int lcaVertex = lcaThread[tid];
bool bridge = false;
for (int i = offset[lcaVertex]; i < offset[lcaVertex + 1]; i++) {
if (!unfinished[adjList[i]]) {
bridge = true;
break;
}
}
printf("vertex %d rootLCACOUnt %d bridge %d\n", lcaVertex, *rootLCACount, bridge);
if (lcaVertex != 0 && bridge)
artPoint[lcaVertex] = 1;
else if (lcaVertex == 0 && bridge && *rootLCACount > 1)
artPoint[0] = 1;
}
/*
* Finds BCC Id for each edge. If an edge was part of the path to an LCA and
* that LCA happens to be an articulation point, we assign the LCA's vertex ID as BCC id to the edge.
* Otherwise, we traverse up the tree to find an LCA which is an articulation point.
*/
// UNFINISHED (per the TODO below): intended to assign each edge the vertex
// id of the nearest articulation-point LCA as its biconnected-component id.
// WARNING: the loop body that advances lcaVertex is commented out, so if
// artPoint[lcaVertex] is 0 this loop never terminates — do not launch this
// kernel until the traversal step is implemented.
__global__ void findBCC(int *adjList, int *offset, int *threadEdge, int *lcaThread,
int *artPoint, int *bccId) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int lcaVertex = threadEdge[tid];
// TODO: Unfinished implementation
// Note: For each undirected edge b/w a-b, only one direction is marked in the threadEdge
if (lcaVertex != -1) {
while (!artPoint[lcaVertex]) {
//lcaVertex = adjList[offset[lcaVertex]];
}
bccId[tid] = lcaVertex;
}
}
// Driver: reads an edge list, builds a CSR graph, runs a level-synchronous
// GPU BFS to classify tree/non-tree edges, then runs the LCA and
// auxiliary-graph kernels used by the biconnected-components pipeline.
// (Commented-out debug dumps from the original were removed as cleanup.)
int main(int argc, char **argv)
{
	// ROBUSTNESS FIX: guard against a missing filename argument.
	if (argc < 2) {
		printf("Usage: %s <edge-list-file>\n", argv[0]);
		return 1;
	}
	char* edgeListFile = argv[1];
	FILE *fp;
	fp = fopen(edgeListFile, "r");
	if (fp == NULL) {
		printf("ERROR: File does not exist!\n");
		return 1;
	}
	int vertexCount, edgeCount;
	fscanf(fp, "%d", &vertexCount);
	fscanf(fp, "%d", &edgeCount);
	printf("VertexCount %d\n", vertexCount);
	printf("EdgeCount %d\n", edgeCount);
	// Graph in CSR format: adjList concatenates all adjacency lists,
	// offset[v] is the start of vertex v's slice (with a sentinel so that
	// degree(v) = offset[v+1] - offset[v]).
	int *adjList;
	int *offset;
	size_t adjListSize = edgeCount * sizeof(int);
	size_t offsetSize = (vertexCount + 1) * sizeof(int);
	size_t verticesSize = vertexCount * sizeof(int);
	adjList = (int *)malloc(adjListSize);
	offset = (int *)malloc(offsetSize);
	int edgeCounter = 0, vertexCounter = 0;
	int prevSource, source, dest;
	fscanf(fp, "%d %d", &prevSource, &dest);
	// Convert the (source-sorted) edge list to CSR.
	while (edgeCounter != edgeCount) {
		while (vertexCounter <= prevSource) // Includes the vertices with no edges
			offset[vertexCounter++] = edgeCounter;
		adjList[edgeCounter++] = dest;
		while (fscanf(fp, "%d %d", &source, &dest) == 2 && source == prevSource)
			adjList[edgeCounter++] = dest;
		prevSource = source;
	}
	fclose(fp); // FIX: the input file was never closed
	// Mark the sentinel values so that the degree of any vertex i = offset[i + 1] - offset[i]
	while (vertexCounter <= vertexCount)
		offset[vertexCounter++] = edgeCount;
	// BFS state: double-buffered frontiers, visited flags, parent array
	// (-1 = unset) and a per-edge classification array (treeEdges).
	int *inpFrontier, *outFrontier, *visited, *parent, *treeEdges;
	int s1, s2; // Size of input and output frontiers
	int treeEdgeCount = 0;
	inpFrontier = (int *)calloc(vertexCount, sizeof(int));
	outFrontier = (int *)calloc(vertexCount, sizeof(int));
	visited = (int *)calloc(vertexCount, sizeof(int));
	treeEdges = (int *)calloc(edgeCount, sizeof(int));
	parent = (int *)malloc(verticesSize);
	memset(parent, -1, verticesSize);
	s1 = 1; s2 = 0;
	inpFrontier[0] = 0; // Inserting source vertex
	// Device mirrors of the BFS state.
	int *d_adjList, *d_offset;
	int *d_inpFrontier, *d_outFrontier, *d_visited, *d_parent, *d_treeEdges;
	int *d_s1, *d_s2;
	cudaMalloc(&d_adjList, adjListSize);
	cudaMalloc(&d_offset, offsetSize);
	cudaMalloc(&d_inpFrontier, verticesSize);
	cudaMalloc(&d_outFrontier, verticesSize);
	cudaMalloc(&d_visited, verticesSize);
	cudaMalloc(&d_treeEdges, edgeCount * sizeof(int));
	cudaMalloc(&d_parent, verticesSize);
	cudaMalloc(&d_s1, sizeof(int));
	cudaMalloc(&d_s2, sizeof(int));
	cudaMemcpy(d_adjList, adjList, adjListSize, cudaMemcpyHostToDevice);
	cudaMemcpy(d_offset, offset, offsetSize, cudaMemcpyHostToDevice);
	cudaMemcpy(d_inpFrontier, inpFrontier, verticesSize, cudaMemcpyHostToDevice);
	cudaMemcpy(d_outFrontier, outFrontier, verticesSize, cudaMemcpyHostToDevice);
	cudaMemcpy(d_visited, visited, verticesSize, cudaMemcpyHostToDevice);
	cudaMemcpy(d_treeEdges, treeEdges, edgeCount * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_parent, parent, verticesSize, cudaMemcpyHostToDevice);
	cudaMemcpy(d_s1, &s1, sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_s2, &s2, sizeof(int), cudaMemcpyHostToDevice);
	// Level-synchronous BFS: ping-pong between the two frontiers until the
	// next frontier comes back empty.
	bool odd = true;
	int inpQSize = s1;
	numIterations = 0;
	while (inpQSize != 0) {
		dim3 blocksPerGrid ((inpQSize + 1023) / 1024);
		dim3 threadsPerBlock ((inpQSize > 1024) ? 1024 : inpQSize);
		if (odd) {
			bfs<<<blocksPerGrid, threadsPerBlock>>>(d_adjList, d_offset, d_inpFrontier, d_outFrontier,
					d_parent, d_visited, d_treeEdges, d_s1, d_s2);
			cudaMemcpy(&inpQSize, d_s2, sizeof(int), cudaMemcpyDeviceToHost);
			s1 = 0;
			cudaMemcpy(d_s1, &s1, sizeof(int), cudaMemcpyHostToDevice);
		}
		else {
			bfs<<<blocksPerGrid, threadsPerBlock>>>(d_adjList, d_offset, d_outFrontier, d_inpFrontier,
					d_parent, d_visited, d_treeEdges, d_s2, d_s1);
			cudaMemcpy(&inpQSize, d_s1, sizeof(int), cudaMemcpyDeviceToHost);
			s2 = 0;
			cudaMemcpy(d_s2, &s2, sizeof(int), cudaMemcpyHostToDevice);
		}
		odd = !odd;
		numIterations++;
		treeEdgeCount += inpQSize;
	}
	cudaMemcpy(visited, d_visited, verticesSize, cudaMemcpyDeviceToHost);
	cudaMemcpy(parent, d_parent, verticesSize, cudaMemcpyDeviceToHost);
	cudaMemcpy(treeEdges, d_treeEdges, edgeCount * sizeof(int), cudaMemcpyDeviceToHost);
	cudaMemcpy(adjList, d_adjList, edgeCount * sizeof(int), cudaMemcpyDeviceToHost);
	// Each undirected tree edge was stored once per direction.
	int nonTreeEdgeCount = (edgeCount - 2 * treeEdgeCount) / 2;
	// NOTE(review): if the graph is a tree, nonTreeEdgeCount == 0 and the
	// launches below get a zero-thread configuration — confirm inputs
	// always contain at least one cycle.
	dim3 blocksPerGrid ((nonTreeEdgeCount + 1023) / 1024);
	dim3 threadsPerBlock ((nonTreeEdgeCount > 1024) ? 1024 : nonTreeEdgeCount);
	int threadCount = blocksPerGrid.x * threadsPerBlock.x;
	// Non-tree edges packed as (a, b, edge-id) triples, one per thread.
	int *nonTreeEdges = (int *) calloc(3 * nonTreeEdgeCount, sizeof(int));
	int *lcaThread = (int *) calloc(threadCount, sizeof(int));
	int *threadEdge = (int *) malloc(edgeCount * sizeof(int));
	memset(threadEdge, -1, edgeCount * sizeof(int));
	int *unfinished = (int *) calloc(vertexCount, sizeof(int));
	int *auxAdjList = (int *) malloc(2 * vertexCount * sizeof(int));
	memset(auxAdjList, -1, 2 * vertexCount * sizeof(int));
	int *artPoint = (int *) calloc(vertexCount, sizeof(int));
	int rootLCACount = 0;
	// Populate non tree edges
	for (int i = 0, j = 0; i < edgeCount; i++) {
		if (treeEdges[i] >= 0) {
			nonTreeEdges[j++] = treeEdges[i];
			nonTreeEdges[j++] = adjList[i];
			nonTreeEdges[j++] = i;
		}
	}
	int *d_nonTreeEdges, *d_lcaThread, *d_threadEdge, *d_unfinished, *d_auxAdjList, *d_artPoint, *d_rootLCACount;
	cudaMalloc(&d_nonTreeEdges, 3 * nonTreeEdgeCount * sizeof(int));
	cudaMalloc(&d_lcaThread, threadCount * sizeof(int));
	cudaMalloc(&d_threadEdge, edgeCount * sizeof(int));
	cudaMalloc(&d_unfinished, vertexCount * sizeof(int));
	cudaMalloc(&d_auxAdjList, 2 * vertexCount * sizeof(int));
	cudaMalloc(&d_artPoint, vertexCount * sizeof(int));
	cudaMalloc(&d_rootLCACount, sizeof(int));
	cudaMemcpy(d_nonTreeEdges, nonTreeEdges, 3 * nonTreeEdgeCount * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_lcaThread, lcaThread, threadCount * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_threadEdge, threadEdge, edgeCount * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_unfinished, unfinished, vertexCount * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_auxAdjList, auxAdjList, 2 * vertexCount * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_artPoint, artPoint, vertexCount * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_rootLCACount, &rootLCACount, sizeof(int), cudaMemcpyHostToDevice);
	// LCA pass, auxiliary-vertex splice, then the second LCA pass over the
	// augmented tree (one thread per non-tree edge in each).
	lca<<<blocksPerGrid, threadsPerBlock>>>(d_adjList, d_offset, d_parent, d_nonTreeEdges,
			d_unfinished, d_threadEdge, d_lcaThread, d_auxAdjList,
			vertexCount);
	auxGraph<<<blocksPerGrid, threadsPerBlock>>>(d_adjList, d_offset, d_lcaThread, vertexCount, d_rootLCACount, d_auxAdjList);
	int *threadEdge1 = (int *) malloc((edgeCount + vertexCount) * sizeof(int));
	memset(threadEdge1, -1, (edgeCount + vertexCount) * sizeof(int));
	int *d_threadEdge1;
	cudaMalloc(&d_threadEdge1, (edgeCount + vertexCount) * sizeof(int));
	cudaMemcpy(d_threadEdge1, threadEdge1, (edgeCount + vertexCount) * sizeof(int), cudaMemcpyHostToDevice);
	lca1<<<blocksPerGrid, threadsPerBlock>>>(d_adjList, d_offset, d_parent, d_nonTreeEdges,
			d_unfinished, d_threadEdge1, d_lcaThread, d_auxAdjList,
			vertexCount, edgeCount);
	cudaMemcpy(threadEdge1, d_threadEdge1, (edgeCount + vertexCount) * sizeof(int), cudaMemcpyDeviceToHost);
	cudaMemcpy(lcaThread, d_lcaThread, threadCount * sizeof(int), cudaMemcpyDeviceToHost);
	cudaMemcpy(threadEdge, d_threadEdge, edgeCount * sizeof(int), cudaMemcpyDeviceToHost);
	cudaMemcpy(unfinished, d_unfinished, vertexCount * sizeof(int), cudaMemcpyDeviceToHost);
	cudaMemcpy(adjList, d_adjList, edgeCount * sizeof(int), cudaMemcpyDeviceToHost);
	cudaMemcpy(auxAdjList, d_auxAdjList, 2 * vertexCount * sizeof(int), cudaMemcpyDeviceToHost);
	cudaMemcpy(artPoint, d_artPoint, vertexCount * sizeof(int), cudaMemcpyDeviceToHost);
	// Report LCA vertices whose parent edge was never re-traversed in lca1.
	printf("\n");
	for (int i = 0; i < threadCount; i++) {
		if (threadEdge1[offset[lcaThread[i]]] == -1)
			printf("%d ", lcaThread[i]);
	}
	printf("\n");
	// Free allocated memory on device and host
	cudaFree(d_adjList);
	cudaFree(d_offset);
	cudaFree(d_inpFrontier);
	cudaFree(d_outFrontier);
	cudaFree(d_visited);
	cudaFree(d_treeEdges);
	cudaFree(d_parent);
	cudaFree(d_s1);
	cudaFree(d_s2);
	cudaFree(d_nonTreeEdges);
	cudaFree(d_lcaThread);
	cudaFree(d_threadEdge);
	cudaFree(d_unfinished);
	cudaFree(d_auxAdjList);  // FIX: was leaked
	cudaFree(d_threadEdge1); // FIX: was leaked
	cudaFree(d_artPoint);
	cudaFree(d_rootLCACount);
	free(adjList);     // FIX: was leaked
	free(offset);      // FIX: was leaked
	free(inpFrontier);
	free(outFrontier);
	free(visited);
	free(treeEdges);
	free(parent);
	free(nonTreeEdges);
	free(lcaThread);
	free(threadEdge);
	free(threadEdge1); // FIX: was leaked
	free(unfinished);
	free(auxAdjList);  // FIX: was leaked
	free(artPoint);
	return 0;
}
|
13,045 |
#include <stdio.h>
// Device-side scalar addition: stores *a + *b into *c.
__global__ void add(int *a, int *b, int *c){
	int sum = *a + *b;
	*c = sum;
}
extern "C" {
// Smoke test: computes 1 + 7 on the GPU via the add kernel and returns
// the result (8 on success). Also prints the current CUDA status.
int test_addition(void) {
	printf("CUDA status: %d\n", cudaDeviceSynchronize());
	int host_a, host_b, host_c;   // host copies of the operands and result
	int *dev_a, *dev_b, *dev_c;   // device copies
	const int size = sizeof(int);
	// Allocate device storage for both operands and the result.
	cudaMalloc((void **)&dev_a, size);
	cudaMalloc((void **)&dev_b, size);
	cudaMalloc((void **)&dev_c, size);
	host_a = 1;
	host_b = 7;
	// Copy the operands to the device.
	cudaMemcpy(dev_a, &host_a, size, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, &host_b, size, cudaMemcpyHostToDevice);
	// A single thread suffices for a scalar add.
	add<<<1,1>>>(dev_a, dev_b, dev_c);
	// Fetch the result (the blocking memcpy synchronizes with the kernel).
	cudaMemcpy(&host_c, dev_c, size, cudaMemcpyDeviceToHost);
	cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
	printf("Result is: %d\n", host_c);
	return host_c;
}
}
extern "C" {
// Enumerates CUDA devices and prints the name, memory clock, bus width
// and derived peak memory bandwidth of each one.
void count_devices(void) {
	int deviceCount;
	cudaGetDeviceCount(&deviceCount);
	for (int dev = 0; dev < deviceCount; dev++) {
		cudaDeviceProp prop;
		cudaGetDeviceProperties(&prop, dev);
		printf("Device Number: %d\n", dev);
		printf(" Device name: %s\n", prop.name);
		printf(" Memory Clock Rate (KHz): %d\n",
			prop.memoryClockRate);
		printf(" Memory Bus Width (bits): %d\n",
			prop.memoryBusWidth);
		// bandwidth = 2 (DDR) * clock * bus-bytes, scaled to GB/s
		printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
			2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
	}
}
}
13,046 | #include<stdlib.h>
#include<stdio.h>
#include<time.h>
#define BLOCK_SIZE 16
void init_mat(float*a ,const int N,const int M);
void print_mat(float*a ,const int N,const int M);
// Computes c = a * b for row-major matrices: a is m x n, b is n x k,
// c is m x k. One thread per output element; out-of-range threads exit.
__global__
void matrixMultiply(float*a,float*b,float*c,int m,int n,int k)
{
	int row = blockIdx.y*blockDim.y + threadIdx.y;
	int col = blockIdx.x*blockDim.x + threadIdx.x;
	// BUG FIX: the accumulator was declared `int`, truncating every
	// partial product of the float inputs; accumulate in float.
	float sum = 0.0f;
	if(col<k && row<m)
	{
		for(int i=0;i<n;i++)
		{
			sum += a[row*n+i]*b[i*k+col];
		}
		c[row*k + col]=sum;
	}
}
// Driver: builds random A (5x4) and B (4x3), multiplies them on the GPU
// into C (5x3), and prints inputs and result.
int main()
{
	srand(time(NULL));
	float *a,*b,*c;
	float *d_a,*d_b,*d_c;
	int M=5;
	int N=4;
	int P =3;
	//a = [5*4],b= [4*3], c=[5*3]
	a = (float*)malloc(sizeof(float)*M*N);
	b = (float*)malloc(sizeof(float)*N*P);
	c = (float*)malloc(sizeof(float)*M*P);
	init_mat(a,M,N);
	init_mat(b,N,P);
	printf("Initial data:\n");
	print_mat(a,M,N);
	print_mat(b,N,P);
	print_mat(c,M,P); // NOTE(review): c is uninitialized at this point
	cudaMalloc(&d_a,sizeof(float)*M*N);
	cudaMalloc(&d_b,sizeof(float)*N*P);
	cudaMalloc(&d_c,sizeof(float)*M*P);
	cudaMemcpy(d_a,a,sizeof(float)*M*N,cudaMemcpyHostToDevice);
	cudaMemcpy(d_b,b,sizeof(float)*N*P,cudaMemcpyHostToDevice);
	//dim3 dimGrid((P + BLOCK_SIZE - 1) / BLOCK_SIZE, (M + BLOCK_SIZE - 1) / BLOCK_SIZE);
	// A single 16x16 block covers the 5x3 output used here.
	dim3 dimGrid(1,1);
	dim3 dimBlock(16, 16);
	matrixMultiply<<<dimGrid,dimBlock>>>(d_a,d_b,d_c,M,N,P);
	cudaMemcpy(c,d_c,sizeof(float)*M*P,cudaMemcpyDeviceToHost);
	printf("Final data:\n");
	print_mat(c,M,P);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	// BUG FIX: these buffers were allocated with malloc() but released
	// with delete[], which is undefined behavior; free() matches malloc().
	free(a);
	free(b);
	free(c);
	return 0;
}
// Fills an N x M row-major matrix with random integer values in [1, N].
void init_mat(float*a ,const int N,const int M)
{
	for(int row=0; row<N; ++row)
	{
		for(int col=0; col<M; ++col)
		{
			a[row*M + col] = rand()%N + 1;
		}
	}
}
// Prints an N x M row-major matrix, one row per line, followed by a
// blank line.
void print_mat(float*a ,const int N,const int M)
{
	for(int row=0; row<N; ++row)
	{
		for(int col=0; col<M; ++col)
			printf("% f", a[row*M + col]);
		printf("\n");
	}
	printf("\n");
}
|
13,047 | #include "includes.h"
extern "C" {
}
#define IDX2C(i, j, ld) ((j)*(ld)+(i))
#define SQR(x) ((x)*(x)) // x^2
// Transposes a batch of blockDim.x x blockDim.y matrices stored back to
// back: block b owns matrix b, and each thread moves one element into
// its transposed slot via the IDX2C column-major index macro.
__global__ void transpose_kernel(double const* matrices, double* transposed) {
const int matrix_base = blockIdx.x * blockDim.x * blockDim.y;
const int src_index = matrix_base + blockDim.x * threadIdx.y + threadIdx.x;
const int dst_index = matrix_base + IDX2C(threadIdx.y, threadIdx.x, blockDim.y);
transposed[dst_index] = matrices[src_index];
}
13,048 | //xfail:BOOGIE_ERROR
//--blockDim=128 --gridDim=16 --no-inline
//assert\(false\)
#include <stdio.h>
#include <assert.h>
#include "cuda.h"
#include "cuda_runtime_api.h"
typedef void(*funcType)(float*);
// Dispatch target A for the function-pointer verification test below;
// prints its argument so each target is distinguishable.
__device__ void a(float *v)
{
printf ("funcA with p%f = %f", *v, *v);
}
// Dispatch target B for the function-pointer verification test.
__device__ void b(float *v)
{
printf ("funcB with p%f = %f", *v, *v);
}
// Dispatch target C for the function-pointer verification test.
__device__ void c(float *v)
{
printf ("funcC with p%f = %f", *v, *v);
}
// Dispatch target D for the function-pointer verification test.
__device__ void d(float *v)
{
printf ("funcD with p%f = %f", *v, *v);
}
// Dispatch target E (default case) for the function-pointer verification test.
__device__ void e(float *v)
{
printf ("funcE with p%f = %f", *v, *v);
}
// GPUVerify fixture (see xfail header at the top of this program): selects
// one of five device functions via a function pointer (by x) and one of
// five pointer arguments (by y). The structure — indirect call through a
// runtime-selected pointer — is the point of the test; do not "simplify" it.
__global__ void should_fail(float * __restrict p1, float * __restrict p2, float * __restrict p3, float * __restrict p4, float * __restrict p5, int x, int y)
{
// Select the callee by x (default: e).
funcType fp = a;
switch(x) {
case 1:
fp = &a;
break;
case 2:
fp = &b;
break;
case 3:
fp = &c;
break;
case 4:
fp = &d;
break;
default:
fp = &e;
break;
}
// Select the argument by y (default: p5) and dispatch indirectly.
switch(y) {
case 1:
fp(p1);
break;
case 2:
fp(p2);
break;
case 3:
fp(p3);
break;
case 4:
fp(p4);
break;
default:
fp(p5);
break;
}
// Trivially-true assertion; the verifier's expected failure comes from
// the tool's handling of the indirect call, not from this check.
assert(1);
}
// Host harness for the should_fail fixture: uploads five scalars, launches
// the kernel with selectors x=4, y=4 (routes function d to pointer p4) on
// 2 threads, copies everything back, and releases device memory.
int main (){
float p1, p2, p3, p4, p5;
float *dev_p1, *dev_p2, *dev_p3, *dev_p4, *dev_p5;
p1 = 1; p2 = 2; p3 = 3; p4 = 4; p5 = 5;
cudaMalloc((void**)&dev_p1, sizeof(float));
cudaMalloc((void**)&dev_p2, sizeof(float));
cudaMalloc((void**)&dev_p3, sizeof(float));
cudaMalloc((void**)&dev_p4, sizeof(float));
cudaMalloc((void**)&dev_p5, sizeof(float));
cudaMemcpy(dev_p1,&p1, sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_p2,&p2, sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_p3,&p3, sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_p4,&p4, sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_p5,&p5, sizeof(float),cudaMemcpyHostToDevice);
should_fail <<<1,2>>>(dev_p1, dev_p2, dev_p3, dev_p4, dev_p5, 4, 4);
//ESBMC_verify_kernel_f(should_fail,1,2,dev_p1, dev_p2, dev_p3, dev_p4, dev_p5, 4, 4);
cudaMemcpy(&p1,dev_p1,sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(&p2,dev_p2,sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(&p3,dev_p3,sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(&p4,dev_p4,sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(&p5,dev_p5,sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(dev_p1);
cudaFree(dev_p2);
cudaFree(dev_p3);
cudaFree(dev_p4);
cudaFree(dev_p5);
return 0;
}
|
13,049 | #include "GalaxyCollision.cuh"
__device__ int doPrint = 0;
// Accumulates the gravitational acceleration exerted on body1 by body2
// (mass in .w) into `acceleration` and returns the updated value.
// Uses Plummer softening (EPS2) to keep the force finite at zero distance.
// NOTE: this variant appears unused; the pipeline calls bodiesInteraction.
__device__
float3 bodiesInteraction2(float4 body1, float4 body2, float3 acceleration)
{
float EPS2 = 0.1f;
float3 r;
r.x = body2.x - body1.x;
r.y = body2.y - body1.y;
r.z = body2.z - body1.z;
// distSqr = dot(r_ij, r_ij) + EPS^2 [6 FLOPS]
// BUG FIX: the squared distance was computed as sqrtf(dot) then
// re-squared — a wasted sqrt that also loses precision. Use the dot
// product directly, exactly as the comment above describes.
float distSqr = r.x * r.x + r.y * r.y + r.z * r.z;
distSqr += EPS2;
// invDistCube =1/distSqr^(3/2) [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
float distSixth = distSqr * distSqr * distSqr;
float invDistCube = 1.0f / sqrtf(distSixth);
// s = m_j * invDistCube [1 FLOP]
float s = body2.w * invDistCube;
acceleration.x += r.x * s;
acceleration.y += r.y * s;
acceleration.z += r.z * s;
return acceleration;
}
// Accumulates the acceleration exerted on body1 by body2 (mass in .w) into
// `acceleration` and returns it: a += m2 * r / (|r|^2 + EPS2)^(3/2) * ep.
// EPS2 softens close encounters; ep is a global force scale (currently 1).
// Zero-mass bodies contribute nothing (s == 0).
__device__
float3 bodiesInteraction(float4 body1, float4 body2, float3 acceleration)
{
// tid is only used by the commented-out debug dump below.
int tid = blockIdx.x * blockDim.x + threadIdx.x;
float EPS2 = 0.01f;
float ep = 1.0f;
float3 r;
r.x = body2.x - body1.x;
r.y = body2.y - body1.y;
r.z = body2.z - body1.z;
float distSqr = (r.x * r.x) + (r.y * r.y) + (r.z * r.z);
//distSqr *= distSqr;
distSqr += EPS2;
float dist = sqrtf(distSqr);
float distCube = dist * dist * dist;
float s = (body2.w) / distCube;
acceleration.x += r.x * s * ep;
acceleration.y += r.y * s * ep;
acceleration.z += r.z * s * ep;
/*
if(tid == 0 && r.y > 10.0)
{
doPrint = 99;
printf("\n");
printf("body1.w: %lf \n", body1.w);
printf("body2.w: %lf \n", body2.w);
printf("\n");
printf("r.x: %lf \n", r.x);
printf("r.y: %lf \n", r.y);
printf("r.z: %lf \n", r.z);
printf("\n");
printf("distSqr: %lf \n", distSqr);
printf("dist: %lf \n", dist);
printf("distCube: %lf \n", distCube);
printf("s: %lf \n", s);
printf("\n");
printf("acc.x: %lf \n", acceleration.x);
printf("acc.y: %lf \n", acceleration.y);
printf("acc.z: %lf \n", acceleration.z);
}
*/
return acceleration;
}
// Accumulates onto `acceleration` the pull from every body currently
// staged in the shared-memory tile (shPosition, one entry per thread in
// the block). The caller must place __syncthreads() barriers around the
// tile load before and after invoking this helper.
__device__
float3 tileAcceleration(float4 currPosition, float3 acceleration)
{
extern __shared__ float4 shPosition[];
for (int k = 0; k < blockDim.x; ++k)
acceleration = bodiesInteraction(currPosition, shPosition[k], acceleration);
return acceleration;
}
// All-pairs N-body acceleration, tiled through shared memory.
// Launch 1-D with blockDim.x * sizeof(float4) dynamic shared memory
// (one staged body per thread); each in-range thread accumulates the
// pull of every body on its own body.
__global__
void calculateForcesKernel(float4* bodyDescription, float3* acceleration, int size)
{
extern __shared__ float4 shPosition[];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Threads past `size` still take part in the tile loads and barriers —
// a __syncthreads() reached by only part of the block is undefined
// behavior — they simply never write a result.
float4 currPosition = (tid < size) ? bodyDescription[tid]
                                   : make_float4(0.0f, 0.0f, 0.0f, 0.0f);
float3 acc = { 0.0f, 0.0f, 0.0f };
int i, tile;
for (i = 0, tile = 0; i < size; i += blockDim.x, tile++)
{
int idx = tile * blockDim.x + threadIdx.x;
// BUG FIX: previously the load and BOTH __syncthreads() calls sat
// inside `if (idx < size)`, which (a) is an illegal divergent
// barrier on the last partial tile and (b) let threads consume
// stale shared entries left over from the previous tile. Filling
// out-of-range slots with a zero-mass body is safe: its
// contribution is s = w / distCube = 0 in bodiesInteraction.
shPosition[threadIdx.x] = (idx < size) ? bodyDescription[idx]
                                       : make_float4(0.0f, 0.0f, 0.0f, 0.0f);
__syncthreads();
acc = tileAcceleration(currPosition, acc);
__syncthreads();
}
// Save the result in global memory for the integration step.
if (tid < size)
acceleration[tid] = acc;
}
// Launches one force-computation pass over `size` bodies: one thread per
// body (rounded up to whole blocks) with one staged float4 per thread in
// dynamic shared memory, then waits for completion.
__host__
void galaxyCollisionLogic(float4* d_bodyDescription, float3* d_acceleration, int size)
{
dim3 DimGrid((size / MAX_BLOCK_THREAD_COUNT) + 1, 1, 1);
dim3 DimBlock(MAX_BLOCK_THREAD_COUNT, 1, 1);
calculateForcesKernel << <DimGrid, DimBlock, MAX_BLOCK_THREAD_COUNT * sizeof(float4)>> >
(d_bodyDescription, d_acceleration, size);
cudaDeviceSynchronize();
// BUG FIX: `doPrint = 0;` assigned to a __device__ variable from host
// code, which does not modify the device-side copy; reset it through
// cudaMemcpyToSymbol instead. (Also removed an unused `double4 test;`.)
int zero = 0;
cudaMemcpyToSymbol(doPrint, &zero, sizeof(int));
}
// One-shot host pipeline: uploads `count` body descriptions (position +
// mass in float4), runs the force computation, downloads the resulting
// accelerations into `acceleration`, and frees the device buffers.
// Every CUDA call is checked through the project's C_ERR macro.
__host__
void galaxyCollisionInit(float4* bodyDescription, float3* acceleration, int count)
{
float4* d_bodyDescription;
float3* d_acceleration;
int sizef3 = count * sizeof(float3);
int sizef4 = count * sizeof(float4);
cudaError_t err;
/*********** MEMORY ALLOCATION ***********/
if ((err = cudaMalloc((void**)&d_bodyDescription, sizef4)) != cudaSuccess) C_ERR(err);
if ((err = cudaMalloc((void**)&d_acceleration, sizef3)) != cudaSuccess) C_ERR(err);
/*********** COPY MEMORY TO DEVICE ***********/
if ((err = cudaMemcpy(d_bodyDescription, bodyDescription, sizef4, cudaMemcpyHostToDevice)) != cudaSuccess) C_ERR(err);
//if ((err = cudaMemcpy(d_acceleration, acceleration, sizef3, cudaMemcpyHostToDevice)) != cudaSuccess) ERR(err);
// Work
galaxyCollisionLogic(d_bodyDescription, d_acceleration, count);
/*********** COPY MEMORY BACK TO HOST ***********/
if ((err = cudaMemcpy(acceleration, d_acceleration, sizef3, cudaMemcpyDeviceToHost)) != cudaSuccess) C_ERR(err);
/*********** FREE MEMORY ***********/
if ((err = cudaFree(d_bodyDescription)) != cudaSuccess) C_ERR(err);
if ((err = cudaFree(d_acceleration)) != cudaSuccess) C_ERR(err);
// Update galaxies ...
/*
for (int i = 0; i < count; i+=100)
{
printf("Acc: %lf, %lf, %lf\n", acceleration[i].x, acceleration[i].y, acceleration[i].z);
}*/
// Init the body description - position and mass
}
|
13,050 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define BLOCKSIZE 16
// Tiled integer matrix multiply: matrix3 (m1 x n2) = matrix1 (m1 x n1) *
// matrix2 (m2 x n2), with n1 == m2 (checked by the host).
// Launch: 2-D BLOCKSIZE x BLOCKSIZE blocks; x covers columns (n2), y rows (m1).
// The original indexed every dimension with n2, which is only correct for
// square matrices; this version handles rectangular operands too.
__global__ void MatrixMultiplyI(int *matrix1, int *matrix2, int *matrix3, int m1, int n1, int m2, int n2)
{
__shared__ int ds_M[BLOCKSIZE][BLOCKSIZE];
__shared__ int ds_N[BLOCKSIZE][BLOCKSIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = blockIdx.y * blockDim.y + ty;
int col = blockIdx.x * blockDim.x + tx;
int sum = 0;
// Walk tiles along the shared dimension n1 (ceil division covers remainder).
int numTiles = (n1 + BLOCKSIZE - 1) / BLOCKSIZE;
for (int t = 0; t < numTiles; t++)
{
// Stage one tile of each operand; out-of-range slots are zero-padded so
// the inner product below needs no bounds checks.
int aCol = t * BLOCKSIZE + tx;
ds_M[ty][tx] = (row < m1 && aCol < n1) ? matrix1[row * n1 + aCol] : 0;
int bRow = t * BLOCKSIZE + ty;
ds_N[ty][tx] = (bRow < m2 && col < n2) ? matrix2[bRow * n2 + col] : 0;
__syncthreads();
for (int j = 0; j < BLOCKSIZE; j++)
sum += ds_M[ty][j] * ds_N[j][tx];
__syncthreads();
}
if (row < m1 && col < n2)
matrix3[row * n2 + col] = sum;
}
// Tiled float matrix multiply: matrix3 (m1 x n2) = matrix1 (m1 x n1) *
// matrix2 (m2 x n2), with n1 == m2 (checked by the host).
// Fixes two bugs in the original: (1) every dimension was indexed with n2,
// so only square matrices worked; (2) the tile loop used n2/BLOCKSIZE, which
// truncates and silently drops the final partial tile when n2 is not a
// multiple of BLOCKSIZE.
__global__ void MatrixMultiplyF(float *matrix1, float *matrix2, float *matrix3, int m1, int n1, int m2, int n2)
{
__shared__ float ds_M[BLOCKSIZE][BLOCKSIZE];
__shared__ float ds_N[BLOCKSIZE][BLOCKSIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = blockIdx.y * blockDim.y + ty;
int col = blockIdx.x * blockDim.x + tx;
float sum = 0.0f;
// Ceil division over the shared dimension n1 includes the remainder tile.
int numTiles = (n1 + BLOCKSIZE - 1) / BLOCKSIZE;
for (int t = 0; t < numTiles; t++)
{
// Stage one tile of each operand, zero-padding out-of-range slots.
int aCol = t * BLOCKSIZE + tx;
ds_M[ty][tx] = (row < m1 && aCol < n1) ? matrix1[row * n1 + aCol] : 0.0f;
int bRow = t * BLOCKSIZE + ty;
ds_N[ty][tx] = (bRow < m2 && col < n2) ? matrix2[bRow * n2 + col] : 0.0f;
__syncthreads();
for (int j = 0; j < BLOCKSIZE; j++)
sum += ds_M[ty][j] * ds_N[j][tx];
__syncthreads();
}
if (row < m1 && col < n2)
matrix3[row * n2 + col] = sum;
}
// Reads an integer matrix from 'filename': a "rows cols" header line followed
// by rows of comma-separated values. On success *m/*n hold the dimensions and
// *values a calloc'd row-major array (caller frees). Exits on any failure.
void read_imatrix(char *filename, int *m, int *n, int **values)
{
FILE* name;
int t1, t2, t3;
name = fopen(filename, "r+");
if (name == NULL)
{
printf("File read failed\n");
exit(1);
}
// Validate every fscanf so a truncated or malformed file is reported
// instead of silently filling the matrix with garbage (the original
// ignored all fscanf return values).
if (fscanf(name, "%d %d\n", &t1, &t2) != 2)
{
printf("File read failed\n");
fclose(name);
exit(1);
}
*m = t1;
*n = t2;
*values = (int *)calloc(t1 * t2, sizeof(int));
int k = 0;
for (int i = 0; i < t1; i++)
{
for (int j = 0; j < t2; j++)
{
// Last column of a row ends with a newline, the others with a comma.
const char *fmt = (j < t2 - 1) ? "%d," : "%d\n";
if (fscanf(name, fmt, &t3) != 1)
{
printf("File read failed\n");
fclose(name);
exit(1);
}
(*values)[k++] = t3;
}
}
fclose(name);
}
// Reads a float matrix from 'filename': a "rows cols" header line followed by
// rows of comma-separated values. *values receives a calloc'd row-major array
// owned by the caller. Exits if the file cannot be opened.
void read_fmatrix(char *filename, int *m, int *n, float **values)
{
FILE* fp = fopen(filename, "r+");
if (fp == NULL)
{
printf("File read failed\n");
exit(1);
}
int rows, cols;
float cell;
fscanf(fp, "%d %d\n", &rows, &cols);
*m = rows;
*n = cols;
*values = (float *)calloc(rows * cols, sizeof(float));
int k = 0;
for (int r = 0; r < rows; ++r)
{
for (int c = 0; c < cols; ++c)
{
// Last column of a row is newline-terminated, the rest comma-terminated.
if (c < cols - 1)
fscanf(fp, "%f,", &cell);
else
fscanf(fp, "%f\n", &cell);
(*values)[k++] = cell;
}
}
fclose(fp);
}
// Writes an integer matrix to 'filename' in the same format read_imatrix
// expects: a "rows cols" header then rows of comma-separated values.
// Exits if the file cannot be created.
void write_imatrix(char *filename, int *m, int *n, int **values)
{
FILE* fp = fopen(filename, "w+");
if (fp == NULL)
{
printf("File write failed\n");
exit(1);
}
int rows = *m;
int cols = *n;
fprintf(fp, "%d %d\n", rows, cols);
int k = 0;
for (int r = 0; r < rows; ++r)
{
for (int c = 0; c < cols; ++c)
{
int cell = (*values)[k++];
// Comma between columns, newline after the last one.
if (c < cols - 1)
fprintf(fp, "%d,", cell);
else
fprintf(fp, "%d\n", cell);
}
}
fclose(fp);
}
// Writes a float matrix to 'filename' in the same format read_fmatrix
// expects: a "rows cols" header then rows of comma-separated values.
// Exits if the file cannot be created.
void write_fmatrix(char *filename, int *m, int *n, float **values)
{
FILE* fp = fopen(filename, "w+");
if (fp == NULL)
{
printf("File write failed\n");
exit(1);
}
int rows = *m;
int cols = *n;
fprintf(fp, "%d %d\n", rows, cols);
int k = 0;
for (int r = 0; r < rows; ++r)
{
for (int c = 0; c < cols; ++c)
{
float cell = (*values)[k++];
// Comma between columns, newline after the last one.
if (c < cols - 1)
fprintf(fp, "%f,", cell);
else
fprintf(fp, "%f\n", cell);
}
}
fclose(fp);
}
// Aborts unless the operands are multiplication-compatible:
// (m1 x n1) * (m2 x n2) requires the inner dimensions n1 and m2 to agree.
void matrix_check(int m1, int n1, int m2, int n2)
{
if (n1 != m2)
{
printf("Matrix dimensions must be PxQ and QxR respectively\n");
exit(1);
}
}
// Reads A (m1 x n1) and B (m2 x n2), multiplies them on the GPU, and writes
// C (m1 x n2). Fixes in this version: each buffer is sized for ITS matrix
// (the original sized all three as m1*n1, wrong for rectangular operands);
// the output is written with dimensions (m1, n2); the grid maps x to columns
// (n2) and y to rows (m1) to match the kernel. All no-ops for square inputs.
int main(int argc, char *argv[])
{
int m1, n1, m2, n2, GRIDX, GRIDY;
if (argc != 5)
{
printf("Usage: ./matrix-multiplication matrix1.mat matrix2.mat matrix3.mat float/int \n");
exit(1);
}
if (strcmp(argv[4], "float") == 0)
{
float *hostmatrix1, *hostmatrix2, *hostmatrix3;
float *devicematrix1, *devicematrix2, *devicematrix3;
read_fmatrix(argv[1], &m1, &n1, &hostmatrix1);
read_fmatrix(argv[2], &m2, &n2, &hostmatrix2);
matrix_check(m1, n1, m2, n2);
size_t size1 = (size_t)m1 * n1 * sizeof(float);
size_t size2 = (size_t)m2 * n2 * sizeof(float);
size_t size3 = (size_t)m1 * n2 * sizeof(float);
hostmatrix3 = (float *)calloc((size_t)m1 * n2, sizeof(float));
cudaMalloc(&devicematrix1, size1);
cudaMalloc(&devicematrix2, size2);
cudaMalloc(&devicematrix3, size3);
cudaMemcpy(devicematrix1, hostmatrix1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(devicematrix2, hostmatrix2, size2, cudaMemcpyHostToDevice);
// Kernel maps blockIdx.x to columns and blockIdx.y to rows.
GRIDX = (int)ceil((float)n2/BLOCKSIZE);
GRIDY = (int)ceil((float)m1/BLOCKSIZE);
dim3 dimGrid(GRIDX, GRIDY, 1);
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1);
MatrixMultiplyF <<< dimGrid, dimBlock >>> (devicematrix1, devicematrix2, devicematrix3, m1, n1, m2, n2);
cudaMemcpy(hostmatrix3, devicematrix3, size3, cudaMemcpyDeviceToHost);
// Result is m1 x n2.
write_fmatrix(argv[3], &m1, &n2, &hostmatrix3);
cudaFree(devicematrix1);
cudaFree(devicematrix2);
cudaFree(devicematrix3);
free(hostmatrix1);
free(hostmatrix2);
free(hostmatrix3);
}
if (strcmp(argv[4], "int") == 0)
{
int *hostmatrix1, *hostmatrix2, *hostmatrix3;
int *devicematrix1, *devicematrix2, *devicematrix3;
read_imatrix(argv[1], &m1, &n1, &hostmatrix1);
read_imatrix(argv[2], &m2, &n2, &hostmatrix2);
matrix_check(m1, n1, m2, n2);
size_t size1 = (size_t)m1 * n1 * sizeof(int);
size_t size2 = (size_t)m2 * n2 * sizeof(int);
size_t size3 = (size_t)m1 * n2 * sizeof(int);
hostmatrix3 = (int *)calloc((size_t)m1 * n2, sizeof(int));
cudaMalloc(&devicematrix1, size1);
cudaMalloc(&devicematrix2, size2);
cudaMalloc(&devicematrix3, size3);
cudaMemcpy(devicematrix1, hostmatrix1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(devicematrix2, hostmatrix2, size2, cudaMemcpyHostToDevice);
GRIDX = (int)ceil((float)n2/BLOCKSIZE);
GRIDY = (int)ceil((float)m1/BLOCKSIZE);
dim3 dimGrid(GRIDX, GRIDY, 1);
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1);
MatrixMultiplyI <<< dimGrid, dimBlock >>> (devicematrix1, devicematrix2, devicematrix3, m1, n1, m2, n2);
cudaMemcpy(hostmatrix3, devicematrix3, size3, cudaMemcpyDeviceToHost);
write_imatrix(argv[3], &m1, &n2, &hostmatrix3);
cudaFree(devicematrix1);
cudaFree(devicematrix2);
cudaFree(devicematrix3);
free(hostmatrix1);
free(hostmatrix2);
free(hostmatrix3);
}
return 0;
}
|
13,051 | #include "includes.h"
// Block-level max reduction: block i serially scans its own 256-element slice
// of a[0..n) and writes the slice maximum to b[i]. An empty slice (block start
// past n) writes 0, matching the original behavior.
__global__ void maxi(int * a, int * b, int n) {
int block = 256 * blockIdx.x;
int end = min(256 + block, n);
// Seed with the first element of the slice instead of 0 so that slices
// containing only negative values produce the true maximum (initializing
// to 0 was a bug for negative inputs).
int best = (block < n) ? a[block] : 0;
for (int i = block + 1; i < end; i++) {
if (best < a[i]) {
best = a[i];
}
}
b[blockIdx.x] = best;
}
13,052 | #include "cuda.h"
// Scatter-add forward pass: add update[p] into out[ii[p]-1] (ii holds
// 1-based indices). One thread per update entry.
__global__ void ScatterAddOps_forward_kernel(double *out, const double *ipt, const long long *ii,
const double *update, int d, int n){
int p = blockIdx.x *blockDim.x + threadIdx.x;
if (p < n){
// atomicAdd: several entries may target the same output slot, and the
// original plain "+=" raced and lost updates in that case.
// (double atomicAdd requires compute capability 6.0+.)
atomicAdd(&out[ii[p]-1], update[p]);
}
}
// Host wrapper: out = copy of ipt (length d) with 'update' scattered in.
void Gpu_ScatterAddOps_forward(double *out, const double *ipt, const long long *ii,
const double *update, int d, int n){
// Start from a device-side copy of the input, then scatter the updates.
cudaMemcpy(out, ipt, sizeof(double) * d, cudaMemcpyDeviceToDevice);
const int threads = 64;
const int blocks = (n - 1) / threads + 1;
ScatterAddOps_forward_kernel<<< blocks, threads >>>(out, ipt, ii, update, d, n);
}
// Backward pass: the gradient w.r.t. each update entry is the output gradient
// at its (1-based) scatter target. grad_ipt is filled by the host wrapper.
__global__ void ScatterAddOps_backward_kernel(double *grad_ipt, double *grad_update,
const double *grad_out,
const double *out, const double *ipt, const long long *ii,
const double *update, int d, int n){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= n) return;
grad_update[tid] = grad_out[ii[tid] - 1];
}
// Host wrapper for the backward pass: grad_ipt is simply grad_out (identity
// path of length d); grad_update is gathered on the device.
void Gpu_ScatterAddOps_backward(
double *grad_ipt, double *grad_update,
const double *grad_out,
const double *out, const double *ipt, const long long *ii,
const double *update, int d, int n){
cudaMemcpy(grad_ipt, grad_out, sizeof(double) * d, cudaMemcpyDeviceToDevice);
const int threads = 64;
const int blocks = (n - 1) / threads + 1;
ScatterAddOps_backward_kernel<<< blocks, threads >>>(grad_ipt, grad_update, grad_out, out, ipt, ii,
update, d, n);
}
|
13,053 | extern "C" {
// One thread per pixel: apply per-channel power-law (gamma) correction to an
// RGB float image laid out row-major as float3.
__global__ void gamma_transform_2(float3* d_idata, float3* d_odata, int width, int height, float gamma)
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= width || y >= height)
return;
const int idx = y * width + x;
const float3 rgb = d_idata[idx];
d_odata[idx] = make_float3(powf(rgb.x, gamma), powf(rgb.y, gamma), powf(rgb.z, gamma));
}
} |
13,054 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, size).
// Grid-stride loop: correct for any launch configuration.
__global__ void addKernel(int* c, const int* a, const int* b, int size) {
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size;
     idx += gridDim.x * blockDim.x) {
c[idx] = a[idx] + b[idx];
}
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel: c = a + b.
// Adds the error checking the original lacked entirely — launch-configuration
// errors and asynchronous kernel faults were silently swallowed.
void addWithCuda(int* c, const int* a, const int* b, int size) {
int* dev_a = nullptr;
int* dev_b = nullptr;
int* dev_c = nullptr;
cudaError_t err;
// Allocate GPU buffers for three vectors (two input, one output)
cudaMalloc((void**)&dev_c, size * sizeof(int));
cudaMalloc((void**)&dev_a, size * sizeof(int));
cudaMalloc((void**)&dev_b, size * sizeof(int));
// Copy input vectors from host memory to GPU buffers.
cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
// Launch a kernel on the GPU with one thread for each element.
// 2 is number of computational blocks and (size + 1) / 2 is a number of threads in a block
addKernel<<<2, (size + 1) / 2>>>(dev_c, dev_a, dev_b, size);
// Kernel launches never return an error directly; a bad configuration is
// reported by cudaGetLastError().
if ((err = cudaGetLastError()) != cudaSuccess)
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(err));
// Wait for the kernel and surface any asynchronous execution error.
if ((err = cudaDeviceSynchronize()) != cudaSuccess)
fprintf(stderr, "cudaDeviceSynchronize failed: %s\n", cudaGetErrorString(err));
// Copy output vector from GPU buffer to host memory.
cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
}
// Demo driver: adds two fixed five-element vectors on the GPU and prints the
// result.
int main(int argc, char** argv) {
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
addWithCuda(c, a, b, arraySize);
printf("{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n",
       c[0], c[1], c[2], c[3], c[4]);
return 0;
}
|
13,055 | //------------------------------------------------------------------------------
//
// Name: gameoflife.cu
//
// Purpose: CUDA implementation of Conway's game of life
//
// HISTORY: Written by Tom Deakin and Simon McIntosh-Smith, August 2013
//
//------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define FINALSTATEFILE "final_state.dat"
// Define the state of the cell
#define DEAD 0
#define ALIVE 1
/*************************************************************************************
* Forward declarations of utility functions
************************************************************************************/
void die(const char* message, const int line, const char* file);
void load_board(char* board, const char* file, const unsigned int nx, const unsigned int ny);
void print_board(const char* board, const unsigned int nx, const unsigned int ny);
void save_board(const char* board, const unsigned int nx, const unsigned int ny);
void load_params(const char* file, unsigned int* nx, unsigned int* ny, unsigned int* iterations);
void errorCheck(cudaError_t error);
/*************************************************************************************
* Game of Life worker method - CUDA kernel
************************************************************************************/
// Apply the rules of life to tick and save in tock
// Advance the board one generation (tick -> tock) using a shared-memory tile
// with a one-cell halo and toroidal (wrap-around) boundaries.
// Launch contract (set up in main): gridDim * blockDim == (nx, ny) exactly —
// there is no bounds guard on the global indexes, so the block sizes must
// divide the board dimensions.
// Dynamic shared memory: (blockDim.x + 2) * (blockDim.y + 2) bytes.
__global__ void accelerate_life(const char* tick, char* tock, const int nx, const int ny)
{
// The cell we work on in the loop
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int idy = blockDim.y * blockIdx.y + threadIdx.y;
// Index with respect to global array
unsigned int id = idy * nx + idx;
// Index of this cell inside the padded shared tile (+1,+1 leaves room for
// the halo ring).
unsigned int id_b = (threadIdx.y + 1) * (blockDim.x + 2) + threadIdx.x + 1;
// Copy block to shared memory
extern __shared__ char block[];
block[id_b] = tick[id];
// Copy the halo cells (those around the block) to shared memory
// Neighbouring block indexes, wrapping at the grid edges (torus).
const unsigned int block_r = (blockIdx.x + 1) % gridDim.x;
const unsigned int block_l = (blockIdx.x == 0) ? gridDim.x - 1 : blockIdx.x - 1;
const unsigned int block_u = (blockIdx.y + 1) % gridDim.y;
const unsigned int block_d = (blockIdx.y == 0) ? gridDim.y - 1 : blockIdx.y - 1;
// Select the first row of threads
if (threadIdx.y == 0)
{
// Down row
block[threadIdx.x + 1] = tick[(blockDim.y * block_d + blockDim.y - 1) * nx + idx];
}
// Select the last row of threads
if (threadIdx.y == blockDim.y - 1)
{
// Up row
block[id_b + blockDim.x + 2] = tick[(blockDim.y * block_u) * nx + idx];
}
// Select right column of threads
if (threadIdx.x == blockDim.x - 1)
{
// Copy in right
block[id_b + 1] = tick[nx * idy + (blockDim.x * block_r)];
}
// Select left column of threads
if (threadIdx.x == 0)
{
// Copy in left
block[id_b - 1] = tick[nx * idy + (blockDim.x * block_l + blockDim.x - 1)];
}
// Add the 4 corner halo cells
// NOTE(review): every thread in the block stores the same value into each
// corner slot — redundant (one designated thread would do) and technically
// a same-value write race, though benign in practice.
block[0] = tick[nx * (blockDim.y * block_d + blockDim.y - 1) + (blockDim.x * block_l)
+ blockDim.x - 1];
block[blockDim.x + 1] = tick[nx * (blockDim.y * block_d + blockDim.y - 1) + (blockDim.x * block_r)];
block[(blockDim.x + 2) * (blockDim.y + 1)] = tick[nx * (blockDim.y * block_u)
+ (blockDim.x * block_l) + blockDim.x - 1];
block[(blockDim.x + 2) * (blockDim.y + 2) - 1] = tick[nx * (blockDim.y * block_u)
+ (blockDim.x * block_r)];
// Barrier is uniform: no thread exited early above.
__syncthreads();
// Indexes of rows/columns next to id_b
unsigned int x_l, x_r, y_u, y_d;
// Calculate indexes
x_r = threadIdx.x + 2;
x_l = threadIdx.x;
y_u = threadIdx.y + 2;
y_d = threadIdx.y;
// Count alive neighbours (out of eight)
int neighbours = 0;
if (block[(threadIdx.y + 1) * (blockDim.x + 2) + x_l] == ALIVE) neighbours++;
if (block[y_u * (blockDim.x + 2) + x_l] == ALIVE) neighbours++;
if (block[y_d * (blockDim.x + 2) + x_l] == ALIVE) neighbours++;
if (block[(threadIdx.y + 1) * (blockDim.x + 2) + x_r] == ALIVE) neighbours++;
if (block[y_u * (blockDim.x + 2) + x_r] == ALIVE) neighbours++;
if (block[y_d * (blockDim.x + 2) + x_r] == ALIVE) neighbours++;
if (block[y_u * (blockDim.x + 2) + threadIdx.x + 1] == ALIVE) neighbours++;
if (block[y_d * (blockDim.x + 2) + threadIdx.x + 1] == ALIVE) neighbours++;
// Apply game of life rules
if (block[id_b] == ALIVE)
{
if (neighbours == 2 || neighbours == 3)
// Cell lives on
tock[id] = ALIVE;
else
// Cell dies by over/under population
tock[id] = DEAD;
}
else
{
if (neighbours == 3)
// Cell becomes alive through reproduction
tock[id] = ALIVE;
else
// Remains dead
tock[id] = DEAD;
}
}
/*************************************************************************************
* Main function
************************************************************************************/
// Driver: load the board, run 'iterations' generations on the GPU (ping-pong
// between two device buffers), then print and save the final state.
int main(int argc, char** argv)
{
// Check we have a starting state file
if (argc != 5)
{
printf("Usage:\n./gameoflife input.dat input.params bx by\n");
printf("\tinput.dat\tpattern file\n");
printf("\tinput.params\tparameter file defining board size\n");
printf("\tbx by\tsizes of thread blocks - must divide the board size equally\n");
return EXIT_FAILURE;
}
// Board dimensions and iteration total
unsigned int nx, ny;
unsigned int iterations;
unsigned int bx = atoi(argv[3]);
unsigned int by = atoi(argv[4]);
load_params(argv[2], &nx, &ny, &iterations);
// The kernel has no bounds guard: enforce the documented requirement that
// the block sizes tile the board exactly instead of silently launching a
// grid that skips cells.
if (bx == 0 || by == 0 || nx % bx != 0 || ny % by != 0)
die("Thread block sizes must divide the board size equally.", __LINE__, __FILE__);
// Allocate memory for boards
size_t size = nx * ny * sizeof(char);
char* h_board = (char*)calloc(nx * ny, sizeof(char));
char* d_board_tick;
char* d_board_tock;
errorCheck(cudaMalloc(&d_board_tick, size));
errorCheck(cudaMalloc(&d_board_tock, size));
// Load in the starting state to board_tick
load_board(h_board, argv[1], nx, ny);
// Display the starting state
printf("Starting state\n");
print_board(h_board, nx, ny);
// Copy the host array to the device array
errorCheck(cudaMemcpy(d_board_tick, h_board, size, cudaMemcpyHostToDevice));
// Define our problem size for CUDA
dim3 numBlocks(nx / bx, ny / by);
dim3 numThreads(bx, by);
// Shared tile plus a one-cell halo on every side.
size_t sharedMem = sizeof(char) * (bx + 2) * (by + 2);
// Loop
for (unsigned int i = 0; i < iterations; i++)
{
// Apply the rules of Life
accelerate_life<<<numBlocks, numThreads, sharedMem>>>(d_board_tick, d_board_tock, nx, ny);
errorCheck(cudaPeekAtLastError());
// Swap the boards over
char* tmp = d_board_tick;
d_board_tick = d_board_tock;
d_board_tock = tmp;
}
// Copy the device array back to the host
errorCheck(cudaMemcpy(h_board, d_board_tick, size, cudaMemcpyDeviceToHost));
// Display the final state
printf("Finishing state\n");
print_board(h_board, nx, ny);
// Save the final state of the board
save_board(h_board, nx, ny);
// Release host and device memory (previously leaked).
free(h_board);
errorCheck(cudaFree(d_board_tick));
errorCheck(cudaFree(d_board_tock));
return EXIT_SUCCESS;
}
/*************************************************************************************
* Utility functions
************************************************************************************/
// Function to load the params file and set up the X and Y dimensions
// Function to load the params file: three lines holding nx, ny and the
// iteration count. Dies on any open/parse failure.
void load_params(const char* file, unsigned int* nx, unsigned int* ny, unsigned int* iterations)
{
FILE* fp = fopen(file, "r");
if (!fp) die("Could not open params file.", __LINE__, __FILE__);
int retval;
// %u, not %d: the destinations are unsigned int — passing an unsigned int*
// to a %d conversion is undefined behavior per the C standard.
retval = fscanf(fp, "%u\n", nx);
if (retval != 1) die("Could not read params file: nx.", __LINE__, __FILE__);
retval = fscanf(fp, "%u\n", ny);
if (retval != 1) die("Could not read params file: ny", __LINE__, __FILE__);
retval = fscanf(fp, "%u\n", iterations);
if (retval != 1) die("Could not read params file: iterations", __LINE__, __FILE__);
fclose(fp);
}
// Function to load in a file which lists the alive cells
// Each line of the file is expected to be: x y 1
// Function to load in a file which lists the alive cells.
// Each line of the file is expected to be: x y 1
// Dies on open failure, malformed lines, out-of-range coordinates, or a
// state value other than ALIVE.
void load_board(char* board, const char* file, const unsigned int nx, const unsigned int ny)
{
FILE* fp = fopen(file, "r");
if (!fp) die("Could not open input file.", __LINE__, __FILE__);
int retval;
unsigned int x, y, s;
// %u matches the unsigned int destinations (the original's %d was UB) and
// "x >= nx" avoids the unsigned wrap of "nx - 1" when nx is zero.
while ((retval = fscanf(fp, "%u %u %u\n", &x, &y, &s)) != EOF)
{
if (retval != 3) die("Expected 3 values per line in input file.", __LINE__, __FILE__);
if (x >= nx) die("Input x-coord out of range.", __LINE__, __FILE__);
if (y >= ny) die("Input y-coord out of range.", __LINE__, __FILE__);
if (s != ALIVE) die("Alive value should be 1.", __LINE__, __FILE__);
board[x + y * nx] = ALIVE;
}
fclose(fp);
}
// Function to print out the board to stdout
// Alive cells are displayed as O
// Dead cells are displayed as .
// Function to print out the board to stdout.
// Alive cells are displayed as O, dead cells as .
void print_board(const char* board, const unsigned int nx, const unsigned int ny)
{
for (unsigned int row = 0; row < ny; row++)
{
for (unsigned int col = 0; col < nx; col++)
printf(board[row * nx + col] == DEAD ? "." : "O");
printf("\n");
}
}
// Writes every ALIVE cell to FINALSTATEFILE as "x y 1" lines — the same
// format load_board reads. Dies if the file cannot be opened.
void save_board(const char* board, const unsigned int nx, const unsigned int ny)
{
FILE* fp = fopen(FINALSTATEFILE, "w");
if (!fp) die("Could not open final state file.", __LINE__, __FILE__);
for (unsigned int i = 0; i < ny; i++)
{
for (unsigned int j = 0; j < nx; j++)
{
if (board[i * nx + j] == ALIVE) fprintf(fp, "%d %d %d\n", j, i, ALIVE);
}
}
// The original leaked the stream: without fclose the last buffered lines
// may never reach disk.
fclose(fp);
}
// Translate a failing CUDA status into a fatal diagnostic via die().
void errorCheck(cudaError_t error)
{
if (error == cudaSuccess) return;
die(cudaGetErrorString(error), __LINE__, __FILE__);
}
// Function to display error and exit nicely
// Function to display error and exit nicely: report the failing location and
// message on stderr, flush, and terminate the process.
void die(const char* message, const int line, const char* file)
{
fprintf(stderr, "Error at line %d of file %s:\n%s\n", line, file, message);
fflush(stderr);
exit(EXIT_FAILURE);
}
|
13,056 | #include <assert.h>
#include <stdio.h>
#include <cuda.h>
// declaration of function to check for errors
void checkCudaError(const char* msg);
// kernel
// Block i copies its slice of d_in, element-reversed, into the mirror-image
// block position of d_out, so the whole array ends up reversed.
__global__ void reverseArrayBlock(int *d_out, int *d_in) {
int srcBase = blockDim.x * blockIdx.x;
int dstBase = blockDim.x * (gridDim.x - 1 - blockIdx.x);
int src = srcBase + threadIdx.x;
int dst = dstBase + (blockDim.x - 1 - threadIdx.x);
d_out[dst] = d_in[src];
}
// main thread
// Reverses a 256K-element array on the GPU and verifies the result on the
// host. Replaces deprecated cudaThreadSynchronize() (removed in CUDA 12)
// with cudaDeviceSynchronize() and checks the host allocation.
int main(int argc, char** argv) {
int *h_a;
int dimA = 256 * 1024;
int *d_b, *d_a;
int numThreadsPerBlock = 256;
// compute number of blocks needed
int numBlocks = dimA / numThreadsPerBlock;
// allocate memory
size_t memSize = numBlocks * numThreadsPerBlock*sizeof(int);
h_a = (int *)malloc(memSize);
if (h_a == NULL) {
fprintf(stderr, "host malloc failed\n");
exit(EXIT_FAILURE);
}
cudaMalloc((void **) &d_a, memSize);
cudaMalloc((void **) &d_b, memSize);
// init array on host
for (int i=0; i<dimA; i++) {
h_a[i] = i;
}
cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice);
dim3 dimGrid(numBlocks);
dim3 dimBlock(numThreadsPerBlock);
reverseArrayBlock <<< dimGrid, dimBlock >>> (d_b, d_a);
// block until synced
cudaDeviceSynchronize();
checkCudaError("kernel invocation");
cudaMemcpy(h_a, d_b, memSize, cudaMemcpyDeviceToHost);
checkCudaError("memcpy");
// verify: output must be the exact reversal of the input
for (int i=0; i<dimA; i++) {
assert( h_a[i] == dimA - 1 - i);
}
cudaFree(d_a); cudaFree(d_b);
free(h_a);
return 0;
}
// Abort with a labelled diagnostic if the most recent CUDA call failed.
// Note: cudaGetLastError() also clears the sticky error state.
void checkCudaError(const char* msg) {
cudaError_t err = cudaGetLastError();
if (err == cudaSuccess) return;
fprintf(stderr, "Cuda error: %s : %s .\n", msg, cudaGetErrorString(err) );
exit(EXIT_FAILURE);
}
|
13,057 | /***************************************************************************
*
* (C) Copyright 2010 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
***************************************************************************/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include "UDTypes.h"
#define TILE 64
#define LOG_TILE 6
__constant__ float cutoff2_c;
__constant__ float cutoff_c;
__constant__ int gridSize_c[3];
__constant__ int size_xy_c;
__constant__ float _1overCutoff2_c;
// Assign each sample to the grid bin containing its (kX, kY, kZ) position.
// Emits a sort key (the bin index, or gridNumElems as an overflow sentinel
// when the bin is already full) and the sample's original index, for a later
// key-value sort + reorder. One thread per sample.
__global__ void binning_kernel (unsigned int n, ReconstructionSample* sample_g, unsigned int* idxKey_g,
unsigned int* idxValue_g, unsigned int* binCount_g, unsigned int binsize, unsigned int gridNumElems){
unsigned int key;
unsigned int sampleIdx = blockIdx.x*blockDim.x+threadIdx.x;
ReconstructionSample pt;
unsigned int binIdx;
unsigned int count;
if (sampleIdx < n){
pt = sample_g[sampleIdx];
// Flatten the truncated 3-D coordinate into a linear bin index.
binIdx = (unsigned int)(pt.kZ)*size_xy_c + (unsigned int)(pt.kY)*gridSize_c[0] + (unsigned int)(pt.kX);
// Reserve a slot: atomicAdd returns the pre-increment count. If the bin
// was already full, roll the reservation back with atomicSub and mark the
// sample as overflow. The count may transiently exceed binsize between
// the two atomics, but never by more than the number of racing threads,
// and each over-reserver undoes its own increment.
count = atomicAdd(binCount_g+binIdx, 1);
if (count < binsize){
key = binIdx;
} else {
atomicSub(binCount_g+binIdx, 1);
// Overflow samples sort to the end (key == gridNumElems is past any
// valid bin index).
key = gridNumElems;
}
idxKey_g[sampleIdx] = key;
idxValue_g[sampleIdx] = sampleIdx;
}
}
// Gather pass after the key-value sort: element 'dst' of the sorted output
// comes from the original position recorded in idxValue_g by binning.
__global__ void reorder_kernel(int n, unsigned int* idxValue_g, ReconstructionSample* samples_g, ReconstructionSample* sortedSample_g){
unsigned int dst = blockIdx.x*blockDim.x + threadIdx.x;
if (dst < n){
unsigned int src = idxValue_g[dst];
sortedSample_g[dst] = samples_g[src];
}
}
// Rational (minimax) approximation used as the gridding convolution kernel,
// evaluated at v. NOTE(review): the coefficients come from a Kaiser-Bessel
// window implementation (see URL below), which suggests this approximates the
// zeroth-order modified Bessel function I0 — confirm against that source
// before relying on the exact identity.
__device__ float kernel_value(float v){
float rValue = 0;
// Both polynomials are in z = v^2.
float z = v*v;
// polynomials taken from http://ccrma.stanford.edu/CCRMA/Courses/422/projects/kbd/kbdwindow.cpp
// Numerator evaluated in Horner form.
float num = (z* (z* (z* (z* (z* (z* (z* (z* (z* (z* (z* (z* (z*
(z* 0.210580722890567e-22f + 0.380715242345326e-19f ) +
0.479440257548300e-16f) + 0.435125971262668e-13f ) +
0.300931127112960e-10f) + 0.160224679395361e-7f ) +
0.654858370096785e-5f) + 0.202591084143397e-2f ) +
0.463076284721000e0f) + 0.754337328948189e2f ) +
0.830792541809429e4f) + 0.571661130563785e6f ) +
0.216415572361227e8f) + 0.356644482244025e9f ) +
0.144048298227235e10f);
// Denominator of the rational fit.
float den = (z*(z*(z-0.307646912682801e4f)+0.347626332405882e7f)-0.144048298227235e10f);
// Fast (approximate) division intrinsic: trades a few ulps for speed.
rValue = __fdividef(-num,den);
return rValue;
}
// Gridding: each thread owns one regular-grid point and accumulates the
// weighted contribution (value and density) of every binned sample within
// 'cutoff_c' of it. blockIdx.y encodes both the y and z block coordinates.
// NOTE(review): assumes gridSize_c[1] is divisible by blockDim.y and that the
// launch covers the volume exactly — there is no bounds guard on 'idx'.
__global__ void gridding_GPU (ReconstructionSample* sample_g, unsigned int* binStartAddr_g, float2* gridData_g, float* sampleDensity_g, float beta){
__shared__ ReconstructionSample sharedBin[TILE];
// Linear thread index within the block, used to cooperatively stage tiles.
const int flatIdx = threadIdx.z*blockDim.y*blockDim.x+threadIdx.y*blockDim.x+threadIdx.x;
// figure out starting point of the tile
const int z0 = blockDim.z*(blockIdx.y/(gridSize_c[1]/blockDim.y));
const int y0 = blockDim.y*(blockIdx.y%(gridSize_c[1]/blockDim.y));
const int x0 = blockIdx.x*blockDim.x;
const int X = x0+threadIdx.x;
const int Y = y0+threadIdx.y;
const int Z = z0+threadIdx.z;
// Clamped search window: the block's sub-volume expanded by the cutoff
// radius on every side. These bounds depend only on block indexes, so they
// are uniform across the block and the barriers below are non-divergent.
const int xl = x0-ceil(cutoff_c);
const int xL = (xl < 0) ? 0 : xl;
const int xh = x0+blockDim.x+cutoff_c;
const int xH = (xh >= gridSize_c[0]) ? gridSize_c[0]-1 : xh;
const int yl = y0-ceil(cutoff_c);
const int yL = (yl < 0) ? 0 : yl;
const int yh = y0+blockDim.y+cutoff_c;
const int yH = (yh >= gridSize_c[1]) ? gridSize_c[1]-1 : yh;
const int zl = z0-ceil(cutoff_c);
const int zL = (zl < 0) ? 0 : zl;
const int zh = z0+blockDim.z+cutoff_c;
const int zH = (zh >= gridSize_c[2]) ? gridSize_c[2]-1 : zh;
const int idx = Z*size_xy_c + Y*gridSize_c[0] + X;
float2 pt;
pt.x = 0.0;
pt.y = 0.0;
float density = 0.0;
for (int z = zL; z <= zH; z++){
for (int y = yL; y <= yH; y++){
// binStartAddr_g appears to hold CSR-style prefix sums of the bin
// counts: [start, end) spans all sorted samples whose bins lie in the
// x-range [xL, xH] of this (y, z) row — consistent with binning_kernel.
const unsigned int *addr = binStartAddr_g+z*size_xy_c+ y*gridSize_c[0];
const unsigned int start = *(addr+xL);
const unsigned int end = *(addr+xH+1);
const unsigned int delta = end-start;
// Stage the row's samples through shared memory, TILE at a time.
for (int x = 0; x < ((delta+TILE-1)>>LOG_TILE); x++){
int tileSize = ((delta-(x<<LOG_TILE)) > TILE) ? TILE : (delta-(x<<LOG_TILE));
int globalIdx = flatIdx+(x<<LOG_TILE);
__syncthreads();
if(flatIdx < tileSize){
sharedBin[flatIdx] = sample_g[start+globalIdx];
}
__syncthreads();
for (int j=0; j< tileSize; j++){
const float real = sharedBin[j].real;
const float imag = sharedBin[j].imag;
const float sdc = sharedBin[j].sdc;
// Skip samples that cannot contribute anything.
if((real != 0.0 || imag != 0.0) && sdc != 0.0){
// Squared distance from the sample to this grid point.
float v = (sharedBin[j].kX-X)*(sharedBin[j].kX-X);
v += (sharedBin[j].kY-Y)*(sharedBin[j].kY-Y);
v += (sharedBin[j].kZ-Z)*(sharedBin[j].kZ-Z);
if(v<cutoff2_c){
// Kernel weight scaled by the sample density compensation.
const float w = kernel_value(beta*sqrtf(1.0-(v*_1overCutoff2_c))) *sdc;
pt.x += w*real;
pt.y += w*imag;
density += 1.0;
}
}
}
}
}
}
gridData_g[idx] = pt;
sampleDensity_g[idx] = density;
}
|
13,058 | #include "includes.h"
// Convert an interleaved 8-bit RGB image to a single-channel grayscale image
// using BT.601-style luminance weights. One thread per pixel.
__global__ void colorToGray(unsigned char *input, unsigned char *output, int height, int width)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= width || y >= height)
return;
int pix = y * width + x;
int rgb = 3 * pix;      // rgb rgb rgb ... interleaved layout
float gray = input[rgb] * 0.299f
           + input[rgb + 1] * 0.587f
           + input[rgb + 2] * 0.114f;
output[pix] = gray;
}
13,059 | #include "includes.h"
// For each point, write 1.0 into the scratch buffer if it has a nearest
// neighbour (index != -1), otherwise 0.0. One thread per point.
__global__ void kernel_copy_NN_with_NN_assuption(double *d_temp_double_mem, int *d_nearest_neighbour_indexes, int number_of_points)
{
int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p >= number_of_points)
return;
d_temp_double_mem[p] = (d_nearest_neighbour_indexes[p] != -1) ? 1.0f : 0.0f;
}
13,060 | #include <vector>
#include <string>
#include <iostream>
#include <algorithm>
#include <sstream>
#include <ctime>
#include <signal.h>
#include <unistd.h>
#include <stdio.h>
#include <fstream>
#include <stdlib.h>
#include <math.h>
#include <unordered_set>
#include <curand.h>
#include <curand_kernel.h>
using namespace std;
struct Edge;
struct Vertex;
struct Layer;
int random(int min, int max);
float average(float average, float dataPoint);
void setupListeners();
template<typename T>
inline void removeFromVector(vector<T> & v, const T & item);
// A weighted, directed connection between two vertices (neurons).
struct Edge {
float weight;   // current connection weight
float change;   // last applied weight delta — presumably kept for momentum; confirm in trainGPU
Vertex* from;   // source vertex
Vertex* to;     // destination vertex
};
// A single neuron: its graph links plus per-sample training state.
struct Vertex {
vector<Edge*> inputs;   // incoming edges
vector<Edge*> outputs;  // outgoing edges
float output;           // last computed activation
float bias;
int layer;              // index of the layer this vertex belongs to
bool isOutput;
int index;              // position within the flat node arrays
float delta;            // backprop delta term
float error;
};
// One layer of the network: its vertices and the edges associated with it.
struct Layer {
vector<Vertex*> nodes;
vector<Edge*> edges;
int* edgeNodeMapping;   // NOTE(review): ownership/format not visible here — see updateDeviceMemory
};
// Feed-forward neural network with a randomly generated topology, trainable
// by backpropagation. The topology is held both as host-side graph structures
// (layers/nodes/edges) and as flat device arrays for the GPU training path.
class NN {
public:
NN(bool useGPU, int input, vector<int> hidden, int output);
vector<Layer> layers;   // nodes/edges grouped per layer
vector<Vertex*> nodes;  // all vertices, in creation order
vector<Edge*> edges;    // all edges, in creation order
int inputSize;
int outputSize;
int outputIdx;
bool useGPU;            // when true, device mirrors are kept in sync
int nodeIdx;
// Topology construction (several overloads for different wiring policies).
Vertex* addVertex(std::vector<Vertex*> inputs, std::vector<Vertex*> outputs);
Vertex* addVertex(int inputCount, int outputCount, int layer, bool useBasicLayering);
Vertex* addVertex(int inputs, int outputs, bool isInput, bool isOutput, int layer, bool useStrictLayers);
void removeVertex();
Edge* addEdge(Vertex* from, Vertex* to, bool addLayer);
Edge* addEdge(Vertex* from, Vertex* to);
void removeEdge(Edge* edge);
int findLayer(Vertex* vertex);
void initRNGCPU();
// Training / inference entry points.
float trainGPU(vector<float> inputs, vector<float> target);
float trainGPU(vector<float> inputs, vector<float> target, float learningRate, float momentum);
vector<float> runGPULauncher(vector<float>& inputs);
vector<float> runCpu(vector<float>& inputs);
vector<float> run(vector<float>& inputs);
// Synchronize the graph representation with the flat device arrays.
void updateHostMemory();
void updateDeviceMemory();
void print(ostream& output);
double layerDist(double x, int mean);
// CUDA pointers
float* d_weights;            // per-edge weights
float* d_outputs;            // per-node activations
float* d_bias;
int* d_edgeNodeMappingTo;    // per-edge destination node index
int* d_edgeNodeMappingFrom;  // per-edge source node index
float* d_errors;
float* d_deltas;
float* d_changes;            // per-edge momentum terms
float* d_target;
bool* d_managementThreads;
curandState_t* RNGStates;    // per-thread device RNG state
};
bool userEntryMode = false;
bool running = true;
// SIGINT handler: the first Ctrl-C switches the run into interactive user
// input mode; a second Ctrl-C stops the training loop entirely.
void my_handler(int s){
printf("Caught signal %d. Entering user input mode.\n",s);
if (userEntryMode)
running = false;
userEntryMode = true;
setupListeners();
}
// Route SIGINT (Ctrl-C) to my_handler with no extra blocked signals or flags.
void setupListeners() {
struct sigaction handler;
handler.sa_handler = my_handler;
sigemptyset(&handler.sa_mask);
handler.sa_flags = 0;
sigaction(SIGINT, &handler, NULL);
}
/*int test(bool useGPU, vector<vector<float>> inputs, vector<vector<float>> targets, vector<int> hidden) {
int inputSize = inputs[0].size();
int outputSize = targets[0].size();
srand(0);
setupListeners();
NN nn(useGPU, inputSize, hidden, outputSize);
//ofstream outputFile(string(useGPU?"gpu":"cpu") + ".graph", ofstream::out);
//nn.print(outputFile);
clock_t begin = clock();
vector<float> target(outputSize);
vector<float> input(inputSize);
float error = 1.0;
do {
int index = random(0, inputSize);
if (userEntryMode) {
std::cout << "\n";
cin >> iInput;
std::cout << "\n";
}
} while(error > 0 && running);
}*/
// Train the network on a toy parity task — predict whether a random 3-digit
// number is even from its scaled decimal digits — until the running-average
// error drops to 0.005 or the user interrupts. After a Ctrl-C, switches to
// interactive mode and runs the network on user-entered numbers instead.
// Returns 0. Also dumps the generated topology to gpu.graph / cpu.graph.
int test(bool useGPU, int inputSize, vector<int> hidden, int outputSize) {
srand(0);
setupListeners();
NN nn(useGPU, inputSize, hidden, outputSize);
ofstream outputFile(string(useGPU?"gpu":"cpu") + ".graph", ofstream::out);
nn.print(outputFile);
clock_t begin = clock();
vector<float> target(outputSize);
vector<float> input(inputSize);
float error = 1.0;
do {
int iInput = random(100, 1000);
if (userEntryMode) {
std::cout << "\n";
cin >> iInput;
std::cout << "\n" << iInput << "\n";
}
// Decompose the number into scaled decimal digits, least significant
// first. NOTE(review): 'input' is not cleared between iterations, so a
// number with fewer digits than a previous one would leave stale digits
// behind — presumably harmless here since random(100, 1000) always
// yields 3 digits; confirm random()'s bounds.
int n = iInput;
int i = 0;
while (n) {
input[i++] = (n % 10)/10.0;
n /= 10;
}
//input[0] = iInput/1000.0;
// Supervised target: 1.0 for even numbers, 0.0 for odd.
target[0] = iInput%2==0?1.0:0.0;
//target[0] = (iInput>500?1.0:0.0);
std::cout << "Expected: " << target[0] << ", Input: " << iInput << ", ";
if (userEntryMode) {
// Interactive mode: just run the network forward and show its answer.
float result = nn.runGPULauncher(input)[0];
cout << "Output: " << result << "\n";
}else{
// Training mode: learning rate and momentum shrink as the running
// average error falls.
error = average(error, nn.trainGPU(input, target, error*2, error));
std::cout << "\rError: " << error;
}
} while(error > 0.005 && running);
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
cout << "Time ms: " << elapsed_secs*1000 << "\n";
return 0;
}
// Entry point: builds a fixed hidden-layer topology and runs the GPU
// training harness once.
int main(int argc, char** argv) {
    static const int kHiddenWidths[] = {5, 10, 50, 200, 50, 10};
    vector<int> hiddenSizes;
    for (int layer = 0; layer < 6; ++layer) {
        hiddenSizes.push_back(kHiddenWidths[layer]);
    }
    for (int run = 0; run < 1; ++run) {
        test(true, 3, hiddenSizes, 1);
    }
    cout << "--------------------------\n";
}
// layers: [inputSize, hiddenSize, outputSize]
// Builds a layered random graph: `input` edge-less input vertices, one
// layer per entry of `hidden` (each vertex wired to a random number of
// random earlier vertices via addVertex), then `output` output vertices.
// When useGPU is set, the flattened arrays are uploaded immediately.
NN::NN(bool useGPU, int input, vector<int> hidden, int output) {
    // comment in for release
    //srand(time(NULL));
    inputSize = 0;
    outputSize = 0;
    outputIdx = 0;
    nodeIdx = 0;
    this->useGPU = useGPU;
    int nodes = 0;  // running count of vertices created so far
    for (int i=0; i<input; ++i) {
        // Input vertices start with no edges.
        addVertex(vector<Vertex*>(), vector<Vertex*>());
        //addVertex(0, 0, true, false, 0, false);
        nodes++;
        inputSize++;
    }
    for (int i=0; i<hidden.size(); ++i) {
        // NOTE(review): this counts ALL vertices created so far, not just
        // the previous layer's -- confirm that fan-in range is intended.
        int prevLayerNodes = nodes;
        for (int j=0; j<hidden[i]; ++j) {
            addVertex(random(1, prevLayerNodes), 0, i+1, false);
            //addVertex(random(1, prevLayerNodes), 0, false, false, i+1, false);
            nodes++;
        }
    }
    for (int i=0; i<output; ++i) {
        addVertex(random(1, nodes), 0, hidden.size()+1, false);
        //addVertex(random(1, nodes), 0, false, true, 2, false);
        outputSize++;
    }
    /* Dead exploration code: wire dangling vertices forward.
    for (int i=0; i<layers.size()-1; ++i) {
        for (int j=0; j<layers[i].nodes.size(); ++j) {
            Vertex* vertex = layers[i].nodes[j];
            if (vertex->outputs.size() == 0) {
                Layer& outputLayer = layers[i+1];
                Vertex* outputVertex = outputLayer.nodes[
                    random(0, outputLayer.nodes.size()-1)
                ];
                addEdge(vertex, outputVertex, true);
            }
        }
    }*/
    if (useGPU) {
        updateDeviceMemory();
    }
}
// Inverted parabola centered where 2*x == mean; peaks at `mean`.
double NN::layerDist(double x, int mean) {
    const double offset = 2 * x - mean;
    return mean - pow(offset, 2);
}
// Flattens the host-side graph into contiguous per-edge / per-node
// arrays (layer by layer) and uploads them to the device.  Also assigns
// each vertex's `index` to its position in the flattened node order.
// Fixes: d_errors, d_deltas and d_changes previously uploaded the `bias`
// staging buffer, and d_changes used the node-sized byte count for an
// edge-sized array.
void NN::updateDeviceMemory() {
    initRNGCPU();
    int edgeCount = edges.size();
    int nodeCount = nodes.size();
    size_t floatEdge = sizeof(float)*edgeCount;
    size_t floatNode = sizeof(float)*nodeCount;
    size_t intEdge = sizeof(int)*edgeCount;
    // TODO Free previous device memory
    // Malloc device memory
    cudaMalloc(&d_weights, floatEdge);
    cudaMalloc(&d_outputs, floatNode);
    cudaMalloc(&d_bias, floatNode);
    cudaMalloc(&d_edgeNodeMappingTo, intEdge);
    cudaMalloc(&d_edgeNodeMappingFrom, intEdge);
    cudaMalloc(&d_errors, floatNode);
    cudaMalloc(&d_deltas, floatNode);
    cudaMalloc(&d_changes, floatEdge);
    cudaMalloc(&d_target, sizeof(float)*outputSize);
    cudaMalloc(&d_managementThreads, sizeof(bool)*edgeCount);
    cudaMemset(d_outputs, 0, floatNode);
    cudaMemset(d_changes, 0, floatEdge);
    // Host staging buffers, filled layer by layer below.
    float* weights = (float*) malloc(floatEdge);
    float* bias = (float*) malloc(floatNode);
    int* edgeNodeMappingTo = (int*) malloc(intEdge);
    int* edgeNodeMappingFrom = (int*) malloc(intEdge);
    float* errors = (float*) malloc(floatNode);
    float* deltas = (float*) malloc(floatNode);
    float* changes = (float*) malloc(floatEdge);
    bool* managementThreads = (bool*) malloc(sizeof(bool)*edgeCount);
    int currEdge = 0;
    int currNode = 0;
    for (int i=0; i<layers.size(); ++i) {
        int edgesInLayer = 0;
        for (int j=0; j<layers[i].nodes.size(); ++j) {
            Vertex* node = layers[i].nodes[j];
            bias[currNode] = node->bias;
            errors[currNode] = node->error;
            deltas[currNode] = node->delta;
            for (int k=0; k<node->inputs.size(); ++k) {
                edgesInLayer++;
                Edge* edge = node->inputs[k];
                weights[currEdge] = edge->weight;
                changes[currEdge] = edge->change;
                managementThreads[currEdge] = (k==0); // first input edge per node
                edgeNodeMappingTo[currEdge] = currNode;
                edgeNodeMappingFrom[currEdge] = edge->from->index;
                currEdge++;
            }
            node->index = currNode;
            currNode++;
        }
        cout << "Edges In Layer " << i << " = " << edgesInLayer << "\n";
    }
    // Sanity report on the flattened arrays (diagnostic output only).
    for (int i=0; i<edgeCount; ++i) {
        if (edgeNodeMappingTo[i] < 0 || edgeNodeMappingTo[i]>nodeCount)
            cout << "edgeNodeMappingTo: [" << i << "]: " << edgeNodeMappingTo[i] << "\n";
        if (edgeNodeMappingFrom[i] < 0 || edgeNodeMappingFrom[i]>nodeCount)
            cout << "edgeNodeMappingFrom: [" << i << "]: " << edgeNodeMappingFrom[i] << "\n";
        if (weights[i] < 0 || weights[i]>1)
            cout << "weights: [" << i << "]: " << weights[i] << "\n";
    }
    cudaMemcpy(d_weights, weights, floatEdge, cudaMemcpyHostToDevice);
    cudaMemcpy(d_bias, bias, floatNode, cudaMemcpyHostToDevice);
    cudaMemcpy(d_edgeNodeMappingTo, edgeNodeMappingTo, intEdge, cudaMemcpyHostToDevice);
    cudaMemcpy(d_edgeNodeMappingFrom, edgeNodeMappingFrom, intEdge, cudaMemcpyHostToDevice);
    cudaMemcpy(d_errors, errors, floatNode, cudaMemcpyHostToDevice);
    cudaMemcpy(d_deltas, deltas, floatNode, cudaMemcpyHostToDevice);
    cudaMemcpy(d_changes, changes, floatEdge, cudaMemcpyHostToDevice);
    cudaMemcpy(d_managementThreads, managementThreads, sizeof(bool)*edgeCount, cudaMemcpyHostToDevice);
    free(weights);
    free(bias);
    free(edgeNodeMappingTo);
    free(edgeNodeMappingFrom);
    free(errors);
    free(deltas);
    free(changes);
    free(managementThreads);
}
// Downloads the flattened device arrays and (re)creates host vertices
// for any node ids seen in the edge mappings that do not exist yet.
// Fix: d_changes is an edge-sized array; it was previously copied with
// the node-sized byte count.
void NN::updateHostMemory() {
    int edgeCount = edges.size();
    int nodeCount = nodes.size();
    size_t floatEdge = sizeof(float)*edgeCount;
    size_t floatNode = sizeof(float)*nodeCount;
    size_t intEdge = sizeof(int)*edgeCount;
    float* weights = (float*) malloc(floatEdge);
    float* bias = (float*) malloc(floatNode);
    int* edgeNodeMappingTo = (int*) malloc(intEdge);
    int* edgeNodeMappingFrom = (int*) malloc(intEdge);
    float* errors = (float*) malloc(floatNode);
    float* deltas = (float*) malloc(floatNode);
    float* changes = (float*) malloc(floatEdge);
    cudaMemcpy(weights, d_weights, floatEdge, cudaMemcpyDeviceToHost);
    cudaMemcpy(bias, d_bias, floatNode, cudaMemcpyDeviceToHost);
    cudaMemcpy(edgeNodeMappingTo, d_edgeNodeMappingTo, intEdge, cudaMemcpyDeviceToHost);
    cudaMemcpy(edgeNodeMappingFrom, d_edgeNodeMappingFrom, intEdge, cudaMemcpyDeviceToHost);
    cudaMemcpy(errors, d_errors, floatNode, cudaMemcpyDeviceToHost);
    cudaMemcpy(deltas, d_deltas, floatNode, cudaMemcpyDeviceToHost);
    cudaMemcpy(changes, d_changes, floatEdge, cudaMemcpyDeviceToHost);
    // Walk every edge endpoint; the first time a node id is seen,
    // materialize a host vertex carrying the downloaded per-node state.
    unordered_set<int> nodesCreated;
    vector<Vertex*> emptyEdgeList;
    for (int i=0; i<edgeCount; ++i) {
        int nodeTo = edgeNodeMappingTo[i];
        int nodeFrom = edgeNodeMappingFrom[i];
        int nodeToCreate = -1;
        if (!nodesCreated.count(nodeTo)) {
            nodeToCreate = nodeTo;
        }else if (!nodesCreated.count(nodeFrom)) {
            nodeToCreate = nodeFrom;
        }
        if (nodeToCreate != -1) {
            nodesCreated.insert(nodeToCreate);
            Vertex* vertex = addVertex(emptyEdgeList, emptyEdgeList);
            vertex->delta = deltas[nodeToCreate];
            vertex->error = errors[nodeToCreate];
            //vertex->change = changes[nodeToCreate];
            vertex->bias = bias[nodeToCreate];
        }
    }
    free(weights);
    free(bias);
    free(edgeNodeMappingTo);
    free(edgeNodeMappingFrom);
    free(errors);
    free(deltas);
    free(changes);
}
// Dispatches a forward pass to the GPU or CPU implementation.
vector<float> NN::run(vector<float>& inputs) {
    return useGPU ? runGPULauncher(inputs) : runCpu(inputs);
}
// Seeds one curand state per block.  The seed is shared; each block
// gets a distinct sequence number so blocks draw independent streams.
__global__ void initRNG(unsigned int seed, curandState_t* states) {
    const int sequence = blockIdx.x;
    curand_init(seed, sequence, 0, &states[sequence]);
}
// Allocates and seeds one curand state per 512 edges (one block per
// state).  Seed is hard-coded to 0 for reproducibility (time(0) is
// commented out).  NOTE(review): RNGStates is never cudaFree'd.
void NN::initRNGCPU() {
    int N = edges.size()/512+1;
    cudaMalloc((void**) &RNGStates, N * sizeof(curandState_t));
    initRNG<<<N, 1>>>(/*time(0)*/0, RNGStates);
}
// Forward pass over one layer's edges: thread `id` atomically adds
// outputs[from] * weight into outputs[to].  `n` is an absolute end
// index (offset + edgeCount), not a count.  The sigmoid activation is
// applied afterwards by runGPUPost.
__global__ void runGPU(float* weights, int* edgeNodeMappingTo, int* edgeNodeMappingFrom, float* outputs, float* bias, bool* managementThreads, int offset, int n) {
    int id = blockIdx.x*blockDim.x+threadIdx.x + offset;
    if (id < n) {
        int nodeTo = edgeNodeMappingTo[id];
        int nodeFrom = edgeNodeMappingFrom[id];
        atomicAdd(&outputs[nodeTo], outputs[nodeFrom] * weights[id]);
        // Superseded by runGPUPost (activation once per node, not per edge).
        /*if (managementThreads[id]) {
        outputs[nodeTo] = 1/(1+exp(-1*(outputs[nodeTo]+bias[nodeTo])));
        }*/
    }
}
// Applies the logistic activation in place for node ids in
// [offset, n): outputs[i] = sigmoid(outputs[i] + bias[i]).
// `n` is an absolute end index, not a count.
__global__ void runGPUPost(float* outputs, float* bias, int offset, int n) {
    const int node = blockIdx.x*blockDim.x + threadIdx.x + offset;
    if (node < n) {
        const float preActivation = outputs[node] + bias[node];
        outputs[node] = 1/(1+exp(-1*preActivation));
    }
}
// Experimental stochastic forward pass: an edge propagates its raw
// source activation when a uniform draw is >= the edge weight.
// Currently unused (the launch in runGPULauncher is commented out).
// NOTE(review): the sigmoid write below is NOT atomic -- multiple edges
// targeting the same node race on outputs[nodeTo]; resolve before use.
__global__ void runGPUProb(float* weights, int* edgeNodeMappingTo, int* edgeNodeMappingFrom, float* outputs, float* bias, int offset, int n, curandState_t* states) {
    int id = blockIdx.x*blockDim.x+threadIdx.x + offset;
    if (id < n) {
        int nodeTo = edgeNodeMappingTo[id];
        int nodeFrom = edgeNodeMappingFrom[id];
        if (curand_uniform(&states[blockIdx.x]) >= weights[id]) {
            atomicAdd(&outputs[nodeTo], outputs[nodeFrom]);
        }
        outputs[nodeTo] = 1/(1+exp(-1*(outputs[nodeTo]+bias[nodeTo])));
    }
}
// Forward pass variant that only propagates over edges flagged in
// activeEdges.  NOTE(review): the sigmoid write is not atomic and runs
// once per edge, racing with the atomicAdd accumulation on the same
// node -- appears unused; resolve before enabling.
__global__ void runGPUMultiplex(bool* activeEdges, float* weights, int* edgeNodeMappingTo, int* edgeNodeMappingFrom, float* outputs, float* bias, int offset, int n) {
    int id = blockIdx.x*blockDim.x+threadIdx.x + offset;
    if (id < n) {
        int nodeTo = edgeNodeMappingTo[id];
        int nodeFrom = edgeNodeMappingFrom[id];
        if (activeEdges[id]) {
            atomicAdd(&outputs[nodeTo], outputs[nodeFrom] * weights[id]);
        }
        outputs[nodeTo] = 1/(1+exp(-1*(outputs[nodeTo]+bias[nodeTo])));
        //outputs[nodeTo]
    }
}
/*
learningRate
momentum
target: Node-wise
weights: Edge-wise
outputs: Node-wise
edgeNodeMappingFrom: Edge-wise
edgeNodeMappingTo: Edge-wise
nodeRunCount: Node-wise
initialNodeRunCount: Node-wise
errors: Node-wise
deltas: Node-wise
bias: Node-wise
changes: Edge-wise
n
*/
// Backpropagation over one layer's edges (one thread per edge in
// [offset, offset+n)): accumulates error into the source node, derives
// its delta, and applies the momentum-smoothed weight update.
// NOTE(review): errors[nodeFrom] is read non-atomically right after the
// atomicAdd -- when several edges share a source node the delta is
// computed from a partial sum; confirm this approximation is intended.
// `buffer` is currently unused (debug scratch).
__global__ void learnGPU(float learningRate,
                         float momentum,
                         float* weights,
                         float* outputs,
                         int* edgeNodeMappingFrom,
                         int* edgeNodeMappingTo,
                         float* errors,
                         float* deltas,
                         float* bias,
                         float* changes,
                         int offset,
                         int n,
                         float* buffer) {
    int id = blockIdx.x*blockDim.x+threadIdx.x+offset;
    if (id < offset+n && id>=0) {
        int nodeTo = edgeNodeMappingTo[id];
        int nodeFrom = edgeNodeMappingFrom[id];
        float output = outputs[nodeFrom];
        float& weight = weights[id];
        float delta = deltas[nodeTo];
        // Propagate error backwards: error[from] += delta[to] * w.
        atomicAdd(&errors[nodeFrom], delta * weight);
        // Sigmoid derivative: delta = error * out * (1 - out).
        deltas[nodeFrom] = errors[nodeFrom] * output * (1-output);
        //atomicAdd(&nodeRunCount[nodeFrom], -1);
        //if (nodeRunCount[nodeFrom] == 0) {
        //bias[nodeTo] += learningRate * delta;
        //	nodeRunCount[nodeFrom] = initialNodeRunCount[nodeFrom];
        //}
        // Momentum-smoothed weight update.
        float& change = changes[id];
        change = (learningRate * delta * output)
            + (momentum * change);
        weight += change;
    }
}
// Applies the bias update for node ids in [offset, offset+n):
//   bias += learningRate * delta
// Fix: the update previously *assigned* learningRate * delta, discarding
// the accumulated bias on every step; the commented-out reference code
// in learnGPU accumulates with += as well.
__global__ void learnGPUPost(float* deltas, float* bias, float learningRate, int offset, int n) {
    int id = blockIdx.x*blockDim.x+threadIdx.x+offset;
    if (id < offset+n && id>=0) {
        bias[id] += learningRate * deltas[id];
    }
}
// Checks the sticky CUDA error state (e.g. after a kernel launch or an
// async API call) and aborts the process with a diagnostic if any prior
// call failed.  Wrapped in do/while(0) so it behaves as one statement.
#define cudaCheckErrors(msg) \
do { \
    cudaError_t __err = cudaGetLastError(); \
    if (__err != cudaSuccess) { \
        fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
            msg, cudaGetErrorString(__err), \
            __FILE__, __LINE__); \
        fprintf(stderr, "*** FAILED - ABORTING\n"); \
        exit(1); \
    } \
} while (0)
// Convenience overload: one training step with the default
// learningRate (0.3) and momentum (0.1).  Returns the sample error.
float NN::trainGPU(vector<float> inputs, vector<float> target) {
    return trainGPU(inputs, target, 0.3, 0.1);
}
// One backpropagation step on the GPU: forward pass, host-side
// computation of output-layer errors/deltas, upload, then a reverse
// walk over the layers launching learnGPU / learnGPUPost per layer.
// Returns |mean output error| for this sample.
float NN::trainGPU(vector<float> inputs, vector<float> target, float learningRate, float momentum) {
    vector<float> results = runGPULauncher(inputs);
    int nodeSize = nodes.size();
    int outputSize = target.size();  // shadows the member of the same name
    // Variable-length arrays: a GCC/nvcc extension, not standard C++.
    float errors[nodeSize];
    float deltas[nodeSize];
    memset(errors, 0, sizeof errors);
    memset(deltas, 0, sizeof deltas);
    float errorSum = 0;
    // Output nodes occupy the tail of the flattened node order; walk
    // results and targets from the back in lockstep.
    for (int i=1; i<=outputSize; ++i) {
        //cout << "output[1] = " << results[results.size()-i] << ", output[2] = " << std::round(results[results.size()-i]) << "\n";
        float output = results[results.size()-i];
        std::cout << "Output: " << output << "\n";
        errors[nodeSize-i] = target[outputSize-i] - output;
        // Sigmoid derivative: delta = error * out * (1 - out).
        deltas[nodeSize-i] = errors[nodeSize-i] * output * (1-output);
        errorSum += errors[nodeSize-i];
    }
    // NOTE(review): if only the C abs(int) overload is in scope this
    // truncates to an integer -- confirm std::abs(float) is selected.
    float error = abs(errorSum/outputSize);
    //std::cout << "Error: " << error << "\n";
    cudaMemcpy(d_errors, &errors[0], nodeSize*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_deltas, &deltas[0], nodeSize*sizeof(float), cudaMemcpyHostToDevice);
    // Walk layers back-to-front; offsets start at the array ends and
    // shrink by each layer's edge/node counts.
    int offset = edges.size();
    int nodesOffset = nodes.size();
    // NOTE(review): d_buffer is passed to learnGPU without being
    // allocated (the cudaMalloc is commented out); the kernel never
    // dereferences it, but this is fragile.
    float* d_buffer;
    //cudaMalloc(&d_buffer, offset*sizeof(float));
    for (int i=layers.size()-1; i>0; --i) {
        int N = layers[i].edges.size();
        int nodesN = layers[i].nodes.size();
        offset -= N;
        nodesOffset -= nodesN;
        //std::cout << "N["<<i<<"]: "<<N<<", offset["<<i<<"]: " << offset << "\n";
        int edgeCount = layers[i].edges.size();
        int nodeCount = layers[i].nodes.size();
        int blockSize = 512;// or 64?
        int gridSize = edgeCount/blockSize + 1;
        learnGPU<<<gridSize, blockSize>>>(learningRate,
                                          momentum,
                                          d_weights,
                                          d_outputs,
                                          d_edgeNodeMappingFrom,
                                          d_edgeNodeMappingTo,
                                          d_errors,
                                          d_deltas,
                                          d_bias,
                                          d_changes,
                                          offset,
                                          N,
                                          d_buffer);
        blockSize = 512;// or 64?
        gridSize = nodeCount/blockSize + 1;
        learnGPUPost<<<gridSize, blockSize>>>(d_deltas, d_bias, learningRate, nodesOffset, nodesN);
        //cudaCheckErrors("kernel");
    }
    /*float* buffer = (float*) (sizeof(float)*edges.size());
    cudaMemcpy(buffer, d_buffer, edges.size()*sizeof(float), cudaMemcpyDeviceToHost);
    for (int i=0; i<edges.size(); ++i) {
        std::cout << "buffer["<<i<<"] = " << buffer[i] << "\n";
    }*/
    return error;
}
// GPU forward pass.  WARNING: `inputs` is taken by reference and is
// padded in place with zeros up to nodes.size() -- the caller's vector
// is modified.  The padded vector overwrites the entire d_outputs
// array, then runGPU / runGPUPost are launched layer by layer (the
// last two kernel arguments are absolute end indices, not counts).
// Returns the activations of the final layer only.
vector<float> NN::runGPULauncher(vector<float>& inputs) {
    clock_t begin = clock();
    //cout << "Time ms: " << ((clock() - begin)/(double)CLOCKS_PER_SEC)*1000 << "\n";
    for (int i=inputs.size(); i<nodes.size(); ++i) {
        inputs.push_back(0);
    }
    cudaMemcpy(d_outputs, &inputs[0], inputs.size()*sizeof(float), cudaMemcpyHostToDevice);
    //cudaCheckErrors("copy");
    //cout << "Time Post Copy: " << ((clock() - begin)/(double)CLOCKS_PER_SEC)*1000 << "\n";
    int offset = 0;
    int nodeOffset = layers[0].nodes.size();
    //cout << "\nnodeCount: " << layers[0].nodes.size() << "\n";
    for (int i=1; i<layers.size(); ++i) {
        int edgeCount = layers[i].edges.size();
        int nodeCount = layers[i].nodes.size();
        //cout << "nodeCount: " << nodeCount << "\n";
        int gridSize, blockSize;
        blockSize = 512;// or 64?
        gridSize = edgeCount/blockSize + 1;
        runGPU<<<gridSize, blockSize>>>(d_weights, d_edgeNodeMappingTo, d_edgeNodeMappingFrom, d_outputs, d_bias, d_managementThreads, offset, offset+edgeCount);
        //runGPUProb<<<gridSize, blockSize>>>(d_weights, d_edgeNodeMappingTo, d_edgeNodeMappingFrom, d_outputs, d_bias, offset, offset+edgeCount, RNGStates);
        blockSize = 512;// or 64?
        gridSize = nodeCount/blockSize + 1;
        runGPUPost<<<gridSize, blockSize>>>(d_outputs, d_bias, nodeOffset, nodeOffset+nodeCount);
        //cout << "Time Post Kernel " << i << ": " << ((clock() - begin)/(double)CLOCKS_PER_SEC)*1000 << "\n";
        //cudaCheckErrors("kernel");
        offset += edgeCount;
        nodeOffset += nodeCount;
    }
    // Output nodes are the tail of the flattened node order; this
    // blocking cudaMemcpy also synchronizes with the launches above.
    int outputLayerSize = layers[layers.size()-1].nodes.size();
    //cout << "\nOutput Size = " << outputLayerSize << "\n";
    float* outputs = (float*) malloc( sizeof(float)*outputLayerSize );
    //float* weights = (float*) malloc( sizeof(float)*edges.size() );
    cudaMemcpy(outputs, d_outputs+(nodes.size()-outputLayerSize), outputLayerSize*sizeof(float), cudaMemcpyDeviceToHost);
    //cudaMemcpy(weights, d_weights, edges.size()*sizeof(float), cudaMemcpyDeviceToHost);
    // Dead debug loop (body fully commented out).
    for (int i=0; i<edges.size(); ++i) {
        //	std::cout << "Weight["<<i<<"] = " << weights[i] << "\n";
    }
    //cout << "Time post copy output: " << ((clock() - begin)/(double)CLOCKS_PER_SEC)*1000 << "\n";
    vector<float> result(outputs, outputs + outputLayerSize);
    free(outputs);
    return result;
}
// CPU forward pass.  WARNING: pads the caller's `inputs` vector in
// place.  The returned vector begins with the input activations,
// followed by activations of the layer whose index equals outputIdx.
// NOTE(review): outputIdx is only assigned by the currently-unused
// 6-arg addVertex overload; with the active constructor it stays 0, so
// only the inputs are returned -- unlike runGPULauncher, which returns
// the final layer.  Confirm which contract is intended.
vector<float> NN::runCpu(vector<float>& inputs) {
    vector<float> result;
    while (inputs.size() < layers[0].nodes.size()) {
        inputs.push_back(0);
    }
    // Stage input vertices
    for (int i=0; i<inputs.size(); ++i) {
        layers[0].nodes[i]->output = inputs[i];
        result.push_back(inputs[i]);
    }
    int layerSize = layers.size();
    // Forward propegate each layer
    for (int i=1; i<layerSize; ++i) {
        vector<Vertex*> layerNodes = layers[i].nodes;
        for (int j=0; j<layerNodes.size(); ++j) {
            Vertex* node = layerNodes[j];
            // Weighted sum of inputs plus bias, then logistic activation.
            float sum = node->bias;
            for (int inputIdx=0; inputIdx < node->inputs.size(); ++inputIdx) {
                Edge* inputEdge = node->inputs[inputIdx];
                sum += inputEdge->weight * inputEdge->from->output;
            }
            float outputValue = 1/(1+exp(-1*sum));
            node->output = outputValue;
            if (i==outputIdx) {
                result.push_back(outputValue);
            }
        }
    }
    return result;
}
// Emits the network as a Graphviz digraph: one filled cluster per
// layer containing its node ids; all edges are buffered in a
// stringstream and written after the clusters so they appear at
// top level of the graph.
void NN::print(ostream& output) {
    //	output << "View at: http://www.webgraphviz.com/\n";
    output << "digraph G {\n";
    stringstream edges;
    for (int i=0; i<layers.size(); ++i) {
        output << "\tsubgraph cluster_" << i << " {\n"
            << "\t\tstyle=filled;\n"
            << "\t\tcolor=lightgrey;\n"
            << "\t\tnode [style=filled,color=white];\n";
        for (int j=0; j<layers[i].nodes.size(); ++j) {
            output << "\t\t\"" << layers[i].nodes[j]->index << "\"\n";
            for (int k=0; k<layers[i].nodes[j]->outputs.size(); k++) {
                edges << "\t\"" << layers[i].nodes[j]->index << "\" -> \"" << layers[i].nodes[j]->outputs[k]->to->index << "\";\n";
            }
        }
        output << "\t\tlabel = \"layer #" << i << "\";\n";
        output << "\t}\n";
    }
    output << edges.str();
    output << "}\n";
}
// Creates a vertex, wires an edge from each vertex in `inputs` and to
// each vertex in `outputs`, registers it in the layer one past its
// deepest input (creating the layer if needed), and records all new
// edges on that layer.  Returns the new vertex.
Vertex* NN::addVertex(std::vector<Vertex*> inputs, std::vector<Vertex*> outputs) {
    Vertex* vertex = new Vertex;
    vertex->index = nodeIdx++;
    vertex->bias = 0.0;
    vertex->error = 0.0;
    vertex->delta = 0.0;
    vertex->output = 0.0;
    vector<Edge*> edges;
    for (int i=0; i<inputs.size(); ++i) {
        edges.push_back(addEdge(inputs[i], vertex));
    }
    for (int i=0; i<outputs.size(); ++i) {
        edges.push_back(addEdge(vertex, outputs[i]));
    }
    nodes.push_back(vertex);
    // Layer = 1 + deepest input layer (0 when there are no inputs).
    int layerIdx = findLayer(vertex);
    if (layerIdx < (int)layers.size()) {
        layers[layerIdx].nodes.push_back(vertex);
    } else {
        Layer layer;
        layer.nodes.push_back(vertex);
        layers.insert(layers.begin()+layerIdx, layer);
    }
    vertex->layer = layerIdx;
    layers[layerIdx].edges.insert(layers[layerIdx].edges.end(), edges.begin(), edges.end());
    return vertex;
}
// Creates a vertex for `layer` with inputCount random input vertices
// and outputCount random output vertices, then delegates wiring to the
// vector overload.  useBasicLayering restricts choices to the adjacent
// layer; when false, earlier/later layers may be picked at random --
// except that the FIRST input is always drawn from layer-1 (the flag is
// forced true for i==0 and restored afterwards), guaranteeing at least
// one edge from the immediately previous layer.
Vertex* NN::addVertex(int inputCount, int outputCount, int layer, bool useBasicLayering) {
    vector<Vertex*> inputs;
    vector<Vertex*> outputs;
    bool useBasicLayeringSave = useBasicLayering;
    useBasicLayering = true;
    for (int i=0; i<inputCount; ++i) {
        Layer* inputLayer = useBasicLayering ? &layers[layer-1] : &layers[random(0,layer-1)];
        int inputVertex = random(0, inputLayer->nodes.size()-1);
        inputs.push_back(inputLayer->nodes[inputVertex]);
        if (i==0) useBasicLayering=useBasicLayeringSave;
    }
    for (int i=0; i<outputCount; ++i) {
        Layer* outputLayer;
        if (useBasicLayering) {
            outputLayer = &layers[layer+1];
        }else{
            // Never target the vertex's own layer; bump to the next one.
            int outputLayerId = random(0,layer+1);
            if (outputLayerId == layer) {
                outputLayerId++;
            }
            outputLayer = &layers[outputLayerId];
        }
        int outputVertex = random(0, outputLayer->nodes.size()-1);
        outputs.push_back(outputLayer->nodes[outputVertex]);
    }
    return addVertex(inputs, outputs);
}
// Legacy vertex constructor (all call sites are currently commented
// out).  Creates a vertex with `inputs` random input edges drawn from
// earlier layers and registers it with a layer; output edges are never
// actually created.  Fix: removed an int buffer that was malloc'd,
// never used, and never freed (memory leak).
Vertex* NN::addVertex(int inputs, int outputs, bool isInput, bool isOutput, int layer, bool useStrictLayers) {
    Vertex* vertex = new Vertex;
    vertex->index = nodeIdx++;
    vertex->isOutput = isOutput;
    vertex->bias = 0.0;
    vertex->error = 0.0;
    vertex->delta = 0.0;
    vertex->output = 0.0;
    vector<Edge*> edges;
    // Bump non-positive input counts up by one.
    inputs = inputs>0?inputs:inputs+1;
    if (isInput) {
        inputs--;
    }
    if (layer > 0) {
        // Guarantee one edge from the immediately previous layer.
        Layer* inputLayer = &layers[layer-1];
        int inputVertex = random(0, inputLayer->nodes.size()-1);
        edges.push_back(addEdge(inputLayer->nodes[inputVertex], vertex));
        inputs--;
    }
    for (int i=0; i<inputs; ++i) {
        Layer* inputLayer = useStrictLayers ? &layers[layer-1] : &layers[random(0,layer-1)];
        int inputVertex = random(0, inputLayer->nodes.size()-1);
        edges.push_back(addEdge(inputLayer->nodes[inputVertex], vertex));
    }
    for (int i=0; i<outputs; ++i) {
        // Value unused (output edges were never wired up); the random()
        // call is kept so the rand() sequence is unchanged.
        int outputVertex = random(inputSize, nodes.size());
    }
    nodes.push_back(vertex);
    int layerIdx = findLayer(vertex);
    // Route all output vertices into the single output layer once it exists.
    if (isOutput && outputIdx != 0) {
        isOutput = false;
        layerIdx = outputIdx;
    }
    if (layerIdx < (int)layers.size() && !isOutput) {
        layers[layerIdx].nodes.push_back(vertex);
    } else {
        if (isOutput) {
            outputIdx = layers.size();
            layerIdx = outputIdx;
        }
        Layer layer;
        layer.nodes.push_back(vertex);
        layers.insert(layers.begin()+layerIdx, layer);
    }
    vertex->layer = layerIdx;
    layers[layerIdx].edges.insert(layers[layerIdx].edges.end(), edges.begin(), edges.end());
    return vertex;
}
// TODO: remove edge from layer
// Removes a random vertex from `nodes` and detaches all of its edges.
// Fixes: random() is inclusive of its upper bound, so the previous
// random(0, nodes.size()) could index one past the end; and edges were
// removed while iterating the very vectors removeEdge() erases from,
// which skipped every other edge.
// NOTE(review): the vertex still lingers in its layer's node list and
// is not deleted -- see the TODO above.
void NN::removeVertex() {
    int vertexIdx = random(0, nodes.size() - 1);
    Vertex* vertex = nodes[vertexIdx];
    while (!vertex->inputs.empty()) {
        removeEdge(vertex->inputs.back());
    }
    while (!vertex->outputs.empty()) {
        removeEdge(vertex->outputs.back());
    }
    nodes.erase(nodes.begin() + vertexIdx);
}
// Detaches `edge` from both endpoints and deletes it.
// NOTE(review): the deleted pointer is NOT removed from NN::edges or
// from the layer edge lists that addEdge populates, so those containers
// are left holding dangling pointers.
void NN::removeEdge(Edge* edge) {
    removeFromVector(edge->from->outputs, edge);
    removeFromVector(edge->to->inputs, edge);
    delete edge;
}
// Creates an edge from -> to without registering it on any layer's
// edge list (callers append the returned edge to a layer themselves).
Edge* NN::addEdge(Vertex* from, Vertex* to) {
    return addEdge(from, to, false);
}
// Creates a directed edge from -> to, registers it with both endpoints
// and the global edge list; when addLayer is set it is also appended to
// the destination vertex's layer edge list.  Returns the new edge.
Edge* NN::addEdge(Vertex* from, Vertex* to, bool addLayer) {
    Edge* link = new Edge;
    link->from = from;
    link->to = to;
    link->change = 0.0;
    link->weight = 0.1;  // fixed initial weight
    from->outputs.push_back(link);
    to->inputs.push_back(link);
    edges.push_back(link);
    if (addLayer) {
        layers[to->layer].edges.push_back(link);
    }
    return link;
}
// Erases the first occurrence of `item` from `v`; no-op when absent.
template<typename T>
inline void removeFromVector(vector<T> & v, const T & item) {
    for (size_t i = 0; i < v.size(); ++i) {
        if (v[i] == item) {
            v.erase(v.begin() + i);
            break;
        }
    }
}
// Assigns `vertex` to the layer one past its deepest input vertex
// (layer 0 when it has no inputs) and returns that layer index.
int NN::findLayer(Vertex* vertex) {
    int deepest = -1;
    for (size_t i = 0; i < vertex->inputs.size(); ++i) {
        const int inputLayer = vertex->inputs[i]->from->layer;
        if (inputLayer > deepest) {
            deepest = inputLayer;
        }
    }
    vertex->layer = deepest + 1;
    return vertex->layer;
}
// Integer drawn from the inclusive range [min, max] via rand()
// (subject to the usual modulo bias; requires max >= min).
int random(int min, int max) {
    const int span = max - min + 1;
    return min + rand() % span;
}
// Exponential moving average with a fixed window of 20 samples:
// sheds 1/20 of the running average and blends in 1/20 of dataPoint.
float average(float average, float dataPoint) {
    static int N = 20;
    float decayed = average - average / N;
    return decayed + dataPoint / N;
}
|
13,061 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#include <string>
// Naive dense matrix multiply C = A * B for square N x N row-major
// matrices; one thread per output element, out-of-range threads exit.
__global__ void matrixMultGPU (double *A, double *B, double *C, int N){
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (row >= N || col >= N) {
        return;
    }
    double sum = 0.0;
    for (int k = 0; k < N; k++) {
        sum += A[row*N + k] * B[k*N + col];
    }
    C[row*N + col] = sum;
}
// Reference CPU implementation of C = A * B (square N x N, row-major).
void matrixMultCPU(double *A, double *B, double *C, int N){
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            double sum = 0.0;
            for (int k = 0; k < N; k++) {
                sum += A[row*N + k] * B[k*N + col];
            }
            C[row*N + col] = sum;
        }
    }
}
// Element-wise comparison of two N x N matrices; returns the Spanish
// verdict "Buen Cálculo" on an exact match, "Mal Cálculo" otherwise.
std::string testValues(double *A, double *B, int N){
    const int total = N * N;
    for (int i = 0; i < total; ++i) {
        if (A[i] != B[i]) {
            return "Mal Cálculo";
        }
    }
    return "Buen Cálculo";
}
// Driver: multiplies two N x N constant matrices on CPU and GPU, prints
// "cpuSeconds,gpuSeconds,speedup,verdict" as CSV, and checks the two
// results agree.  N is taken from argv[1].
// Fixes: the byte count was held in a `double` (implicitly converted at
// every malloc/cudaMalloc); kernel launch errors were never checked, so
// a failed launch silently compared garbage in C2.
int main(int argc, char **argv){
    cudaError_t error = cudaSuccess;
    double *A, *B, *C1, *C2;
    double *d_A, *d_B, *d_C;
    double CPU, GPU;
    if(argc != 2) {
        printf("No size given\n");
        return -1;
    }
    int N = atoi(argv[1]);
    size_t size = (size_t)N*N*sizeof(double);
    A = (double*)malloc(size);
    B = (double*)malloc(size);
    C1 = (double*)malloc(size);
    C2 = (double*)malloc(size);
    for(int i=0;i<N*N;i++){
        A[i]=1;
        B[i]=2;
    }
    //CPU----------------------------
    clock_t tic = clock();
    matrixMultCPU(A,B,C1, N);
    clock_t toc = clock();
    CPU = (double)(toc - tic) / CLOCKS_PER_SEC;
    printf("%f,",CPU);
    //-------------------------------
    error = cudaMalloc((void**)&d_A,size);
    if(error != cudaSuccess){
        printf("Error in cudaMalloc for d_A\n");
        exit(0);
    }
    error = cudaMalloc((void**)&d_B,size);
    if(error != cudaSuccess){
        printf("Error in cudaMalloc for d_B\n");
        exit(0);
    }
    error = cudaMalloc((void**)&d_C,size);
    if(error != cudaSuccess){
        printf("Error in cudaMalloc for d_C\n");
        exit(0);
    }
    error = cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
    if(error != cudaSuccess){
        printf("Error in cudaMemcpy for d_A\n");
        exit(0);
    }
    error = cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);
    if(error != cudaSuccess){
        printf("Error in cudaMemcpy for d_B\n");
        exit(0);
    }
    //GPU----------------------------
    dim3 dimBlock(32,32);
    dim3 dimGrid(ceil(N/float(dimBlock.x)),ceil(N/float(dimBlock.y)));
    clock_t tic2 = clock();
    matrixMultGPU<<<dimGrid,dimBlock>>>(d_A,d_B,d_C,N);
    cudaDeviceSynchronize();
    // Surface launch/execution failures before trusting C2.
    error = cudaGetLastError();
    if(error != cudaSuccess){
        printf("Error in kernel launch for matrixMultGPU\n");
        exit(0);
    }
    error = cudaMemcpy(C2,d_C,size,cudaMemcpyDeviceToHost);
    if(error != cudaSuccess){
        printf("Error in cudaMemcpy for C2\n");
        exit(0);
    }
    clock_t toc2 = clock();
    GPU = (double)(toc2 - tic2) / CLOCKS_PER_SEC;
    printf("%f,%f,%s\n",GPU,(CPU/GPU), testValues(C1,C2,N).c_str());
    //--------------------------------
    free(A);
    free(B);
    free(C1);
    free(C2);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
|
13,062 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
__global__ void vecAdd_kernel(float *a, float *b, float *c, int n) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= n) {
        return;
    }
    c[gid] = a[gid] + b[gid];
}
// Host wrapper: copies both operands to the device, launches the
// element-wise addition kernel, and copies the result into h_c.
void vecAdd(float *h_a, float *h_b, float *h_c, int n) {
    const size_t bytes = n * sizeof(float);
    float *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    const int blockSize = 256;
    const int gridSize = (int)ceil((float)n/blockSize);
    vecAdd_kernel<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
    // Blocking copy: also synchronizes with the kernel above.
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
// Driver: fills two random vectors, adds them on the GPU, and prints
// the mean of the result as a sanity check (expected near 1.0).
int main(int argc, char* argv[]) {
    int n = 1024;  // default element count, overridable via argv[1]
    if (argc > 1)
        n = atoi(argv[1]);
    printf("[BENCH] CUDA Vector Addition, n = %d\n", n);
    printf("[BENCH] Xuhao Chen <cxh@illinois.edu>\n");
    const size_t bytes = n * sizeof(float);
    float *h_a = (float*)malloc(bytes);
    float *h_b = (float*)malloc(bytes);
    float *h_c = (float*)malloc(bytes);
    for (int i = 0; i < n; i++) {
        h_a[i] = ((float) rand() / (RAND_MAX));
        h_b[i] = ((float) rand() / (RAND_MAX));
    }
    vecAdd(h_a, h_b, h_c, n);
    float sum = 0;
    for (int i = 0; i < n; i++)
        sum += h_c[i];
    printf("[BENCH] Final result: %f\n", sum/n);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
|
13,063 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
// mag(x,y): Euclidean length of vector (x, y); mag2(x,y): its square.
// Fix: arguments and bodies are now fully parenthesized -- the previous
// definitions broke operator precedence for compound arguments (e.g.
// mag2(a - b, c) expanded to a - b*a - b + c*c).  Call sites that
// already parenthesized their arguments are unaffected.
#define mag(x,y) (sqrt((x)*(x)+(y)*(y)))
#define mag2(x,y) ((x)*(x)+(y)*(y))
#define BLOCK_SIZE 512
// One collision/integration step for N circles of diameter `size`
// inside a box of half-extents (width, height); epsilon is the Euler
// time step.  Each thread handles one unordered ball pair: idx is
// inverted into triangular coordinates (j, k) with k < j so every pair
// is visited exactly once.
// NOTE(review): a ball appears in many pairs, and positions/velocities
// are read-modify-written with no atomics or synchronization -- updates
// race between threads; confirm this is acceptable for the simulation.
__global__ void step_kernel(float *posx, float *posy, float *vx, float *vy, int N, float size, float epsilon, float width, float height) {
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    //printf("%u, ",idx);
    // Invert the triangular pair index: j is the row, k the column.
    const float sqrtarg=1+8*idx;
    int j = (int)(sqrt(sqrtarg)+1);
    j/=2;
    int k = idx - (j*(j-1))/2;
    if (j >= N || k >= N){return;}
    float magnitude=mag((posx[k]-posx[j]),(posy[k]-posy[j]));
    if (magnitude <= size) {
        // Equal-mass elastic collision response along the line of centers.
        float dot = ((vx[k]-vx[j])*(posx[k]-posx[j]))+((vy[k]-vy[j])*(posy[k]-posy[j]));
        float mg = size*size;
        vx[k] -= (posx[k]-posx[j]) * (dot/mg);
        vy[k] -= (posy[k]-posy[j]) * (dot/mg);
        vx[j] -= (posx[j]-posx[k]) * (dot/mg);
        vy[j] -= (posy[j]-posy[k]) * (dot/mg);
        //when balls get stuck, push one of them just outside the other
        posx[j] -= (size/magnitude-0.8)*(posx[k]-posx[j]);
        posy[j] -= (size/magnitude-0.8)*(posy[k]-posy[j]);
    }
    //ideally this shouldn't run in every thread. Will be fixed when this file gets compiled into a library
    // Ball 0 never occurs as j in the wall section below, so thread 0
    // applies its wall bounce and integration here.
    if(idx == 0) {
        if (posx[0] >= width-size/2) {
            vx[0] *= -1;
            posx[0] = width-size/2;
        }
        if (posx[0] <= -width+size/2) {
            vx[0] *= -1;
            posx[0] = -width+size/2;
        }
        if (posy[0] >= height-size/2) {
            vy[0] *= -1;
            posy[0] = height-size/2;
        }
        if (posy[0] <= -height+size/2) {
            vy[0] *= -1;
            posy[0] = -height+size/2;
        }
        posx[0]+=epsilon*vx[0];
        posy[0]+=epsilon*vy[0];
    }
    // Wall bounce (reflect and clamp) plus Euler integration for ball j.
    if (posx[j] >= width-size/2) {
        vx[j] *= -1;
        posx[j] = width-size/2;
    }
    if (posx[j] <= -width+size/2) {
        vx[j] *= -1;
        posx[j] = -width+size/2;
    }
    if (posy[j] >= height-size/2) {
        vy[j] *= -1;
        posy[j] = height-size/2;
    }
    if (posy[j] <= -height+size/2) {
        vy[j] *= -1;
        posy[j] = -height+size/2;
    }
    posx[j]+=epsilon*vx[j];
    posy[j]+=epsilon*vy[j];
}
13,064 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Empty kernel: exists only to demonstrate launching code on the device.
__global__ void device_function() {
}
// Launches a trivial 1x1 kernel and reports completion on the host.
int main(){
    const int num_blocks = 1;
    const int num_threads_per_block = 1;
    device_function<<<num_blocks,num_threads_per_block>>>();
    printf("called a function on the device\n");
    return 0;
}
|
13,065 | #include <stdio.h>
// Enumerates CUDA devices and prints their key properties.
// Fixes: totalGlobalMem / sharedMemPerBlock are size_t and were printed
// with %d (truncation + undefined behavior on LP64); maxThreadsDim and
// maxGridSize are int[3] arrays, so the old %d printed the array's
// address, not the limits.
int main() {
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf("  Device name: %s\n", prop.name);
        printf("  Memory Clock Rate (KHz): %d\n",
               prop.memoryClockRate);
        printf("  Memory Bus Width (bits): %d\n",
               prop.memoryBusWidth);
        printf("  Peak Memory Bandwidth (GB/s): %f\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
        printf("  Device overlap: %d\n", prop.deviceOverlap);
        printf("  Total Global mem: %zu\n", prop.totalGlobalMem);
        printf("  Shared Mem per block: %zu\n", prop.sharedMemPerBlock);
        printf("  warp size: %d\n", prop.warpSize);
        printf("  Max threads per block: %d\n", prop.maxThreadsPerBlock);
        printf("  Max threads dim: %d x %d x %d\n",
               prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("  Max grid size: %d x %d x %d\n",
               prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("  Concurrent Kernels: %d\n", prop.concurrentKernels);
    }
}
|
13,066 | // nvcc gdist.cu -o gdist -lcuda -use_fast_math compiler
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <time.h>
#include <curand_kernel.h>
#define NS 3 // Number of sequences
#define NBASES 100 // Nucleotides / sequence
#define seq(s,i,n) (s[((n)+(i)*(NBASES+1))]) // nth position of the ith sequence in string 's'.
// Closed-form inversion of a linear pair index into (x, y) coordinates
// of the strict lower triangle, so pair index i enumerates all
// unordered sequence pairs exactly once.
// NOTE(review): uses float sqrtf -- verify the rounding holds for the
// intended pair-count range before reusing with large NS.
#define getx0(i) (((unsigned int)(-0.5+0.5*sqrtf(1+8*(i-1))))+2)
#define gety0(i) ((unsigned int)((getx0(i))*(3-(getx0(i)))/2+i-1))
#define getx(i) (getx0(i+1)-1)
#define gety(i) (gety0(i+1)-1)
// Normalized Hamming distance between one pair of sequences: launched
// with one single-thread block per pair; blockIdx.x is the pair index,
// decoded to sequence ids (x, y) via the getx/gety macros.  Writes the
// fraction of differing bases to dist[pair].
__global__ void pwdist(char *se, float *dist)
{
    int tid = blockIdx.x;  // pair index (one block per pair)
    const unsigned int x = getx(tid);
    const unsigned int y = gety(tid);
    unsigned int diff = 0;
    for (int base = 0; base < NBASES; ++base)
    {
        if (seq(se,x,base) != seq(se,y,base))
        {
            ++diff;
        }
    }
    dist[tid] = (float)diff / NBASES;
}
// Builds NS test sequences (all 'a' plus a few hand-placed mutations),
// computes all pairwise distances on the GPU, and prints them.
int main (int argc, char *argv[])
{
    // generate a bunch of test sequences
    // Each sequence occupies NBASES + 1 chars (NUL-terminated).
    const unsigned int ds = NS * (NBASES + 1); // Length of the array.
    const unsigned int bytes = ds * sizeof(char);
    char *se = (char*)malloc(bytes);
    // create sequences
    for (int i = 0; i < NS; ++i)
    {
        for (int j = 0; j < NBASES; ++j)
        {
            seq(se,i,j) = 'a';
        }
        seq(se,i,NBASES) = '\0';
    }
    // Mutations!!!!!!!
    seq(se,0,5) = 'c';
    seq(se,2,5) = 'c';
    seq(se,2,3) = 'g';
    // print sequences to screen just to check
    for (int i = 0; i < NS; ++i)
    {
        printf("%s\n", &seq(se,i,0));
    }
    // number of distances to compute: one per unordered pair.
    const unsigned int ndist = (int)(NS*(NS-1)/2);
    // memory allocation for the distances
    float *h_dist = (float*)malloc(ndist*sizeof(float));
    // allocate memory on the devices
    char *d_se;
    float *d_dist;
    cudaMalloc((void**)&d_se, bytes);
    cudaMalloc((void**)&d_dist, ndist * sizeof(float));
    /*
     * HERE BE THE MAIN PROGRAM
     */
    // copy the sequences from the host to the device
    cudaMemcpy(d_se, se, bytes, cudaMemcpyHostToDevice);
    // call CUDA function pwdist: one single-thread block per pair.
    pwdist<<<ndist,1>>>(d_se, d_dist);
    printf("Total of %u pairwise distances\n",ndist);
    // copy the distances from the device to the host
    // (blocking copy, so it also synchronizes with the kernel).
    cudaMemcpy(h_dist, d_dist, ndist * sizeof(float), cudaMemcpyDeviceToHost);
    /*
     * END OF THE MAIN PROGRAM
     */
    // Output of the results in a text form
    for (unsigned int i = 0; i < ndist; ++i)
    {
        printf("(%u,%u) \t %.4f\n", getx(i), gety(i), h_dist[i]);
    }
    // free the vectors
    cudaFree(d_se);
    cudaFree(d_dist);
    free(se);
    free(h_dist);
    return EXIT_SUCCESS;
}
|
13,067 | #include <iostream>
#include <cstdio>
// Each thread stores its own index into p and prints a greeting.
__global__ void kernel(int* p)
{
    const int tid = threadIdx.x;
    p[tid] = tid;
    printf("ahoj\n");
}
// Allocates ten ints on the device, has ten threads write their indices,
// copies the result back and prints it.  Fix: dPtr was never freed.
int main(int argc, char** argv)
{
    int* dPtr;
    cudaMalloc(&dPtr, sizeof(int) * 10);
    kernel<<<dim3(1,1,1), dim3(10,1,1)>>>(dPtr);
    int ptr[10];
    // Blocking copy: synchronizes with the kernel launch above.
    cudaMemcpy(&ptr[0], dPtr, sizeof(int) * 10, cudaMemcpyDeviceToHost);
    cudaFree(dPtr);  // previously leaked
    for (int i = 0; i < 10; i++)
    {
        std::cout << ptr[i] << std::endl;
    }
    return 0;
}
|
13,068 | #include "includes.h"
// Grid-stride affine transform: result[i] = input[i]*scale + shift,
// plus beta * result[i] when beta is non-zero.  The beta == 0 path
// never reads result[i], so it is safe on uninitialized output buffers.
__global__ void cudaSScale_kernel(unsigned int size, float* input, const float scale, const float shift, const float beta, float* result)
{
    const unsigned int start = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;
    if (beta == 0.0f) {
        // Pure write path.
        for (unsigned int i = start; i < size; i += step)
            result[i] = input[i] * scale + shift;
    }
    else {
        // Blend with the existing contents of result.
        for (unsigned int i = start; i < size; i += step)
            result[i] = input[i] * scale + shift + beta * result[i];
    }
}
13,069 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <sys/time.h>
// __global__ void merge(float *data, float *work, int k)
// {
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// int l = index * k;
// int m = l + k / 2;
// int r = l + k;
// int first = l;
// int second = m;
// for (int i = l; i < r; i++)
// {
// if (first < m && (second >= r || data[first] <= data[second]))
// {
// work[i] = data[first];
// first += 1;
// }
// else
// {
// work[i] = data[second];
// second += 1;
// }
// }
// }
// __global__ void parallel_merge(float *data, float *work)
// {
// uint half = blockDim.x >> 1;
// uint pos = blockIdx.x * blockDim.x;
// uint left_array = threadIdx.x < half ? 1 : 0;
// float cur = data[pos + threadIdx.x];
// uint i = 0;
// uint j = half;
// if (left_array)
// {
// while (i < j)
// {
// uint mid = i + (j - i) / 2;
// if (cur <= data[pos + half + mid])
// j = mid;
// else
// i = mid + 1;
// }
// work[pos + threadIdx.x + i] = cur;
// }
// else
// {
// while (i < j)
// {
// uint mid = i + (j - i) / 2;
// if (cur < data[pos + mid])
// j = mid;
// else
// i = mid + 1;
// }
// work[pos + threadIdx.x - half + i] = cur;
// }
// }
// One pass of bottom-up mergesort: each block merges one pair of adjacent
// sorted runs from `data` into `work` by rank computation (merge-path style,
// no shared memory). Each thread owns `stride` consecutive elements; `half`
// is one run's length, so a thread's elements all lie on a single side
// (index is a multiple of stride and half is too).
__global__ void parallel_merge(float *data, float *work, int stride)
{
uint index = threadIdx.x * stride;
uint pos = blockIdx.x * blockDim.x * stride;
// Length of each of the two runs; the merged span is blockDim.x * stride.
uint half = blockDim.x * stride >> 1;
uint left_array = index < half ? 1 : 0;
for (uint s = 0; s < stride; s++)
{
float cur = data[pos + index + s];
// Binary-search the opposite run for cur's rank: i ends up as the number
// of opposite-run elements that precede cur in the merged output.
uint i = 0;
uint j = half;
if (left_array)
{
// `<=` here (vs `<` below) keeps the merge stable: on ties the
// left-run element is placed first.
while (i < j)
{
uint mid = i + (j - i) / 2;
if (cur <= data[pos + half + mid])
j = mid;
else
i = mid + 1;
}
// Output slot = own offset within the left run + rank in the right run.
work[pos + index + s + i] = cur;
}
else
{
while (i < j)
{
uint mid = i + (j - i) / 2;
if (cur < data[pos + mid])
j = mid;
else
i = mid + 1;
}
// Right-run element: subtract `half` to get its offset within its run.
work[pos + index + s - half + i] = cur;
}
}
}
// Iterative bottom-up mergesort on the GPU.
// Precondition (as in the original): number_of_elements is a power of two,
// so runs always pair up evenly and every merge width divides the array.
// The two device buffers ping-pong: even passes read d_a and write d_b,
// odd passes the reverse; `pass` parity tracks which buffer is current.
// Returns 0 on success, 1 if no CUDA device could be selected.
int cuda_sort(int number_of_elements, float *a)
{
    cudaError_t status = cudaSetDevice(0);
    if (status != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        return 1;
    }
    const size_t bytes = number_of_elements * sizeof(float);
    float *d_a, *d_b;
    cudaMalloc((void **)&d_a, bytes);
    cudaMalloc((void **)&d_b, bytes);
    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    int pass = 0;
    for (int width = 2; width <= number_of_elements; width *= 2)
    {
        const int num_merges = number_of_elements / width;
        float *src = (pass % 2 == 0) ? d_a : d_b;
        float *dst = (pass % 2 == 0) ? d_b : d_a;
        // Up to 1024 threads per block one thread handles one element;
        // beyond that each of the 1024 threads handles width/1024 elements.
        if (width <= 1024)
            parallel_merge<<<num_merges, width>>>(src, dst, 1);
        else
            parallel_merge<<<num_merges, 1024>>>(src, dst, width / 1024);
        cudaDeviceSynchronize();
        pass++;
    }
    // After an even number of passes the latest data sits in d_a.
    float *result = (pass % 2 == 0) ? d_a : d_b;
    cudaMemcpy(a, result, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
13,070 | #ifndef MATRIX_TOOLBOX_CPP
#define MATRIX_TOOLBOX_CPP
#include "MatrixToolbox.cuh"
#include <cstdio>
// Copies the [src_row_start, src_row_end) x [src_col_start, src_col_end)
// window of `src` (optionally transposed and scaled by `scalar`) into the
// [dst_row_start, dst_row_end) x [dst_col_start, dst_col_end) window of
// `dst`. Row-major storage; one thread per destination element, so the
// block's (x, y) extent must cover the destination window.
// NOTE(review): dst_row_num and the *_end parameters of src are not used in
// the index math below — presumably kept for a uniform call signature.
__global__ void MatrixCpy(double* dst,
const uint32_t dst_row_num,
const uint32_t dst_col_num,
const uint32_t dst_row_start,
const uint32_t dst_row_end,
const uint32_t dst_col_start,
const uint32_t dst_col_end,
const double* src,
const uint32_t src_row_num,
const uint32_t src_col_num,
const uint32_t src_row_start,
const uint32_t src_row_end,
const uint32_t src_col_start,
const uint32_t src_col_end,
const double scalar,
bool srcTranspose){
// use threads to parallelize
uint32_t x_id = threadIdx.x;
uint32_t y_id = threadIdx.y;
// Threads outside the destination window have nothing to copy.
if (x_id >= dst_row_end - dst_row_start) return;
if (y_id >= dst_col_end - dst_col_start) return;
uint32_t dst_id = (x_id + dst_row_start) * dst_col_num + y_id + dst_col_start;
uint32_t src_id = 0;
if (!srcTranspose) {
src_id = (x_id + src_row_start) * src_col_num + y_id + src_col_start;
}
else {
// Transposed read: swap the roles of the row/col offsets; the stride is
// then src_row_num.
src_id = (y_id + src_col_start) * src_row_num + x_id + src_row_start;
}
// Skip the multiply in the common scalar == 1 case.
if (scalar == 1.0) {
dst[dst_id] = src[src_id];
}
else {
dst[dst_id] = scalar * src[src_id];
}
// NOTE(review): threads that took the early returns above never reach this
// barrier; it is also unnecessary here since each thread writes a private
// element — confirm before relying on it.
__syncthreads();
}
// y = alpha * op(A) * x + beta * y for a single-block launch, one thread per
// result row. op(A) is A or A^T depending on `transpose`; the *TakeAbs flags
// apply |.| to the corresponding operand (A elements, x elements, or the
// final y). x is staged in shared memory (requires A_num_col <=
// MAX_PREALLOCATE_VECTOR_SIZE and blockDim.x >= max(A_num_row, A_num_col)).
// NOTE(review): threads with t_id >= A_num_row return before the barrier
// below; this relies on exited threads not being waited on — confirm.
__global__ void MatrixMulVector(const double* A,
const double* x,
double* y,
uint32_t A_num_row,
uint32_t A_num_col,
bool transpose,
double alpha,
double beta,
bool ATakeAbs,
bool xTakeAbs,
bool yTakeAbs) {
// use threads to parallelize
uint32_t t_id = threadIdx.x;
// copy data to shared memory
__shared__ double sha_x[MAX_PREALLOCATE_VECTOR_SIZE];
double y_res = 0;
if (t_id < A_num_col) {
sha_x[t_id] = xTakeAbs ? fabs(x[t_id]) : x[t_id];
}
if (t_id < A_num_row) {
// Treat beta == 0 specially so uninitialized y contents are never read.
if (beta == 0) {
y_res = 0;
}
else { // compute beta * y
y_res = beta * y[t_id];
}
}
else { // we only need A_num_row threads from now
return;
}
__syncthreads();
// compute alpha * A * x
if (alpha != 0) {
if (!ATakeAbs) {
if (!transpose) {
// Row t_id of A (row-major) dotted with x.
for (uint32_t col = 0; col < A_num_col; col++) {
y_res += alpha * A[t_id * A_num_col + col] * sha_x[col];
}
}
else {
// Column t_id of A, i.e. row t_id of A^T.
for (uint32_t col = 0; col < A_num_col; col++) {
y_res += alpha * A[col * A_num_row + t_id] * sha_x[col];
}
}
}
else {
if (!transpose) {
for (uint32_t col = 0; col < A_num_col; col++) {
y_res += alpha * fabs(A[t_id * A_num_col + col]) * sha_x[col];
}
}
else {
for (uint32_t col = 0; col < A_num_col; col++) {
y_res += alpha * fabs(A[col * A_num_row + t_id]) * sha_x[col];
}
}
}
}
y[t_id] = yTakeAbs ? fabs(y_res) : y_res;
__syncthreads();
}
// y = alpha * op(A) * op(x) + beta * y for a single-block launch; thread
// (x_id, y_id) computes element (x_id, y_id) of the A_num_row x x_num_col
// result. Both operands are staged in shared memory as linear copies of
// their first A_num_row*A_num_col / A_num_col*x_num_col entries; the
// *_transpose branches then index those copies with the transposed layout.
// Requires both matrices to fit in MAX_PREALLOCATE_MATRIX_SIZE and the block
// to span all three extents involved.
// NOTE(review): out-of-range threads return before the barrier below —
// relies on exited threads being ignored by __syncthreads(); confirm.
__global__ void MatrixMulMatrix(const double* A,
const double* x,
double* y,
uint32_t A_num_row,
uint32_t A_num_col,
uint32_t x_num_col,
bool A_transpose,
bool x_transpose,
double alpha,
double beta,
bool ATakeAbs,
bool xTakeAbs,
bool yTakeAbs) {
// use threads to parallelize
uint32_t x_id = threadIdx.x;
uint32_t y_id = threadIdx.y;
// copy data to shared memory
__shared__ double sha_A[MAX_PREALLOCATE_MATRIX_SIZE];
__shared__ double sha_x[MAX_PREALLOCATE_MATRIX_SIZE];
double y_res;
if (x_id < A_num_row && y_id < A_num_col) {
sha_A[x_id * A_num_col + y_id] = A[x_id * A_num_col + y_id];
}
if (x_id < A_num_col && y_id < x_num_col) {
sha_x[x_id * x_num_col + y_id] = x[x_id * x_num_col + y_id];
}
if (x_id < A_num_row && y_id < x_num_col) {
// beta == 0 avoids reading possibly-uninitialized y.
y_res = (beta == 0) ? 0 : beta * y[x_id * x_num_col + y_id];
}
else {
return;
}
__syncthreads();
// NOTE(review): unlike MatrixMulVector, the ATakeAbs/xTakeAbs flags are not
// applied anywhere in these loops — confirm whether that is intentional.
if (alpha != 0) {
if (!A_transpose && !x_transpose) {
for (uint32_t i = 0; i < A_num_col; i++) {
y_res += alpha * sha_A[x_id * A_num_col + i] * sha_x[i * x_num_col + y_id];
}
}
else if (A_transpose && !x_transpose) {
for (uint32_t i = 0; i < A_num_col; i++) {
y_res += alpha * sha_A[i * A_num_row + x_id] * sha_x[i * x_num_col + y_id];
}
}
else if (!A_transpose && x_transpose) {
for (uint32_t i = 0; i < A_num_col; i++) {
y_res += alpha * sha_A[x_id * A_num_col + i] * sha_x[y_id * A_num_col + i];
}
}
else {
for (uint32_t i = 0; i < A_num_col; i++) {
y_res += alpha * sha_A[i * A_num_row + x_id] * sha_x[y_id * A_num_col + i];
}
}
}
y[x_id * x_num_col + y_id] = yTakeAbs ? fabs(y_res) : y_res;
__syncthreads();
}
// Solves the n x n system op(A) * sol = b inside a single block by
// Gauss-Jordan elimination: builds A^{-1} in shared array I alongside the
// reduction of sha_A, then multiplies I by b. Thread (x_id, y_id) owns
// matrix element (x_id, y_id); requires blockDim to cover n x n and
// n*n <= MAX_PREALLOCATE_MATRIX_SIZE.
// Pivoting is minimal: only when a diagonal entry is exactly zero is a row
// swap searched for. A singular matrix divides by zero silently.
// NOTE(review): threads with x_id >= n or y_id >= n return before every
// later __syncthreads(); this relies on exited threads being ignored by the
// barrier — confirm for the target architectures.
__global__ void LinearSolver(const double* A,
const double* b,
double* sol,
uint32_t n,
bool A_transpose,
bool solTakeAbs) {
uint32_t x_id = threadIdx.x;
uint32_t y_id = threadIdx.y;
__shared__ double I[MAX_PREALLOCATE_MATRIX_SIZE];
__shared__ double sha_A[MAX_PREALLOCATE_MATRIX_SIZE];
__shared__ double sha_b[MAX_PREALLOCATE_VECTOR_SIZE];
if (x_id < n && y_id < n) {
// Seed I with the identity; stage op(A) and b in shared memory.
I[x_id * n + y_id] = (x_id == y_id) ? 1.0 : 0.0;
if (A_transpose) {
sha_A[x_id * n + y_id] = A[y_id * n + x_id];
}
else {
sha_A[x_id * n + y_id] = A[x_id * n + y_id];
}
if (y_id == 0) {
sha_b[x_id] = b[x_id];
}
}
else {
return;
}
__syncthreads();
// Zero-pivot row swapping: thread (0,0) finds a replacement row, then one
// warp of threads swaps the A row while thread (0,1) swaps the b entries.
__shared__ uint32_t newRow;
for (uint32_t i = 0; i < n - 1; i++) {
if (x_id == 0 && y_id == 0) {
newRow = 0;
if (sha_A[i * n + i] == 0) {
for (newRow = i + 1; newRow < n; newRow++) {
if (sha_A[newRow * n + i] != 0) break;
}
}
}
__syncthreads();
// newRow == 0 means "no swap needed" (it fails i + 1 <= newRow).
if (i + 1 <= newRow && newRow < n) {
if (x_id < n && y_id == 0) {
double temp = sha_A[i * n + x_id];
sha_A[i * n + x_id] = sha_A[newRow * n + x_id];
sha_A[newRow * n + x_id] = temp;
}
else if (x_id == 0 && y_id == 1) {
double temp = sha_b[i];
sha_b[i] = sha_b[newRow];
sha_b[newRow] = temp;
}
}
__syncthreads();
}
// Gauss-Jordan sweep: normalize pivot row i (off-diagonal elements first,
// then the diagonal itself, so the divisor stays unmodified), then
// eliminate column i from every other row of both sha_A and I.
for (uint32_t i = 0; i < n; i++) {
if (x_id == i && x_id != y_id) {
I[x_id*n + y_id] /= sha_A[i*n + i];
sha_A[x_id*n + y_id] /= sha_A[i*n + i];
}
__syncthreads();
if (x_id == y_id && x_id == i) {
I[x_id*n + y_id] /= sha_A[i*n + i];
sha_A[x_id*n + y_id] /= sha_A[i*n + i];
}
__syncthreads();
if (x_id != i) {
I[x_id*n + y_id] -= I[i*n + y_id] * sha_A[x_id*n + i];
if (y_id != i) {
sha_A[x_id*n + y_id] -= sha_A[i*n + y_id] * sha_A[x_id*n + i];
}
}
__syncthreads();
}
// Only column-0 threads survive to compute sol = I * b (one per row).
if (y_id >= 1) return;
// perform matrix multiplication
double sol_res;
if (x_id < n) {
sol_res = 0;
}
else {
return;
}
__syncthreads();
for (uint32_t col = 0; col < n; col++) {
sol_res += I[x_id * n + col] * sha_b[col];
}
sol[x_id] = solTakeAbs ? fabs(sol_res) : sol_res;
__syncthreads();
}
// Multi-right-hand-side variant of LinearSolver: solves
// op(A) * sol = op(b) where b has b_num_col columns, via the same in-block
// Gauss-Jordan inversion of A followed by sol = A^{-1} * b.
// Thread (x_id, y_id) owns element (x_id, y_id); requires the block to
// cover n x max(n, b_num_col) and both shared matrices to fit in
// MAX_PREALLOCATE_MATRIX_SIZE. Singular A divides by zero silently.
// NOTE(review): unlike LinearSolver there is no early return for threads
// outside n x n during staging, but threads outside n x b_num_col do return
// before the final barrier — confirm barrier semantics with exited threads.
__global__ void LinearSolverMatrix(const double* A,
const double* b,
double* sol,
uint32_t n,
uint32_t b_num_col,
bool A_transpose,
bool b_transpose,
bool solTakeAbs){
uint32_t x_id = threadIdx.x;
uint32_t y_id = threadIdx.y;
__shared__ double I[MAX_PREALLOCATE_MATRIX_SIZE];
__shared__ double sha_A[MAX_PREALLOCATE_MATRIX_SIZE];
__shared__ double sha_b[MAX_PREALLOCATE_MATRIX_SIZE];
if (x_id < n && y_id < n) {
// Seed I with the identity and stage op(A).
I[x_id * n + y_id] = (x_id == y_id) ? 1.0 : 0.0;
if (A_transpose) {
sha_A[x_id * n + y_id] = A[y_id * n + x_id];
}
else {
sha_A[x_id * n + y_id] = A[x_id * n + y_id];
}
}
if (x_id < n && y_id < b_num_col) {
// Stage op(b) as an n x b_num_col row-major matrix.
if (b_transpose) {
sha_b[x_id * b_num_col + y_id] = b[y_id * n + x_id];
}
else {
sha_b[x_id * b_num_col + y_id] = b[x_id * b_num_col + y_id];
}
}
__syncthreads();
// Zero-pivot row swapping, as in LinearSolver; thread (0,1)'s branch swaps
// whole rows of the right-hand-side matrix.
__shared__ uint32_t newRow;
for(uint32_t i = 0; i < n - 1; i++){
if (x_id == 0 && y_id == 0) {
newRow = 0;
if (sha_A[i * n + i] == 0) {
for (newRow = i + 1; newRow < n; newRow++) {
if (sha_A[newRow * n + i] != 0) break;
}
}
}
__syncthreads();
if (i + 1 <= newRow && newRow < n) {
if (x_id < n && y_id == 0) {
double temp = sha_A[i * n + x_id];
sha_A[i * n + x_id] = sha_A[newRow * n + x_id];
sha_A[newRow * n + x_id] = temp;
}
else if (x_id < b_num_col && y_id == 1) {
double temp = sha_b[i * b_num_col + x_id];
sha_b[i * b_num_col + x_id] = sha_b[newRow * b_num_col + x_id];
sha_b[newRow * b_num_col + x_id] = temp;
}
}
__syncthreads();
}
// Gauss-Jordan sweep (see LinearSolver): normalize pivot row, then
// eliminate column i from all other rows of sha_A and I in lockstep.
for (uint32_t i = 0; i < n; i++) {
if (x_id == i && x_id != y_id) {
I[x_id*n + y_id] /= sha_A[i*n + i];
sha_A[x_id*n + y_id] /= sha_A[i*n + i];
}
__syncthreads();
if (x_id == y_id && x_id == i) {
I[x_id*n + y_id] /= sha_A[i*n + i];
sha_A[x_id*n + y_id] /= sha_A[i*n + i];
}
__syncthreads();
if (x_id != i) {
I[x_id*n + y_id] -= I[i*n + y_id] * sha_A[x_id*n + i];
if (y_id != i) {
sha_A[x_id*n + y_id] -= sha_A[i*n + y_id] * sha_A[x_id*n + i];
}
}
__syncthreads();
}
// perform matrix multiplication
double sol_res;
if (x_id < n && y_id < b_num_col) {
sol_res = 0;
}
else {
return;
}
__syncthreads();
for (uint32_t i = 0; i < n; i++) {
sol_res += I[x_id * n + i] * sha_b[i * b_num_col + y_id];
}
sol[x_id * b_num_col + y_id] = solTakeAbs ? fabs(sol_res) : sol_res;
__syncthreads();
}
// Column-wise Euclidean norms of the rowNum x colNum row-major matrix A:
// output[c] = ||A[:, c]||_2. One thread per column; launch with at least
// colNum threads in x (no tail guard, matching the original contract).
__global__ void vecnorm(double* A,
double* output,
uint32_t rowNum,
uint32_t colNum) {
    const uint32_t c = threadIdx.x;
    double acc = 0;
    for (uint32_t r = 0; r < rowNum; r++) {
        const double v = A[r * colNum + c];
        acc += v * v;
    }
    output[c] = sqrt(acc);
    __syncthreads();
}
// Classical Gram-Schmidt orthonormalization of the columns of A (rowNum x
// colNum, row-major) into `output`, one column per outer iteration, inside a
// single block. Thread t handles row t of the current column and, when
// t < i, projection coefficient t. Requires blockDim.x >=
// max(rowNum, colNum) and the shared buffers to fit the preallocated sizes.
// NOTE(review): there is no guard against a (near-)zero norm, so linearly
// dependent columns produce divisions by ~0 — confirm inputs are full rank.
__global__ void GramSchmidt(double* A,
double* output,
uint32_t rowNum,
uint32_t colNum) {
uint32_t t_id = threadIdx.x;
__shared__ double projection[MAX_PREALLOCATE_VECTOR_SIZE];
__shared__ double orthovec[MAX_PREALLOCATE_MATRIX_SIZE];
for (uint32_t i = 0; i < colNum; i++) {
if (i > 0) {
// compute projection first
// projection[j] = <q_j, a_i> for each already-finished column j < i.
if (t_id < i) {
double projectionRes = 0;
for (uint32_t j = 0; j < rowNum; j++) {
projectionRes += output[j * colNum + t_id] * A[j * colNum + i];
}
projection[t_id] = projectionRes;
}
__syncthreads();
// compute orthogonal vectors
// orthovec = a_i - sum_j projection[j] * q_j (one row per thread).
if (t_id < rowNum) {
orthovec[t_id] = A[t_id * colNum + i];
for (uint32_t j = 0; j < i; j++) {
orthovec[t_id] -= projection[j] * output[t_id * colNum + j];
}
}
}
else {
// First column: nothing to project against.
if (t_id < rowNum) {
orthovec[t_id] = A[t_id * colNum + i];
}
}
__syncthreads();
// normalization
// Thread 0 serially accumulates the squared norm; everyone then divides.
__shared__ double norm;
if (t_id == 0) {
norm = 0;
for (uint32_t j = 0; j < rowNum; j++) {
double elt = orthovec[j];
norm += elt * elt;
}
norm = sqrt(norm);
}
__syncthreads();
if (t_id < rowNum) {
output[t_id * colNum + i] = orthovec[t_id] / norm;
}
// Barrier before the next column reads `output` and reuses the buffers.
__syncthreads();
}
}
#endif |
13,071 | #include "includes.h"
// Element-wise square: d_out[i] = d_in[i] * d_in[i].
// One thread per element; assumes a single-block launch whose blockDim.x
// equals the element count (no bounds guard, matching the original).
__global__ void square(float * d_out, float * d_in)
{
    const int i = threadIdx.x;
    const float v = d_in[i];
    d_out[i] = v * v;
}
13,072 | #include "includes.h"
// c = a * b for square int matrices of width arrayWidth (row-major).
// 2-D launch: thread (x, y) computes element (row = global y, col = global x).
__global__ void squareMatrixMulKernel(int *c, int *a, int *b, int arrayWidth)
{
    // Fix: accumulate in int — the matrices are int, and the original's
    // float accumulator silently loses precision for sums above 2^24.
    int sum = 0;
    // Global column/row of the element this thread owns.
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: guard the tail — grids that over-provision blocks would otherwise
    // read and write out of bounds.
    if (row >= arrayWidth || col >= arrayWidth)
        return;
    // Dot product of row `row` of a with column `col` of b.
    for (int i = 0; i < arrayWidth; ++i)
    {
        int Aelement = a[row * arrayWidth + i];
        int Belement = b[i * arrayWidth + col];
        sum += Aelement * Belement;
    }
    c[row * arrayWidth + col] = sum;
}
13,073 | #include <cstdio>
#include <cuda_runtime_api.h>
// A[i] = B[i] + C[i] for i in [0, N); one thread per element with a tail
// guard so over-provisioned grids stay in bounds.
__global__ void VecAdd(int* A, int* B, int* C, int N) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    A[idx] = B[idx] + C[idx];
}
// Problem size and host buffers (B, C are inputs; A receives the sums).
constexpr int N = 1024;
int A[N], B[N], C[N];
// Fills B and C, computes A = B + C on the device, and prints A.
int main() {
    for (int i = 0; i < N; ++i) {
        B[i] = i + 1;
        C[i] = 2 * i - 3;
    }
    // Device copies of the three arrays.
    int *dstA, *dstB, *dstC;
    cudaMalloc((void**) &dstA, N * sizeof(int));
    cudaMalloc((void**) &dstB, N * sizeof(int));
    cudaMalloc((void**) &dstC, N * sizeof(int));
    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
    cudaMemcpy(dstB, B, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(dstC, C, sizeof(int) * N, cudaMemcpyHostToDevice);
    // Invoke kernel
    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(dstA, dstB, dstC, N);
    // Fix: kernel launches fail silently; surface configuration errors
    // instead of printing uninitialized host data.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "VecAdd launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Copy mem from device to host (blocking, so it also syncs the kernel)
    cudaMemcpy(A, dstA, N * sizeof(int), cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree(dstA);
    cudaFree(dstB);
    cudaFree(dstC);
    // Print result
    printf("Results: ");
    for (int i = 0; i < N; ++i) {
        printf("%d ", A[i]);
    }
    printf("\n");
    return 0;
}
13,074 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
// forward propogation
/*
def forwardBackward(X, y, W, v):
Z_trans = relu(W@X.T) # mat-mat
Z = Z_trans.T # trans
yhat = Z@v # mat-vec
error = y - yhat
grad_v = Z.T @ error # mat-vector
grad_Z = np.outer(error, v) # outer product
grad_p = dRelu(dZ, Z)
grad_W = dp.T @ X # mat-mat
*/
/* Parameter Setup */
#define N 1000 // # of input samples
#define D 15 // # of input neurons
#define K 20 // # of hidden neurons
#define STEP 0.001 // learning rate or step size
// X: input matrix (n * d)
#define X_HEIGHT N
#define X_WIDTH D
#define X_N X_HEIGHT * X_WIDTH
// Z: ifmap matrix (n * k)
#define Z_HEIGHT N
#define Z_WIDTH K
#define Z_N Z_HEIGHT * Z_WIDTH
// W: layer 1 weights (k * d)
#define W_HEIGHT K
#define W_WIDTH D
#define W_N W_HEIGHT * W_WIDTH
// v: layer 2 weights
#define V_HEIGHT K
#define V_WIDTH 1
#define V_N V_HEIGHT * V_WIDTH
#define BLOCK_SIZE 32
#define LINEAR_BLOCK_SIZE BLOCK_SIZE * BLOCK_SIZE
#define MAX_ERR 1e-6
// d_C = d_A * d_B for row-major matrices (d_a_height x d_a_width times
// d_a_width x d_b_width). One thread per output element: the launch x
// dimension supplies the row index, y supplies the column.
__global__ void matrix_mul(double *d_C, double *d_A, double *d_B, int d_a_height, int d_a_width, int d_b_width) {
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    // Tail guard: grids are over-provisioned with "+ 1" blocks per axis.
    if (row >= d_a_height || col >= d_b_width)
        return;
    // Dot product of row `row` of A with column `col` of B.
    double acc = 0.0;
    for (int k = 0; k < d_a_width; k++)
        acc += d_A[row * d_a_width + k] * d_B[k * d_b_width + col];
    d_C[row * d_b_width + col] = acc;
}
// Fused matrix multiply + ReLU: d_C = max(d_A * d_B, 0), element-wise.
// Same layout and thread mapping as matrix_mul above.
__global__ void relu_matrix_mul(double *d_C, double *d_A, double *d_B, int d_a_height, int d_a_width, int d_b_width) {
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= d_a_height || col >= d_b_width)
        return;
    double acc = 0.0;
    for (int k = 0; k < d_a_width; k++)
        acc += d_A[row * d_a_width + k] * d_B[k * d_b_width + col];
    // ReLU applied on the way out.
    d_C[row * d_b_width + col] = (acc > 0) ? acc : 0;
}
// Matrix multiply gated by a ReLU derivative: the product element is kept
// only where the matching activation d_act element was positive, else 0.
__global__ void d_relu_matrix_mul(double *d_C, double *d_A, double *d_B, double *d_act, int d_a_height, int d_a_width, int d_b_width) {
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= d_a_height || col >= d_b_width)
        return;
    double acc = 0.0;
    for (int k = 0; k < d_a_width; k++)
        acc += d_A[row * d_a_width + k] * d_B[k * d_b_width + col];
    // dReLU gate: pass the product only where the activation was positive.
    const double gate = d_act[row * d_b_width + col];
    d_C[row * d_b_width + col] = (gate > 0) ? acc : 0;
}
// d_out = d_in^T. d_in has d_in_width columns; the transposed output has
// d_out_width columns. Launch x indexes the input row, y the input column.
__global__ void matrix_transpose(double *d_out, double *d_in, int d_in_width, int d_out_width) {
    const int inRow = blockIdx.x * blockDim.x + threadIdx.x;
    const int inCol = blockIdx.y * blockDim.y + threadIdx.y;
    if (inCol >= d_in_width || inRow >= d_out_width)
        return;
    d_out[inCol * d_out_width + inRow] = d_in[inRow * d_in_width + inCol];
}
// Element-wise difference: out[i] = a[i] - b[i] for i in [0, n).
__global__ void vector_sub(double *out, double *a, double *b, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    out[i] = a[i] - b[i];
}
// Gradient-descent step: d_weights[i] -= step * d_grads[i] over n entries.
__global__ void update(double *d_weights, double *d_grads, double step, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    d_weights[i] = d_weights[i] - step * d_grads[i];
}
// Element-wise square: out[i] = in[i]^2 for i in [0, n).
__global__ void square(double *out, double *in, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    const double v = in[i];
    out[i] = v * v;
}
// Trains a one-hidden-layer MLP (relu(W X^T)^T v ~ y) by full-batch gradient
// descent for 20 iterations, printing the MSE each iteration. Sizes come
// from the N/D/K macros above; all math runs on the device via the kernels
// defined in this file.
// NOTE(review): no CUDA error checking anywhere, and the inner MSE loop
// reuses `i`, shadowing the iteration counter — works, but fragile.
int main(){
// forward variables
double *h_X, *h_W, *h_Z, *h_Z_T, *h_v, *h_yhat, *h_y;
double *d_X, *d_X_T, *d_W, *d_Z, *d_Z_T, *d_v, *d_yhat, *d_y;
// backward variables
double *h_error, *h_grad_v, *h_grad_Z, *h_grad_p_T, *h_grad_W, *h_err_sq;
double *d_error, *d_grad_v, *d_grad_Z, *d_grad_p_T, *d_grad_W, *d_err_sq;
// double *h_ref; // compute verified results
// Allocate host memory
h_X = (double*)malloc(sizeof(double) * X_N);
h_W = (double*)malloc(sizeof(double) * W_N);
h_v = (double*)malloc(sizeof(double) * V_N);
h_Z_T = (double*)malloc(sizeof(double) * Z_N);
h_Z = (double*)malloc(sizeof(double) * Z_N);
h_yhat = (double*)malloc(sizeof(double) * N);
h_y = (double*)malloc(sizeof(double) * N);
h_error = (double*)malloc(sizeof(double) * N);
h_grad_v = (double*)malloc(sizeof(double) * V_N);
h_grad_Z = (double*)malloc(sizeof(double) * Z_N);
h_grad_p_T = (double*)malloc(sizeof(double) * Z_N);
h_grad_W = (double*)malloc(sizeof(double) * W_N);
h_err_sq = (double*)malloc(sizeof(double) * N);
// h_ref = (double*)malloc(sizeof(double) * N);
// Initialize host arrays
/*** TEST 1 ***/
/*
for(int i = 0; i < X_N; i++){
if(i == 1 || i == 3){
h_X[i] = (double)(-i-1);
} else{
h_X[i] = (double)(i+1);
}
}
for(int i = 0; i < W_N; i++){
h_W[i] = double(i+1);
}
for(int i = 0; i < V_HEIGHT; i++){
h_v[i] = (double)(i+1);
}
for(int i = 0; i < N; i++){
h_y[i] = (double)(i+1);
}
*/
/*** TEST 2 ***/
srand((unsigned int)time(NULL));
// random uniform from [-a, a]
// NOTE(review): the expression below actually yields values in [-a, 0]
// (rand()/RAND_MAX is in [0, 1], scaled by a, then offset by -a) — confirm
// whether [-a, a] was intended.
double a = 1.0;
for (int i = 0; i< X_N; i++){
h_X[i] = -a + (double)rand()/(double)(RAND_MAX)*a;
}
for (int i = 0; i< W_N; i++){
h_W[i] = -a + (double)rand()/(double)(RAND_MAX)*a;
}
for (int i = 0; i< V_N; i++){
h_v[i] = -a + (double)rand()/(double)(RAND_MAX)*a;
}
for (int i = 0; i< N; i++){
h_y[i] = -a + (double)rand()/(double)(RAND_MAX)*a;
}
// Allocate device memory
cudaMalloc((void**)&d_X, sizeof(double) * X_N);
cudaMalloc((void**)&d_X_T, sizeof(double) * X_N);
cudaMalloc((void**)&d_Z, sizeof(double) * Z_N);
cudaMalloc((void**)&d_Z_T, sizeof(double) * Z_N);
cudaMalloc((void**)&d_W, sizeof(double) * W_N);
cudaMalloc((void**)&d_v, sizeof(double) * V_N);
cudaMalloc((void**)&d_yhat, sizeof(double) * N);
cudaMalloc((void**)&d_y, sizeof(double) * N);
cudaMalloc((void**)&d_error, sizeof(double) * N);
cudaMalloc((void**)&d_grad_v, sizeof(double) * V_N);
cudaMalloc((void**)&d_grad_Z, sizeof(double) * Z_N);
cudaMalloc((void**)&d_grad_p_T, sizeof(double) * Z_N);
cudaMalloc((void**)&d_grad_W, sizeof(double) * W_N);
cudaMalloc((void**)&d_err_sq, sizeof(double) * N);
// Transfer data from host to device memory
cudaMemcpy(d_X, h_X, sizeof(double) * X_N, cudaMemcpyHostToDevice);
cudaMemcpy(d_W, h_W, sizeof(double) * W_N, cudaMemcpyHostToDevice);
cudaMemcpy(d_v, h_v, sizeof(double) * V_N, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, h_y, sizeof(double) * N, cudaMemcpyHostToDevice);
int iters = 20;
for (int i = 0; i < iters; i++){
// Executing kernel
// Forward pass: Z^T = relu(W X^T), yhat = Z v (see docstring at top of file).
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// X_HEIGHT (N) corresponding to OUT_WIDTH, X_WIDTH (D) corresponding to IN_WIDTH
dim3 dimGrid1(N / BLOCK_SIZE + 1,D / BLOCK_SIZE + 1);
matrix_transpose<<<dimGrid1,dimBlock>>>(d_X_T, d_X, D, N);
dim3 dimGrid2(K / BLOCK_SIZE + 1, N / BLOCK_SIZE + 1);
relu_matrix_mul<<<dimGrid2,dimBlock>>>(d_Z_T, d_W, d_X_T, K, D, N);
dim3 dimGrid3(K / BLOCK_SIZE + 1, N / BLOCK_SIZE + 1);
matrix_transpose<<<dimGrid3,dimBlock>>>(d_Z, d_Z_T, N, K);
dim3 dimGrid4(N / BLOCK_SIZE + 1, 1 / BLOCK_SIZE + 1);
matrix_mul<<<dimGrid4,dimBlock>>>(d_yhat, d_Z, d_v, N, K, 1);
// backwards:
// error = yhat - y; grad_v = Z^T error; grad_Z = outer(error, v) gated by
// dReLU(Z); grad_W = grad_p^T X.
vector_sub<<<N / LINEAR_BLOCK_SIZE + 1, LINEAR_BLOCK_SIZE>>>(d_error, d_yhat, d_y, N);
dim3 dimGrid5(K / BLOCK_SIZE + 1, 1 / BLOCK_SIZE + 1);
matrix_mul<<<dimGrid5,dimBlock>>>(d_grad_v, d_Z_T, d_error, K, N, 1);
dim3 dimGrid6(N / BLOCK_SIZE + 1, K / BLOCK_SIZE + 1);
d_relu_matrix_mul<<<dimGrid6,dimBlock>>>(d_grad_Z, d_error, d_v, d_Z, N, 1, K);
dim3 dimGrid7(N / BLOCK_SIZE + 1, K / BLOCK_SIZE + 1);
matrix_transpose<<<dimGrid7,dimBlock>>>(d_grad_p_T, d_grad_Z, K, N);
dim3 dimGrid8(K / BLOCK_SIZE + 1, D / BLOCK_SIZE + 1);
matrix_mul<<<dimGrid8,dimBlock>>>(d_grad_W, d_grad_p_T, d_X, K, N, D);
// update
// SGD step scaled by 1/N (full-batch average).
update<<<N / LINEAR_BLOCK_SIZE + 1, LINEAR_BLOCK_SIZE>>>(d_W, d_grad_W, (STEP/N), W_N);
update<<<N / LINEAR_BLOCK_SIZE + 1, LINEAR_BLOCK_SIZE>>>(d_v, d_grad_v, (STEP/N), V_N);
// cudaMemcpy(h_W, d_W, sizeof(double) * W_N, cudaMemcpyDeviceToHost);
// cudaMemcpy(h_v, d_v, sizeof(double) * V_N, cudaMemcpyDeviceToHost);
// get MSE back
square<<<N / LINEAR_BLOCK_SIZE + 1, LINEAR_BLOCK_SIZE>>>(d_err_sq, d_error, N);
cudaMemcpy(h_err_sq, d_err_sq, sizeof(double) * N, cudaMemcpyDeviceToHost);
double sum = 0.0;
// NOTE(review): this `i` shadows the iteration counter above.
for(int i = 0; i < N; i++){
sum += h_err_sq[i];
}
printf("MSE is %f\n", sum / N);
}
// Verification
/*
for(int i = 0; i < K; i++){
for(int j = 0; j < D; j++){
// double sum = 0.0;
// for(int k = 0; k < A_WIDTH; k++){
// sum += h_A[i*A_WIDTH+k] * h_B[k*B_WIDTH + j];
// }
// h_ref[i * C_WIDTH + j] = sum;
// assert(fabs(h_ref[i*C_WIDTH + j] - h_C[i * C_WIDTH + j]) < MAX_ERR);
printf("h_W[%d][%d] = %f\n", i, j, h_W[i * D + j]);
// printf("h_Z[%d][%d] = %f\n", i, j, h_Z[i * K + j]);
// printf("h_ref[%d][%d] = %f\n", i, j, h_ref[i * C_WIDTH + j]);
}
}
for(int i = 0; i < K; i++){
printf("h_v[%d] = %f\n", i, h_v[i]);
}
*/
printf("PASSED\n");
// Deallocate device memory
cudaFree(d_X);
cudaFree(d_X_T);
cudaFree(d_W);
cudaFree(d_v);
cudaFree(d_Z);
cudaFree(d_Z_T);
cudaFree(d_yhat);
cudaFree(d_y);
cudaFree(d_error);
cudaFree(d_grad_v);
cudaFree(d_grad_Z);
cudaFree(d_grad_p_T);
cudaFree(d_grad_W);
cudaFree(d_err_sq);
// Deallocate host memory
free(h_X);
free(h_W);
free(h_v);
free(h_Z);
free(h_Z_T);
free(h_yhat);
free(h_y);
free(h_error);
free(h_grad_v);
free(h_grad_Z);
free(h_grad_p_T);
free(h_grad_W);
free(h_err_sq);
}
|
13,075 | #include <cuda.h>
#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>
// Element type used throughout the algorithm.
typedef int data_t;
// Initialization operators handed to init_matrix: M[i][j] = op(i, j).
data_t add(const data_t a, const data_t b) { return b + a; }
data_t sub(const data_t a, const data_t b) { return a - b; }
void init_matrix(data_t *M, const unsigned int size, data_t(*init_op)(const data_t, const data_t));
void run_GPU(data_t* host_A, data_t* host_B, const unsigned int n_bytes);
void print_matrix(data_t * const M, const unsigned int size);
double tick();
__global__ void kernel_op_1(data_t * A, data_t * B);
__global__ void kernel_op_2(data_t * M, const unsigned int size);
// Host function
// Computes the mean squared difference of two N x N generated matrices:
// allocates and fills them on the host, offloads (A-B)^2 and a reduction to
// the GPU via run_GPU, then divides the accumulated sum by N*N.
int
main(int argc, char** argv)
{
    // Matrix dimension comes from the command line.
    const unsigned int N = (argc == 2) ? atoi(argv[1]) : 0;
    double t, resultado;
    if (!N){
        printf("Parametros incorrectos. El programa se cierra\n");
        return -1;
    }
    // Report the element size.
    // Fix: sizeof yields size_t, and passing it to %d is undefined behavior
    // on LP64 platforms; cast to match the format.
    printf("Tamaño del elemento a procesar: %d bytes\n", (int)sizeof(data_t));
    // On the CPU...
    // ...allocate the matrices
    t = tick();
    const unsigned int n_bytes = sizeof(data_t)*N*N;
    data_t *host_A = (data_t*) malloc(n_bytes);
    data_t *host_B = (data_t*) malloc(n_bytes);
    t = tick() - t;
    printf("Alocar matrices en mem. de CPU: %f\n", t);
    // ...initialize the matrices
    t = tick();
    init_matrix(host_A, N, &add);
    init_matrix(host_B, N, &sub);
    t = tick() - t;
    printf("Inicializar matrices en mem. de CPU: %f\n", t);
#ifdef DEBUG
    printf("Matriz A =====\n");
    print_matrix(host_A, N);
    printf("Matriz B =====\n");
    print_matrix(host_B, N);
#endif
    run_GPU(host_A, host_B, n_bytes);
    // Result verification
#ifdef DEBUG
    printf("Resultado parcial =====\n");
    print_matrix(host_A, N);
#endif
    // Final step: divide the accumulated sum by the element count.
    resultado = host_A[0]/((float)N*N);
    t = tick();
    free(host_A);
    free(host_B);
    t = tick() - t;
    printf("Liberacion de mem. CPU: %f\n", t);
    printf("\x1B[36mResultado final =====>>> %f\x1B[0m\n", resultado);
    return 0;
}
// Offloads the squared-difference and reduction steps to the GPU: copies
// host_A/host_B over, runs kernel_op_1 once and kernel_op_2 log2(size)
// times with doubling offsets, then copies the buffers back (the running
// total ends up in host_A[0]). Timing for each phase is printed.
void
run_GPU(data_t* host_A, data_t* host_B, const unsigned int n_bytes)
{
    data_t *gpu_A, *gpu_B;
    const unsigned int size = n_bytes / sizeof(data_t);
    unsigned int i;
    double t;
    // Allocate GPU memory
    t = tick();
    cudaMalloc((void**)&gpu_A, n_bytes);
    cudaMalloc((void**)&gpu_B, n_bytes);
    t = tick() - t;
    printf("Alocar matrices en mem. de GPU: %f\n", t);
    // Copy the data from the host to the GPU
    t = tick();
    cudaMemcpy(gpu_A, host_A, n_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_B, host_B, n_bytes, cudaMemcpyHostToDevice);
    t = tick() - t;
    printf("Copia de datos desde mem. CPU hacia mem. GPU: %f\n", t);
    // Configure grid and block sizes
    dim3 dimGrid(1);
    dim3 dimBlock(16);
    // Invoke the kernels
    t = tick();
    kernel_op_1<<< dimGrid, dimBlock >>>(gpu_A, gpu_B);
    // Fix: cudaThreadSynchronize() has been deprecated since CUDA 4.0 (and
    // later removed); cudaDeviceSynchronize() is the drop-in replacement.
    cudaDeviceSynchronize();
    for (i=1; i<size; i*=2) {
        kernel_op_2<<< dimGrid, dimBlock >>>(gpu_A, i);
        cudaDeviceSynchronize();
    }
    t = tick() - t;
    printf("\x1B[33mEjecucion del kernel de GPU: %f\x1B[0m\n", t);
    // Retrieve the results back into the host buffers
    t = tick();
    cudaMemcpy(host_A, gpu_A, n_bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(host_B, gpu_B, n_bytes, cudaMemcpyDeviceToHost);
    t = tick() - t;
    printf("Copia de datos desde mem. GPU hacia mem. CPU: %f\n", t);
    // Release the GPU allocations
    t = tick();
    cudaFree(gpu_A);
    cudaFree(gpu_B);
    t = tick() - t;
    printf("Liberar mem. de GPU: %f\n", t);
}
// Los kernels que ejecutaran por cada hilo de la GPU
// Element-wise squared difference, in place: A[i] = (A[i] - B[i])^2.
__global__ void kernel_op_1(data_t *A, data_t *B) {
    const unsigned long int gid = blockIdx.x * blockDim.x + threadIdx.x;
    const data_t diff = A[gid] - B[gid];
    A[gid] = diff * diff;
}
// One strided accumulation step: each thread adds the element `offset`
// slots ahead into its own. The host calls this repeatedly with
// offset = 1, 2, 4, ... so the running total collects toward M[0].
// NOTE(review): M[global_id + offset] reads past the end of the buffer for
// the largest offsets — confirm the allocation has slack or the result is
// only meaningful for specific sizes.
__global__ void kernel_op_2(data_t *M, const unsigned int offset) {
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
M[global_id] += M[global_id + offset];
}
// Funcion para la inicializacion de las matrices
// Fills the size x size matrix M so that M[row][col] = init_op(row, col).
void
init_matrix(data_t *M, const unsigned int size, data_t(*init_op)(const data_t, const data_t))
{
    unsigned int row, col;
    for (row = 0; row < size; row++) {
        for (col = 0; col < size; col++) {
            M[row*size + col] = init_op(row, col);
        }
    }
}
// Impresion de matriz
void print_matrix(data_t * const M, const unsigned int size) {
int i,j;
for (i = 0; i < size; i++) {
for (j = 0; j < size; j++)
printf("%8d ", M[i*size+j]);
printf("\n");
}
}
// Para medir los tiempos
double tick(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
|
13,076 | #include <stdio.h>
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
// #include "utils.h"
#define CHUNKSIZE 16
#define THREADS_PER_BLOCK 256
#define MAXN 16777216
#define MAXBLOCKS (MAXN / CHUNKSIZE / THREADS_PER_BLOCK)
uint32_t A[MAXN], B[MAXN], C[MAXN];
// function for debugging.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Prints a readable diagnostic when a CUDA call fails and, unless `abort`
// is false, terminates the process with the error code. Use via gpuErrchk.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Rotate x left by n bits (n taken mod 32).
// Fix: the original computed `x >> (32 - n)`, which is undefined behavior
// when n == 0 (shift by 32); callers pass `key & 31`, which can be 0.
// Masking both shift counts makes n == 0 return x unchanged.
__device__ uint32_t rotate_left(uint32_t x, uint32_t n) {
    return (x << (n & 31)) | (x >> ((32 - n) & 31));
}
// Toy keyed mixing function: rotate m by the low 5 bits of key, add the
// key, then xor the key.
__device__ uint32_t encrypt(uint32_t m, uint32_t key) {
    const uint32_t rotated = rotate_left(m, key & 31);
    return (rotated + key) ^ key;
}
// 1. kernel
// 1. kernel (unreduced variant): each thread sums the products of the two
// keystreams over its own CHUNKSIZE-slice of [0, N) and stores the partial
// sum at out[chunk_id]; the host then adds the partials together.
__global__ void mul(int N, uint32_t key1, uint32_t key2, uint32_t* out){
    const int chunk_id = threadIdx.x + blockIdx.x * blockDim.x;
    const int lo = chunk_id * CHUNKSIZE;
    int hi = (chunk_id + 1) * CHUNKSIZE;
    if (hi > N)
        hi = N;   // clamp the final, partial chunk
    uint32_t acc = 0;
    for (int k = lo; k < hi; k++)
        acc += encrypt(k, key1) * encrypt(k, key2);
    out[chunk_id] = acc;
}
// 2. add reduction
// 2. add reduction
// Same keystream dot-product as `mul`, but with an in-block shared-memory
// tree reduction so only one partial sum per block is written to `out`.
// Assumes blockDim.x (THREADS_PER_BLOCK) is a power of two; threads whose
// chunk lies entirely past N contribute 0.
__global__ void mul_reduce(int N, uint32_t key1, uint32_t key2, uint32_t* out){
__shared__ uint32_t sdata[THREADS_PER_BLOCK]; // for reduction
int tid = threadIdx.x;
int chunk_id = threadIdx.x + blockIdx.x * blockDim.x;
// process chunk
int start = chunk_id * CHUNKSIZE;
int end = (chunk_id+1)*CHUNKSIZE;
end = N < end ? N : end;
sdata[tid] = 0;
for(int k = start; k < end; k++){
sdata[tid] += encrypt(k, key1) * encrypt(k, key2);
}
__syncthreads();
// Halving-stride tree reduction over shared memory; barrier sits outside
// the divergent branch so every thread reaches it.
for(int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) out[blockIdx.x] = sdata[0];
}
// Ceiling of a / b for integers (exact quotients stay unchanged).
int divCeil(int a, int b){
    const int q = a / b;
    return (q * b < a) ? q + 1 : q;
}
// Reads (N, key1, key2) triples from stdin until EOF; for each, computes
// sum over k in [0, N) of encrypt(k, key1) * encrypt(k, key2) (mod 2^32)
// on the GPU via mul_reduce, finishes the per-block sums on the host, and
// prints the total. The device buffer is allocated once for the largest
// possible block count and reused across queries.
int main(int argc, char *argv[]) {
int N, N_CHUNKS, BLOCKS;
uint32_t key1, key2;
uint32_t *devC;
gpuErrchk(cudaMalloc(&devC, sizeof(uint32_t) * MAXN));
while (scanf("%d %" PRIu32 " %" PRIu32, &N, &key1, &key2) == 3) {
N_CHUNKS = divCeil(N, CHUNKSIZE);
BLOCKS = divCeil(N_CHUNKS, THREADS_PER_BLOCK);
mul_reduce <<< BLOCKS, THREADS_PER_BLOCK >>> (N, key1, key2, devC);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// copyback and sum
gpuErrchk(cudaMemcpy(C, devC, sizeof(uint32_t) * BLOCKS, cudaMemcpyDeviceToHost));
// Host-side finish: one partial per block remains.
uint32_t sum = 0;
for (int i = 0; i < BLOCKS; i++){
sum += C[i];
}
printf("%" PRIu32 "\n", sum);
}
cudaFree(devC);
return 0;
}
13,077 | #include "includes.h"
// Heat-current autocorrelation: block `bid` computes the lag-`bid`
// correlation of the five heat-current components stored column-wise in
// g_heat (Nd samples per component), averaging over the Nd - bid valid
// sample pairs, and writes the five results column-wise into g_hac
// (Nc lags per component). Launched as <<<Nc, 128>>>; the reduction
// assumes blockDim.x == 128 (a power of two).
__global__ void gpu_find_hac(const int Nc, const int Nd, const double* g_heat, double* g_hac)
{
//<<<Nc, 128>>>
__shared__ double s_hac_xi[128];
__shared__ double s_hac_xo[128];
__shared__ double s_hac_yi[128];
__shared__ double s_hac_yo[128];
__shared__ double s_hac_z[128];
int tid = threadIdx.x;
int bid = blockIdx.x;
// Each thread strides over the Nd samples in 128-wide patches.
int number_of_patches = (Nd - 1) / 128 + 1;
int number_of_data = Nd - bid;
s_hac_xi[tid] = 0.0;
s_hac_xo[tid] = 0.0;
s_hac_yi[tid] = 0.0;
s_hac_yo[tid] = 0.0;
s_hac_z[tid] = 0.0;
for (int patch = 0; patch < number_of_patches; ++patch) {
int index = tid + patch * 128;
// Guard keeps index + bid (the lagged sample) in range.
if (index + bid < Nd) {
// The x/y "in"/"out" components (columns 0-3) are each correlated with
// both themselves and their partner column; z (column 4) only with
// itself.
s_hac_xi[tid] += g_heat[index + Nd * 0] * g_heat[index + bid + Nd * 0] +
g_heat[index + Nd * 0] * g_heat[index + bid + Nd * 1];
s_hac_xo[tid] += g_heat[index + Nd * 1] * g_heat[index + bid + Nd * 1] +
g_heat[index + Nd * 1] * g_heat[index + bid + Nd * 0];
s_hac_yi[tid] += g_heat[index + Nd * 2] * g_heat[index + bid + Nd * 2] +
g_heat[index + Nd * 2] * g_heat[index + bid + Nd * 3];
s_hac_yo[tid] += g_heat[index + Nd * 3] * g_heat[index + bid + Nd * 3] +
g_heat[index + Nd * 3] * g_heat[index + bid + Nd * 2];
s_hac_z[tid] += g_heat[index + Nd * 4] * g_heat[index + bid + Nd * 4];
}
}
__syncthreads();
// Five simultaneous halving-stride tree reductions over shared memory.
#pragma unroll
for (int offset = blockDim.x >> 1; offset > 0; offset >>= 1) {
if (tid < offset) {
s_hac_xi[tid] += s_hac_xi[tid + offset];
s_hac_xo[tid] += s_hac_xo[tid + offset];
s_hac_yi[tid] += s_hac_yi[tid + offset];
s_hac_yo[tid] += s_hac_yo[tid + offset];
s_hac_z[tid] += s_hac_z[tid + offset];
}
__syncthreads();
}
// Thread 0 writes the averaged correlations for this lag.
if (tid == 0) {
g_hac[bid + Nc * 0] = s_hac_xi[0] / number_of_data;
g_hac[bid + Nc * 1] = s_hac_xo[0] / number_of_data;
g_hac[bid + Nc * 2] = s_hac_yi[0] / number_of_data;
g_hac[bid + Nc * 3] = s_hac_yo[0] / number_of_data;
g_hac[bid + Nc * 4] = s_hac_z[0] / number_of_data;
}
}
13,078 |
// Stub max-pooling kernel. The parameter list matches the generated
// pooling-kernel ABI (output O, argmax B, input I, geometry/stride/padding
// and magic-number division constants), but the body only zeroes the first
// element of the output buffer — every other parameter is unused.
// NOTE(review): presumably a placeholder or ABI-compatibility shim; confirm
// before relying on its output.
extern "C" __global__ void hpool_max(
unsigned short* param_O,
unsigned short* param_B,
const unsigned short* param_I,
int param_mode,
int param_N,
int param_W,
int param_H,
int param_D,
int param_C,
int param_WN,
int param_HWN,
int param_DHWN,
int param_P,
int param_Q,
int param_magic_P,
int param_shift_P,
int param_QN,
int param_PQN,
int param_MPQN,
int param_pad_j,
int param_pad_d,
int param_pad_h,
int param_pad_w,
int param_str_j,
int param_str_d,
int param_str_h,
int param_str_w,
int param_S,
int param_RS,
int param_RST,
int param_JRST,
int param_magic_S,
int param_shift_S,
int param_magic_RS,
int param_shift_RS,
int param_magic_RST,
int param_shift_RST,
int param_overlap
) {
*param_O = 0;
}
|
13,079 |
#include <stdlib.h>
#include <stdio.h>
// Launch-time value stamped into every array element by kernel().
// 'extern' with an initializer is a definition anyway and draws a compiler
// warning, so declare it as a plain file-scope definition.
int j = 0;
//global function run on gpu's and must return void
// Stamps the launch-time value of j into this thread's element.
// NOTE: j is passed by value, so each thread holds a private copy; the
// original per-thread `j++` could never be observed by any other thread or
// by the host and has been removed as a dead store.
__global__ void kernel(int *array, int j){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    array[index] = j;   // no bounds guard: launch must cover exactly the array
}
// Allocates a 256-element device array, launches kernel() to stamp it with
// the global j, copies it back and prints it.
// Fixes vs. original: grid size now uses ceiling division (robust if
// `elements` stops being a multiple of blockSize), and the device-to-host
// copy result is checked instead of silently printing garbage on failure.
int main(void){
    int elements = 256;
    int bytes = elements * sizeof(int);
    // host and device buffers
    int *gpu_array = 0;
    int *cpu_array = 0;
    cpu_array = (int *)malloc(bytes);
    cudaMalloc((void**) &gpu_array, bytes);
    int blockSize = 128;
    int gridSize = (elements + blockSize - 1) / blockSize; // ceil-div
    kernel<<<gridSize, blockSize>>>(gpu_array, j);
    // cudaMemcpy on the default stream synchronizes with the kernel and
    // also surfaces any asynchronous launch/execution error.
    cudaError_t err = cudaMemcpy(cpu_array, gpu_array, bytes, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        free(cpu_array);
        cudaFree(gpu_array);
        return 1;
    }
    // print results
    for(int i = 0; i < elements; ++i){
        printf("%d ", cpu_array[i]);
    }
    printf("\n");
    // de-allocate memory
    free(cpu_array);
    cudaFree(gpu_array);
    return 0;
}
|
13,080 | #include <time.h>
#include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h>
#define N 100
#define MAXCOL 100
#define Nic 5
#define NL_min 0
#define NL_max 70
#define NL_step 1
#define Ng_max 10
#define FILENAME "results.txt"
/* Counters shared by all simulations; one copy lives in device memory and
 * is copied back to the host once at the end. */
typedef struct {
int All_sync_count1[NL_max-NL_min][Ng_max];  // per-(nL, graph) full-sync count (update is commented out in simulate)
int All_sync_count2[NL_max-NL_min];          // per-nL count of runs where all N neurons fired in sync
} global_mem;
/* Output record produced by one simulation thread. */
typedef struct {
unsigned short ic, iL, nL_break, ig;  // decoded coordinates of this run
unsigned short spike_count[N];        // spikes recorded per neuron
double v_init[N];                     // random initial membrane voltages
double tspike[N * MAXCOL];            // spike times; row kk = neuron kk, entries 1..spike_count[kk]
} simulation_result;
/************************ DEBUG FUNCTIONS ************************/
/* Check the weights on GPU memory *
__global__ void check_weights(double w[(NL_max - NL_min) * Ng_max / NL_step][N][N]) {
printf("hi_check_weights\n");
int n = (NL_max - NL_min) / NL_step * Ng_max;
for(int i = 0; i < n; ++i) {
printf("\nnL = %d\tng = %d\n\t", NL_min + NL_step * i / Ng_max, i % Ng_max);
for(int j = 0; j < N; ++j) {
for(int k = 0; k < N; ++k) {
printf("%.2lf ", w[i][j][k]);
}
printf("\n\t");
}
}
printf("\n");
}
/* Check the global data on GPU memory *
__global__ void check_g_mem(global_mem *g_mem) {
double sum1 = 0.0, sum2 = 0.0;
for(int i = 0; i < NL_max - NL_min; ++i) {
for(int j = 0; j < Ng_max; ++j) {
sum1 += g_mem->All_sync_count1[i][j];
}
sum2 += g_mem->All_sync_count2[i];
}
printf("sum1 = %f\nsum2 = %f\n", sum1, sum2);
}
/*******************************************************************/
/* Generate a adjacency matrix for a connected graph with nL edges missing */
/* Generate an adjacency matrix for a connected graph with nL edges missing.
 * Starts from the all-to-all graph on N nodes, removes nL randomly chosen
 * edges (symmetrically), then walks the graph from node 0 breadth-first.
 * Returns 1 if every node was reached (graph connected), else 0; the caller
 * retries until it gets 1. w_flag is reused for two purposes: first to mark
 * removed edges, then (without re-zeroing) to mark edges already traversed —
 * NOTE(review): that reuse is load-bearing; confirm it is intentional. */
__device__ unsigned short synaptic_weights_connected_network(double w[][N], unsigned short nL, curandState *rand_state) {
unsigned short i,j,k,kk,neuron1,neuron2;
double w_flag[N][N];
unsigned short syn_to_remove, tot_syn_removed;
short connected_nodes[N];
unsigned short current_ptr, endptr, parent_node;
unsigned short flag_connected = 0;
unsigned short flag_already_connected;
// GENERATE AN ALL-TO-ALL NETWORK ************************************************************************
for(i = 0; i < N; i++) {
for(j = 0; j < N; j++) {
if(j != i){
w[i][j] = 1;
}
else if(j == i){
w[i][j] = 0;  // no self-connections
}
}
}
// REMOVE SYNAPSES FROM ABOVE ALL-TO-ALL NETWORK *********************************************************
syn_to_remove = nL;
tot_syn_removed = 0;
// Initialize array w_flag
for(k = 0; k < N; k++) {
for(kk = 0; kk < N; kk++) {
w_flag[k][kk] = 0; // w_flag[k][kk] is changed to value 1, if the synapse between k --> kk is removed
}
}
// Generate a new network by removing nL distinct synapses at random
// (rejection loop: re-draw on self-pairs and already-removed pairs).
while(tot_syn_removed < syn_to_remove) {
neuron1 = curand(rand_state) % N;
neuron2 = curand(rand_state) % N;
if(neuron1 != neuron2) {
if(w_flag[neuron1][neuron2] == 0) { // synapse between these two neurons has not been changed.
w_flag[neuron1][neuron2] = 1;
w_flag[neuron2][neuron1] = 1;
w[neuron1][neuron2] = 0;
w[neuron2][neuron1] = w[neuron1][neuron2];  // keep matrix symmetric
tot_syn_removed++;
}
}
}
// Is the network generated above connected ? /////////////
// (hard-coded 6-node and 4-node debug adjacency matrices, and a commented
//  re-initialization of w_flag, removed here for brevity)
// Breadth-first traversal from node 0; connected_nodes acts as the queue.
connected_nodes[0] = 0;
for(i=1;i<N;i++) {
connected_nodes[i] = -1;  // -1 marks unused queue slots
}
current_ptr = 0;
endptr = 0; // points towards the last non-zero element in the connected_nodes array
while(current_ptr <= endptr) {
for(i = 0; i < N; i++) {
parent_node = connected_nodes[current_ptr];
flag_already_connected = 0;
// linear scan: has node i already been enqueued?
for(j = 0; j <= endptr; j++) {
if(connected_nodes[j] == i) {
flag_already_connected = 1;
}
}
if(w[parent_node][i] == 1) {
if(w_flag[parent_node][i] == 0) {
if(flag_already_connected ==0) {
endptr ++;
connected_nodes[endptr] = i; // stores node numbers connected to parent_node
w_flag[parent_node][i] = 1;
w_flag[i][parent_node] = w_flag[parent_node][i]; //links already visited
}
}
}
if (i == N-1) {
current_ptr++;  // done expanding this parent; move to next queued node
}
}
}
// All N nodes enqueued <=> graph is connected.
if(endptr == N-1) {
flag_connected = 1;
}
return flag_connected;
}
/* Create weight matrices in GPU memory */
__global__ void store_weights(double w[(NL_max - NL_min) / NL_step * Ng_max][N][N]) {
unsigned short threadId = blockIdx.x * blockDim.x + threadIdx.x;
unsigned short nL_break = NL_min + threadId * NL_step;
unsigned short flag_connected;
curandState rand_state;
curand_init(1234, threadId, 0, &rand_state);
for(unsigned short i = 0; i < Ng_max; ++i) {
flag_connected = 0;
do {
flag_connected = synaptic_weights_connected_network(w[threadId * Ng_max + i], nL_break, &rand_state);
} while(flag_connected == 0);
}
}
/* Run a simulation on a single thread */
__global__ void simulate(simulation_result *results, global_mem *g_mem, double w[(NL_max - NL_min) / NL_step * Ng_max][N][N]) {
unsigned short threadId = blockIdx.x * blockDim.x + threadIdx.x;
unsigned short num_simulations = (NL_max - NL_min) / NL_step * Ng_max * Nic;
// Check if this thread a valid one
if(threadId >= num_simulations) {
return;
}
// Initialize and seed the random number generator
curandState rand_state;
curand_init(threadId, clock(), clock(), &rand_state);
double tmax = 20;
double dt = 0.0002;
double epsilon = 0.01;
double vth = 0.8;
double vreset = 0;
double a = 1;
double b = 1;
double tol = 0.0001;
int Nstep = tmax / dt;
unsigned short ic = threadId % Nic;
unsigned short ig = (threadId / Nic) % Ng_max;
unsigned short iL = threadId / Ng_max / Nic;
unsigned short nL_break = NL_min + iL * NL_step;
results[threadId].ic = ic;
results[threadId].ig = ig;
results[threadId].iL = iL;
results[threadId].nL_break = nL_break;
int i;
unsigned short k, kk, InSync_neurons;
unsigned short spike[N], push_up_flag[N];
double f0, f1, f2, f3, tspike_diff1, tspike_diff2, t_old, t_new;
double v_old[N], v_new[N], push_up_amnt[N];
// double v_initnew[100]= {0.1545367758770665, 0.814818668976894, 0.15320113199547414, 0.8353524225981629, 0.08115890455440067, 0.6914756325608367, 0.4130575136157111, 0.5278299763853765, 0.2812216969669379, 0.8062893532936973, 0.9026514070819015, 0.6496189902535245, 0.6286630367202969, 0.6171265038631547, 0.472005565894945, 0.43981531433376, 0.8449193307307433, 0.3499655732796455, 0.6064637293486522, 0.1567131568957726, 0.6917890946540877, 0.19314656121526463, 0.9715334462829239, 0.42821872654614646, 0.5153519308836192, 0.8849979650599988, 0.6757089505722944, 0.31767924448674467, 0.2910320632769062, 0.32862537004994197, 0.45168148961810184, 0.01955708613009799, 0.5696484846788225, 0.450835587565686, 0.026054486371280938, 0.35039306479694443, 0.4040846812243857, 0.27342993028260487, 0.5638358124122043, 0.9484997135038367, 0.4077636621202826, 0.8220935863179847, 0.7196517781502417, 0.5968801478996293, 0.17909455403785213, 0.9071518551971325, 0.49350749777889813, 0.8002803025938409, 0.3071891631672753, 0.5367924012551228, 0.8628384065372916, 0.9147597382639411, 0.5859467778984498, 0.506728558827792, 0.5444346202867876, 0.7105452431393048, 0.8833280213387779, 0.7101823916271959, 0.21378218672881877, 0.2647380984685085, 0.8051689609566608, 0.636661266440235, 0.1284215317086359, 0.8991055384060852, 0.9185260634481671, 0.7505310205211034, 0.5449904790914537, 0.8418539582522988, 0.8227024116656272, 0.8206769102729885, 0.5615504438601934, 0.9070762107580452, 0.37619234543451996, 0.23085180280640882, 0.6623891864245589, 0.9806074893915904, 0.8067560379883594, 0.9895526050531294, 0.5548342062752014, 0.818488769718889, 0.48622692029833214, 0.6501553126075313, 0.3176597622855678, 0.9742850850234102, 0.6065112069910525, 0.37288262643468995, 0.074431646812396, 0.194162041772725, 0.021779459371789267, 0.2856071586947684, 0.5653325199766001, 0.10132723526598542, 0.7041397023518559, 0.6412510211401311, 0.061293406975714726, 0.2728425423344597, 0.6529094748027036, 
0.6152282218769618, 0.2633952283711999, 0.44178953896737416};
// Generate initial state
for(kk = 0; kk < N; kk++) {
results[threadId].v_init[kk] = curand_uniform_double(&rand_state) * (vth);
v_old[kk] = results[threadId].v_init[kk];
}
// for(kk = 0; kk < N; kk++) {
// results[threadId].v_init[kk] = v_initnew[kk];
// v_old[kk] = results[threadId].v_init[kk];
// }
// initialize arrays
memset(results[threadId].spike_count, 0, N * sizeof(unsigned short));
memset(results[threadId].tspike, 0, N * MAXCOL * sizeof(double));
// Time loop begins
t_old = 0;
for(i = 1; i < Nstep; i++) {
t_new = i*dt;
// Identify (1) the neurons that spiked in previous time step, (2) time of all the spikes of each neuron
// (3) total number of spikes in each neuron so far
for(kk = 0; kk < N; kk++) {
push_up_amnt[kk] = 0; // initialize these arrays at every time step
push_up_flag[kk] = 0;
if(v_old[kk] >= vth) {
spike[kk] = 1; // if neuron spiked
results[threadId].spike_count[kk]++;
results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]] = t_old;
}
else {
spike[kk] = 0; // if neuron did not spike
}
}
for(kk = 0; kk < N; kk++) {
for(k = 0; k < N; k++) {
if(k != kk && spike[kk] != 1 && spike[k]==1) {
push_up_amnt[kk] = push_up_amnt[kk] +
(epsilon) * w[threadId % Nic][kk][k] * spike[k];
push_up_flag[kk] = 1;
}
}
if(v_old[kk] < vth) {
if(push_up_flag[kk] == 1) {
v_new[kk] = v_old[kk] + push_up_amnt[kk];
if(v_new[kk] >= vth) {
v_new[kk] = vreset;
results[threadId].spike_count[kk]++;
results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]] = t_old;
}
}
else if(push_up_flag[kk] == 0) {
f0 = a - b * v_old[kk];
f1 = a - b * (v_old[kk] + f0 * 0.5 * dt);
f2 = a - b * (v_old[kk] + f1 * 0.5 * dt);
f3 = a - b * (v_old[kk] + f2 * dt);
v_new[kk] = v_old[kk] + dt * (f0 + 2 * f1 + 2 * f2 + f3) / 6;
}
}
else if (v_old[kk] >= vth) {
v_new[kk] = vreset;
}
// swap v_old & v_new for next time iteration
v_old[kk] = v_new[kk];
}
// Advance time
t_old = t_new;
} // Time loop ends
// Count number of iL-networks where all neurons fire in sync
InSync_neurons = 1;
for(kk = 1; kk < N; kk++) {
tspike_diff1 = fabs(results[threadId].tspike[0 * MAXCOL + results[threadId].spike_count[0] - 11] -
results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk] - 11]);
tspike_diff2 = fabs(results[threadId].tspike[0 * MAXCOL + results[threadId].spike_count[0] - 10] -
results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk] - 10]);
if(tspike_diff1 < tol && tspike_diff2 < tol) {
InSync_neurons++; // count number of neurons firing in sync for the chosen initial condition
}
}
if(InSync_neurons == N) {
//g_mem->All_sync_count1[iL][ig]++; // count number of ic's that yield All-sync for iL-iG network.
g_mem->All_sync_count2[iL]++;
//printf("Number of instances of full sync = %d \n",All_sync_count2[iL]);
//fprintf(all_sync,"Number of instances of full sync = %d \n",All_sync_count2[0]);
}
// Write spike time on file
/*for(kk=0;kk<N;kk++) {
tmp1 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-7];
tmp2 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-8];
tmp3 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-9];
tmp4 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-10];
tmp5 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-11];
tmp6 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-12];
tmp7 = 10000*results[threadId].tspike[kk * MAXCOL + results[threadId].spike_count[kk]-13];
//fprintf(spike_time,"%d \t %lu \t %lu \t %lu \t %lu \t %lu \t \%d \n",kk,tmp1,tmp2,tmp3,tmp4,tmp5,flag_unconnctd_graph);
//fprintf(spike_time,"%d \t %lu \t %lu \t %lu \t %lu \t %lu \t %lu \t %lu \n",kk,tmp1,tmp2,tmp3,tmp4,tmp5,tmp6,tmp7);
}*/
printf("Thread #%d finished with tmax = %lf\n", threadId, t_old);
}
/* Host driver: builds the random connected networks on the GPU, runs all
 * (iL, ig, ic) simulations in one launch, copies results back and writes a
 * text report to FILENAME.
 * NOTE(review): no CUDA error checking anywhere — a failed malloc/launch
 * would silently produce garbage output; consider wrapping calls. */
int main() {
unsigned short num_simulations = (NL_max - NL_min) / NL_step * Ng_max * Nic;
printf("Running %d simulations with N = %d, NL_max = %d, Ng_max = %d, Nic = %d\n\n", num_simulations, N, NL_max, Ng_max, Nic);
// Initialize the weight matrices in the GPU memory
void *d_w;
cudaMalloc(&d_w, (NL_max - NL_min) * Ng_max / NL_step * N * N * sizeof(double));
// One thread per iL value; each builds its Ng_max graphs serially.
store_weights<<<1, (NL_max - NL_min) / NL_step>>>((double (*)[N][N])d_w);
// Initialize the global GPU memory
global_mem g_mem;
global_mem *d_g_mem;
cudaMalloc(&d_g_mem, sizeof(global_mem));
// Zero all counters host-side, then upload.
for(unsigned short i = 0; i < NL_max - NL_min; ++i) {
for(unsigned short j = 0; j < Ng_max; ++j) {
g_mem.All_sync_count1[i][j] = 0;
}
g_mem.All_sync_count2[i] = 0;
}
cudaMemcpy(d_g_mem, &g_mem, sizeof(g_mem), cudaMemcpyHostToDevice);
// Allocate memory for storing results
simulation_result *results = (simulation_result *) malloc(sizeof(simulation_result) * num_simulations);
simulation_result *d_results;
cudaMalloc(&d_results, sizeof(simulation_result) * num_simulations);
// Get optimal grid and block dimensions
int grid_size, block_size;
cudaOccupancyMaxPotentialBlockSize(&grid_size, &block_size, simulate, 0, num_simulations);
printf("Number of blocks = %d, Number of threads in a block = %d\n", grid_size, block_size);
// Start all simulations simultaneously (simulate guards excess threads).
simulate<<<grid_size, block_size>>>(d_results, d_g_mem, (double (*)[N][N])d_w);
// Retrieve the results back from GPU (cudaMemcpy synchronizes with the kernel).
cudaMemcpy(results, d_results, sizeof(simulation_result) * num_simulations, cudaMemcpyDeviceToHost);
cudaMemcpy(&g_mem, d_g_mem, sizeof(g_mem), cudaMemcpyDeviceToHost);
// Open a file to store the results
FILE *file = fopen(FILENAME, "w");
// Write the results to file
for(int i = 0; i < num_simulations; ++i) {
// Decode the same (ic, ig, iL) mapping the kernel used.
unsigned short ic = i % Nic;
unsigned short ig = (i / Nic) % Ng_max;
unsigned short iL = i / Ng_max / Nic;
unsigned short nL_break = NL_min + iL * NL_step;
fprintf(file, "\n------------------------------------------------------------------\n");
// Simulation parameters
fprintf(file, "\n\n%d. nL_break = %d\tig = %d\tic = %d :\n\n\t", i+1, nL_break, ig, ic);
// TODO: Weight matrix
// Initial voltages
fprintf(file, "Initial voltages:\n\t");
for(unsigned short j = 0; j < N; ++j) {
fprintf(file, "%f ", results[i].v_init[j]);
}
// All_sync_count2
fprintf(file, "\n\n\tAll_sync_count2[%d]: %d\n\n\t", iL, g_mem.All_sync_count2[iL]);
// Spike times (row j, entries 1..spike_count[j] — entry 0 is unused)
fprintf(file, "Spike times:\n\t");
for(unsigned short j = 0; j < N; ++j) {
for(unsigned short k = 1; k <= results[i].spike_count[j]; ++k) {
fprintf(file, "%f ", results[i].tspike[j * MAXCOL + k]);
}
fprintf(file, "\n\t");
}
}
// Clean-up
fclose(file);
free(results);
cudaFree(d_w);
cudaFree(d_g_mem);
cudaFree(d_results);
return 0;
}
|
13,081 | #include "includes.h"
// Pivot row index published by a preceding pivot-search kernel; read by swapRow.
__device__ int greatest_row;
// Exchange two elements of a device float array in place.
__device__ void swap(float* arr, int ind_a, int ind_b)
{
    float held = arr[ind_b];
    arr[ind_b] = arr[ind_a];
    arr[ind_a] = held;
}
/* Swap pivot row k of the row-major matrix `mat` with the row stored in the
 * device global `greatest_row` (chosen by an earlier pivot-search kernel),
 * and swap the matching entries of the RHS vector b.
 * NOTE(review): the column stride assumes num_block == gridDim.x — confirm
 * at the call site. */
__global__ void swapRow(float* mat, float* b, int cols, int num_block, int k)
{
int row_i = greatest_row;
if (k != row_i) //If the same row don't swap.
{
int row_k = k*cols;       // flat offset of row k
int swap_row = row_i*cols; // flat offset of the pivot row
// Calc. swap interval
int i = threadIdx.x + blockIdx.x * blockDim.x;
// Swap matrix: each thread strides across the columns of the two rows
for (; i < cols; i += num_block*blockDim.x)
swap(mat, swap_row + i, row_k + i);
// Swap b: single designated thread, since it's one element pair
if(blockIdx.x == 0 && threadIdx.x == 0)
swap(b, row_i, k);
}
}
13,082 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <assert.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call.
// this is a No-op in release builds.
// inline suggests to the compiler to define this function in
// a way that it can be replaceable. This can speed up execution.
// this presents the compiler from going through the normal function
// overhead when it is called. It isn't looked up. It is compiled so
// that the instructions are just right there. This is used when the
// function has a small number of instructions.
// Pass-through check of a CUDA runtime status code. In debug builds a
// failure is reported on stderr and asserted; release builds compile the
// check away entirely and just hand the status back to the caller.
inline cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
  if (result == cudaSuccess)
    return result;
  fprintf(stderr, "CUDA Runtime Error: %s\n",
          cudaGetErrorString(result));
  assert(result == cudaSuccess);
#endif
  return result;
}
// Copies the data between the device and the host. It also takes
// performance measurements.
// If the transfers are small, then it would be better in a real
// application to do them batched transfers. You can do this by
// using a temporary array, preferably pinned, and packing all
// of the data that needs to be transferred into it. Transfer
// it when ready.
// this method can be used: (there is also a 3D version)
// cudaMemcpy2D(dest, dest_pitch, src, src_pitch, w, h, cudaMemcpyHostToDevice)
// Times one host-to-device and one device-to-host copy of n floats between
// h_a/h_b and the device buffer d using CUDA events, prints the effective
// bandwidth, and verifies the round trip bit-for-bit.
// Fixes vs. original: "Host to Deice" typo in the report corrected to
// "Host to Device", and desc is now const char* so passing string literals
// (as main does) is well-formed C++; existing char* callers still work.
void profileCopies(float* h_a, float* h_b, float* d, unsigned int n, const char* desc) {
  printf("\n%s transfers\n", desc);
  unsigned int bytes = n * sizeof(float);
  // events for timing
  cudaEvent_t startEvent, stopEvent;
  checkCuda(cudaEventCreate(&startEvent));
  checkCuda(cudaEventCreate(&stopEvent));
  // Host -> device, bracketed by events on the default stream.
  // (For serious measurement prefer nvprof/Nsight over instrumented code.)
  checkCuda(cudaEventRecord(startEvent, 0));
  checkCuda(cudaMemcpy(d, h_a, bytes, cudaMemcpyHostToDevice));
  checkCuda(cudaEventRecord(stopEvent, 0));
  checkCuda(cudaEventSynchronize(stopEvent));
  // elapsed time is in milliseconds, so bytes*1e-6/ms == GB/s
  float time;
  checkCuda(cudaEventElapsedTime(&time, startEvent, stopEvent));
  printf("  Host to Device bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
  // Device -> host, same procedure.
  checkCuda(cudaEventRecord(startEvent, 0));
  checkCuda(cudaMemcpy(h_b, d, bytes, cudaMemcpyDeviceToHost));
  checkCuda(cudaEventRecord(stopEvent, 0));
  checkCuda(cudaEventSynchronize(stopEvent));
  checkCuda(cudaEventElapsedTime(&time, startEvent, stopEvent));
  printf("  Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
  // Round-trip verification: exact equality is expected for pure copies.
  for (unsigned int i = 0; i < n; i++) {
    if (h_a[i] != h_b[i]) {
      printf("*** %s transfers failed *** \n", desc);
      break;
    }
  }
  // clean up events
  checkCuda(cudaEventDestroy(startEvent));
  checkCuda(cudaEventDestroy(stopEvent));
}
// Compares pageable vs. pinned host memory transfer bandwidth for a
// 16 MiB float buffer.
// Fix vs. original: the second memset cleared h_bPageable twice and left
// h_bPinned untouched; it now zeroes h_bPinned as clearly intended.
int main()
{
  unsigned int nElements = 4 * 1024 * 1024;
  const unsigned int bytes = nElements * sizeof(float);
  // host arrays, pageable and pinned variants
  float *h_aPageable, *h_bPageable;
  float *h_aPinned, *h_bPinned;
  // device array
  float *d_a;
  // allocate and initialize
  h_aPageable = (float*)malloc(bytes);                  // host pageable
  h_bPageable = (float*)malloc(bytes);                  // host pageable
  checkCuda(cudaMallocHost((void**)&h_aPinned, bytes)); // host pinned
  checkCuda(cudaMallocHost((void**)&h_bPinned, bytes)); // host pinned
  checkCuda(cudaMalloc((void**)&d_a, bytes));           // device
  for (unsigned int i = 0; i < nElements; ++i) {
    h_aPageable[i] = i;
  }
  memcpy(h_aPinned, h_aPageable, bytes);
  // zero both receive buffers before the round-trip comparison
  memset(h_bPageable, 0, bytes);
  memset(h_bPinned, 0, bytes);   // was a duplicate memset of h_bPageable
  // output device info and transfer size
  cudaDeviceProp prop;
  checkCuda(cudaGetDeviceProperties(&prop, 0));
  printf("\nDevice: %s\n", prop.name);
  printf("Transfer size (MB): %d\n", bytes / (1024 * 1024));
  // perform copies and report bandwidth
  profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
  profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
  printf("\n");
  // cleanup: pinned memory must go through cudaFreeHost
  cudaFree(d_a);
  cudaFreeHost(h_aPinned);
  cudaFreeHost(h_bPinned);
  free(h_aPageable);
  free(h_bPageable);
  // Pinned memory is typically several times faster, but it is device
  // dependent and reduces memory available to the OS — use it sparingly,
  // and minimize/batch transfers in general.
  return 0;
}
13,083 | #include <cstdio>
extern "C" {
__device__
static int THREADS_IN_BLOCK = 1024;
/* One comparator of the odd-even sorting network: after the call,
 * tab[for_min] <= tab[for_max]; out-of-range pairs are skipped.
 * NOTE(review): the two atomicExch calls are individually atomic, but the
 * read-compare-write sequence as a whole is not — correctness relies on the
 * host phasing launches so no two concurrent comparators share an element;
 * confirm that invariant holds at every call site. */
__device__
void min_max(int* tab, int for_min, int for_max, int size) {
if (for_min >= size || for_max >= size) {
return;
}
int min = tab[for_min];
int max = tab[for_max];
if (max < min) {
atomicExch(tab + for_max, min);
atomicExch(tab + for_min, max);
}
};
/* Phase 1 of the odd-even merge step: every thread in the first half of its
 * batch compares/exchanges with the element batch_size/2 positions ahead.
 * Thread ids are linearized from a 2-D launch grid; threads past `size`
 * exit immediately. */
__global__
void odd_even_phase1(int* to_sort, int batch_size, int size) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int thid = x + y*gridDim.x*blockDim.x;  // flatten 2-D grid to linear id
if (thid >= size) {
return;
}
int local_thid = thid % batch_size;     // position inside this batch
int opposite = thid + batch_size / 2;   // comparator partner
if (local_thid < batch_size / 2) {
min_max(to_sort, thid, opposite, size);
}
}
/* Phase 2 of the odd-even merge step: comparators at distance d within each
 * batch, applied only to positions whose (local_thid / d) parity is even.
 * NOTE(review): the upper guard uses `>= batch_size - 1` rather than
 * `>= batch_size`; confirm the off-by-one is intended for this network. */
__global__
void odd_even_phase2(int* to_sort, int d, int batch_size, int size) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int thid = x + y*gridDim.x*blockDim.x;  // flatten 2-D grid to linear id
if (thid >= size) {
return;
}
int local_thid = thid % batch_size;
// skip positions whose partner would fall outside the comparator range
if (local_thid < d || local_thid + d >= batch_size - 1) {
return;
}
int opposite = thid + d;
if (local_thid % (2*d) < d ) {
min_max(to_sort, thid, opposite, size);
}
}
}
|
13,084 | #include <stdio.h>
const int DIM = 2500;
// Evaluates the 2-D Rastrigin function
//   f(x, y) = 20 + x^2 + y^2 - 10*(cos 2*pi*x + cos 2*pi*y)
// at grid point (idx_X, idx_Y), one thread per output cell of the DIM x DIM
// surface Z. A bounds guard is added so the launch grid may over-provision
// (e.g. ceil-div) without writing out of range; in-range results are
// unchanged. Note the 20./10./M_PI literals are doubles, so the arithmetic
// is mixed precision around the fast single-precision __cosf intrinsic.
__global__ void Kernel(float *X, float *Y, float *Z) {
unsigned int idx_X = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int idx_Y = threadIdx.y + blockIdx.y * blockDim.y;
if (idx_X >= DIM || idx_Y >= DIM) return;  // guard partial tiles
Z[idx_X*DIM+idx_Y] = 20.+X[idx_X]*X[idx_X]+Y[idx_Y]*Y[idx_Y]-10.*(__cosf(2.*M_PI*X[idx_X]) + __cosf(2.*M_PI*Y[idx_Y]));
}
// Fill mem[0..dim-1] with dim evenly spaced samples spanning [min, max]:
// mem[0] == min and mem[dim-1] == max.
// Fix vs. original: dim == 1 divided by zero when computing the step; it
// now stores min, and dim <= 0 is a no-op. Behavior for dim >= 2 unchanged.
void initialization(float min, float max, float* mem, int dim) {
    if (dim <= 0) return;        // nothing to fill
    if (dim == 1) {              // single sample: avoid (max-min)/0
        mem[0] = min;
        return;
    }
    float delta = (max - min) / (dim - 1);
    for (int i = 0; i < dim; i++) {
        mem[i] = min + delta * i;
    }
}
/* Evaluates the Rastrigin surface on a DIM x DIM grid on the GPU and prints
 * it as semicolon-separated values.
 * NOTE(review): DIM = 2500 is not a multiple of 32, so N_Grid uses the
 * truncated DIM/32 = 78 and the last DIM - 78*32 = 4 rows/columns of hostZ
 * are printed without ever being computed (uninitialized memory). Fixing it
 * needs ceil-div here plus a bounds guard in Kernel. */
int main() {
cudaError cudaStatus;
size_t mem_size = sizeof(float)*DIM;
float *hostX, *hostY, *hostZ;
float *devX, *devY, *devZ;
hostX = (float*)malloc(mem_size);
hostY = (float*)malloc(mem_size);   // allocated but never used (devY is copied from devX)
hostZ = (float*)malloc(mem_size*DIM);  // DIM x DIM output surface
cudaMalloc((void**)&devX, mem_size);
cudaMalloc((void**)&devY, mem_size);
cudaMalloc((void**)&devZ, mem_size*DIM);
initialization(-5, 5, hostX, DIM);  // uniform samples of [-5, 5]
cudaMemcpy(devX, hostX, mem_size, cudaMemcpyHostToDevice);
// Y axis uses the same samples: device-to-device copy of devX
cudaMemcpy(devY, devX, mem_size, cudaMemcpyDeviceToDevice);
dim3 N_Block (32, 32, 1);
dim3 N_Grid (DIM/32,DIM/32,1);  // truncating division — see note above
Kernel <<< N_Grid, N_Block >>> (devX,devY,devZ);
// blocking copy also synchronizes with the kernel
cudaMemcpy(hostZ, devZ, mem_size*DIM, cudaMemcpyDeviceToHost);
cudaStatus = cudaGetLastError();
if(cudaStatus != cudaSuccess) {
printf("Last error: %s\n", cudaGetErrorString(cudaStatus));
return 0;
}
cudaFree(devX);
cudaFree(devY);
cudaFree(devZ);
for (int i = 0; i < DIM; i++) {
for (int j = 0; j < DIM; j++) {
printf("%lf;", hostZ[i*DIM + j]);
}
printf("\n");
}
free(hostX);
free(hostY);
free(hostZ);
return 0;
}
|
13,085 | //pass
//--gridDim=[4,1,1] --blockDim=[32,1,1]
// Writes each element's own global index into d_ptr[0..length-1]; threads
// beyond the end of the buffer simply return.
__global__ void sequence_gpu(int *d_ptr, int length)
{
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= length)
        return;
    d_ptr[gid] = gid;
}
|
13,086 | #include<stdio.h>
#include<cuda.h>
#include<math.h>
#include<sys/time.h>
// Empty kernel stub: appears deliberate so the surrounding host loop times
// pure transfer + launch overhead for varying N — confirm before adding a
// body. Parameters A, B, N are accepted but unused.
__global__
void Matadd(char* A, char*B, int N)
{
}
/* Measures host<->device transfer time for buffers of 2^j bytes, j = 0..25,
 * using pinned host memory and CUDA events.
 * Fixes vs. original:
 *  - buffers from cudaMallocHost must be released with cudaFreeHost
 *    (cudaFree on pinned host memory is invalid);
 *  - the four events were leaked on every iteration — now destroyed;
 *  - cudaEventElapsedTime reports milliseconds, not microseconds, so the
 *    report labels now say milliseconds. */
int main()
{
    for(int j=0;j<=25;j++)
    {
        cudaEvent_t start1,stop1,start2,stop2;
        float time1,time2, time3;
        int i;
        int N = pow(2,j);
        size_t size = N;
        printf ("\n The value of N is %d",N);
        cudaEventCreate(&start1);
        cudaEventCreate(&stop1);
        cudaEventCreate(&start2);
        cudaEventCreate(&stop2);
        // pinned host buffers (required for best transfer bandwidth)
        char* hA; cudaMallocHost(&hA, size);
        char* hB; cudaMallocHost(&hB, size);
        for(i=0;i<N;i++)
        {
            hA[i] = rand()%20-10;
        }
        // device buffers
        char* dA;
        cudaMalloc((void**) &dA,size);
        char* dB;
        cudaMalloc((void**) &dB,size);
        // time the host-to-device copy
        cudaEventRecord(start1, 0);
        cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
        cudaEventRecord(stop1, 0);
        cudaEventSynchronize(stop1);
        // launch config: enough blocks of 16 threads to cover N
        int threadsperblock = 16;
        int blockspergrid = (N + threadsperblock - 1)/ threadsperblock;
        // time kernel launch (empty stub) + device-to-host copy
        cudaEventRecord(start2, 0);
        Matadd<<<blockspergrid,threadsperblock>>>(dA,dB,N);
        cudaMemcpy(hB, dB, size, cudaMemcpyDeviceToHost);
        cudaEventRecord(stop2, 0);
        cudaEventSynchronize(stop2);
        cudaEventElapsedTime(&time1,start1,stop1);
        cudaEventElapsedTime(&time2,start2,stop2);
        printf("\n The Host to Device time for location A in milliseconds for 2 to power %d is %f respectively \n",j,time1);
        printf("\n The Device to Host time for location B in milliseconds for 2 to power %d is %f respectively \n",j,time2);
        time3 = time1 + time2;
        printf("\n The total data transfer time in milliseconds for 2 to power %d is %f respectively \n",j,time3);
        // release everything allocated this iteration
        cudaEventDestroy(start1);
        cudaEventDestroy(stop1);
        cudaEventDestroy(start2);
        cudaEventDestroy(stop2);
        cudaFreeHost(hA);   // pinned: cudaFreeHost, not cudaFree
        cudaFreeHost(hB);
        cudaFree(dA);
        cudaFree(dB);
    }
    return 0;
}
|
13,087 | /* helloCUDA.cu */
/****************************************************************************/
/* */
/* (C) 2010 Texas Advanced Computing Center. */
/* */
/* For information, contact Frank Willmore: willmore@tacc.utexas.edu */
/* */
/* Shareable in accordance with TACC and University of Texas policies. */
/* */
/****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#define GRID_DIMENSION 2
#define BLOCK_DIMENSION 4
__device__ char d_data_array[GRID_DIMENSION * BLOCK_DIMENSION];
__device__ char d_data_array2D[GRID_DIMENSION][BLOCK_DIMENSION];
__device__ int d_sum_array[BLOCK_DIMENSION];
__device__ float d_mean_array[GRID_DIMENSION];
__device__ float d_std_array[GRID_DIMENSION];
// Adds one to this thread's slot of the flat device array d_data_array.
// Expects launch shape <<<GRID_DIMENSION, BLOCK_DIMENSION>>>.
__global__ void increment()
{
    int offset = blockIdx.x * BLOCK_DIMENSION + threadIdx.x;
    d_data_array[offset] += 1;
}
// 2-D variant of increment(): each (block, thread) pair bumps its own cell
// of d_data_array2D.
__global__ void increment2D()
{
    const int row = blockIdx.x;
    const int col = threadIdx.x;
    d_data_array2D[row][col] += 1;
}
// Debug twin of calculateSum: accumulates one 2-D cell per thread into
// d_sum_array (the print statements are left commented out).
// NOTE(review): the unguarded += races across blocks whenever
// GRID_DIMENSION > 1, same as calculateSum — use atomicAdd if this kernel
// is re-enabled.
__global__ void printKernel()
{
d_sum_array[threadIdx.x] += d_data_array2D[blockIdx.x][threadIdx.x];
// printf("[%2d][%2d]:::\t%d\n", blockIdx.x, threadIdx.x, d_data_array2D[blockIdx.x][threadIdx.x]);
// printf("sum[%2d]:::\t%d\n", threadIdx.x, d_sum_array[threadIdx.x]);
}
// Sums each column of d_data_array2D into d_sum_array[threadIdx.x].
// All GRID_DIMENSION blocks update the same slot concurrently, so the
// read-modify-write must be atomic: the original unguarded `+=` was a data
// race for any grid with more than one block (this file launches 2).
__global__ void calculateSum()
{
    atomicAdd(&d_sum_array[threadIdx.x],
              (int)d_data_array2D[blockIdx.x][threadIdx.x]);
}
// Unimplemented placeholder: intended (per name and d_mean_array above) to
// compute per-block means; currently a no-op with its sketch left commented.
__global__ void calculateMean(char *h_nrgra, char *d_nrgra)
{
// unsigned int idx = gridDim.x * GRID_DIMENSION + blockIdx.x;
// unsigned int idy = gridDim.y * GRID_DIMENSION + blockIdx.y;
// checksum += data_array[idx][idy];
}
// Unimplemented placeholder: intended (per name and d_std_array above) to
// compute per-block standard deviations, likely via shared memory; currently
// a no-op with its sketch left commented.
__global__ void calculateStandardDeviation(char *h_nrgra, char *d_nrgra)
{
//shared mem example
// unsigned int idx = gridDim.x * GRID_DIMENSION + blockIdx.x;
// unsigned int idy = gridDim.y * GRID_DIMENSION + blockIdx.y;
// checksum += data_array[idx][idy];
}
/* Demonstrates __device__ global arrays: seeds them with random bytes,
 * increments them on the GPU (1-D and 2-D layouts), sums the 2-D columns,
 * and prints each stage.
 * Fixes vs. original:
 *  - cudaThreadSynchronize() is deprecated -> cudaDeviceSynchronize();
 *  - after each kernel launch the *stale* status in `r` was asserted; the
 *    launch status is now fetched with cudaGetLastError() first. */
int main(int argc, char* argv[])
{
    int i, j;
    FILE *fptr;
    char h_data_array[GRID_DIMENSION * BLOCK_DIMENSION];
    char h_data_array2D[GRID_DIMENSION][BLOCK_DIMENSION];
    int h_sum_array[BLOCK_DIMENSION];
    size_t size = sizeof(h_data_array);
    // seed the host buffer with random bytes
    fptr = fopen("/dev/urandom", "r");
    fread(h_data_array, size, 1, fptr);
    fclose(fptr);
    for (i=0; i< GRID_DIMENSION * BLOCK_DIMENSION; i++) printf("[%2d] = \t%d\n", i, h_data_array[i]);
    dim3 dimGrid(GRID_DIMENSION);
    dim3 dimBlock(BLOCK_DIMENSION);
    /////////////////// 1D array /////////////////////////////////
    cudaError_t r = cudaMemcpyToSymbol(d_data_array, h_data_array, size, 0, cudaMemcpyHostToDevice);
    printf("cuda error: %s\n", cudaGetErrorString(r));
    assert(r == cudaSuccess);
    increment<<< dimGrid, dimBlock >>>();
    r = cudaGetLastError();              // launch status, not the stale copy status
    cudaDeviceSynchronize();             // block until the device has completed
    assert(r == cudaSuccess);
    r = cudaMemcpyFromSymbol(h_data_array, d_data_array, size, 0, cudaMemcpyDeviceToHost);
    assert(r == cudaSuccess);
    for (i=0; i< GRID_DIMENSION * BLOCK_DIMENSION; i++) printf("[%2d] = \t%d\n", i, h_data_array[i]);
    /////////////////// 2D array /////////////////////////////////
    r = cudaMemcpyToSymbol(d_data_array2D, h_data_array, size, 0, cudaMemcpyHostToDevice);
    assert(r == cudaSuccess);
    increment2D<<< dimGrid, dimBlock >>>();
    r = cudaGetLastError();
    cudaDeviceSynchronize();
    assert(r == cudaSuccess);
    r = cudaMemcpyFromSymbol(h_data_array2D, d_data_array2D, size, 0, cudaMemcpyDeviceToHost);
    assert(r == cudaSuccess);
    for (i=0; i< GRID_DIMENSION; i++) for (j=0; j<BLOCK_DIMENSION; j++) printf("[%2d][%2d] = \t%d\n", i, j, h_data_array2D[i][j]);
    /////////////////// sum //////////////////////////////////////
    memset(h_sum_array, 0, sizeof(h_sum_array));
    for (i=0; i< BLOCK_DIMENSION; i++) printf("memset:::[%2d] = \t%d\n", i, h_sum_array[i]);
    r = cudaMemcpyToSymbol(d_sum_array, h_sum_array, sizeof(h_sum_array), 0, cudaMemcpyHostToDevice);
    assert(r == cudaSuccess);
    calculateSum<<< dimGrid, dimBlock >>>();
    //printKernel<<< dimGrid, dimBlock >>>();
    r = cudaGetLastError();
    cudaDeviceSynchronize();
    assert(r == cudaSuccess);
    r = cudaMemcpyFromSymbol(h_sum_array, d_sum_array, sizeof(h_sum_array), 0, cudaMemcpyDeviceToHost);
    assert(r == cudaSuccess);
    for (i=0; i< BLOCK_DIMENSION; i++) printf("[%2d] = \t%d\n", i, h_sum_array[i]);
    return 0;
}
|
13,088 | #include "includes.h"
// Counts the 1-entries of a (*rows x *cols) row-major matrix using a tiled,
// transposed pass through shared memory (coalesced reads in both phases),
// accumulating into *count with atomics.
// Fix vs. original: the load loop indexed data[] with no bounds check,
// reading out of bounds whenever the matrix dimensions are not multiples of
// CHUNK_SIZE. Out-of-range tile cells are now zero-filled — 0 never matches
// the == 1 test, so counts for in-range data are unchanged.
__global__ void matrix_count(int* data, int* count, int* rows, int* cols){
__shared__ int chunk[CHUNK_SIZE][CHUNK_SIZE];
int x = blockIdx.x * CHUNK_SIZE + threadIdx.x;  // column in data
int y = blockIdx.y * CHUNK_SIZE + threadIdx.y;  // row in data
for (int i=0; i<CHUNK_SIZE; i+= CHUNK_ROWS) {
if (x < *cols && (y + i) < *rows)
chunk[threadIdx.x][threadIdx.y+i] = data[(y + i) * *cols + x];
else
chunk[threadIdx.x][threadIdx.y+i] = 0;  // pad partial tiles
}
__syncthreads();  // tile fully populated before the transposed read
// re-map indices for the transposed traversal of the tile
x = blockIdx.y * CHUNK_SIZE + threadIdx.x;
y = blockIdx.x * CHUNK_SIZE + threadIdx.y;
for (int i=0; i<CHUNK_SIZE; i+= CHUNK_ROWS) {
if (x < *rows && y+i < *cols) {
if (chunk[threadIdx.y + i][threadIdx.x] == 1)
atomicAdd(count, 1);
}
}
}
13,089 | //headers
#include <iostream>
using namespace std;
#define N 10
//main()
int main(void)
{
    // Forward declaration of the CPU adder defined below.
    void add(int *a, int *b, int *c);

    int a[N], b[N], c[N];

    // Fill the inputs on the CPU: a[i] = -i, b[i] = i^2.
    for (int i = 0; i < N; i++)
    {
        a[i] = -i;
        b[i] = i * i;
    }

    add(a, b, c);

    // Print each elementwise sum.
    for (int i = 0; i < N; i++)
    {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    return (0);
}
// Single-CPU elementwise addition: c[i] = a[i] + b[i] for i in [0, N).
// (One "CPU", stride 1 — mirrors the structure of the CUDA variant of
// this textbook example.)
void add(int *a, int *b, int *c)
{
    for (int tid = 0; tid < N; tid++)
    {
        c[tid] = a[tid] + b[tid];
    }
}
|
13,090 | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
// Multilinear interpolation of a regular-grid surrogate model at `points`
// query points, one thread per point. Layout assumed by the indexing below
// (TODO confirm against host code): surrogate[0 .. noDims*dimRes) holds the
// per-dimension axes; the gridded response values follow; `predictors` holds
// noDims coordinates per point, row-major by point.
// NOTE(review): uses in-kernel malloc/free; allocation failure is not trapped.
__global__ void interpolateMulti(int points, int noDims, int dimRes, float* surrogate, float* predictors, float* results) {
// Global thread index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < points) {
float *lower, *upper, *coeffs;
int *lowerInd;
lower = (float*)malloc((noDims)*sizeof(float));
upper = (float*)malloc((noDims)*sizeof(float));
coeffs = (float*)malloc(((int)pow(2,noDims-1))*sizeof(float));
// Fix: lowerInd holds ints; it was allocated with sizeof(float)
// (same byte count here, but wrong by construction).
lowerInd = (int*)malloc((noDims)*sizeof(int));
// Per dimension: locate the lower grid index bracketing the query,
// clamped into [0, dimRes-2] so index+1 stays valid.
for (int jj = 0; jj < noDims; jj++) {
lower[jj] = surrogate[jj*dimRes];
upper[jj] = surrogate[(jj+1)*dimRes - 1];
lowerInd[jj] = (int)((dimRes-1)*(predictors[noDims*idx+jj] -
lower[jj])/(upper[jj] - lower[jj]));
if (lowerInd[jj] >= (dimRes-1)) {
lowerInd[jj] = dimRes-2;
} else if (lowerInd[jj] < 0){
lowerInd[jj] = 0;
}
}
// Let's interpolate
// Uppermost dimension's x value and interpolation fraction
float x0 = surrogate[lowerInd[0]];
float x1 = surrogate[lowerInd[0]+1];
float xd = (predictors[noDims*idx] - x0)/(x1-x0);
// First, assign the yvalues to the coefficients matrix
for (int jj = 0; jj < (int)pow(2,noDims-1); jj++) {
// Get the indices of the yvalues of the lower and upper bounding
// values on this dimension.
int idxL = dimRes*noDims;
for (int kk = 1; kk < noDims; kk++) {
// Parity of a bit of jj selects the lower or upper corner in dim kk.
int rem = ((int)(jj/((int)pow(2,noDims - kk - 1))) + 1) - 2*
(int)(((int)(jj/((int)pow(2,noDims - kk - 1))) + 1)/2);
if(rem > 0) {
idxL += lowerInd[kk]*(int)pow(dimRes,noDims - kk - 1);
} else {
idxL += (lowerInd[kk]+1)*(int)pow(dimRes,noDims - kk - 1);
}
}
int idxU = idxL + (lowerInd[0]+1)*(int)pow(dimRes,noDims-1);
idxL += lowerInd[0]*(int)pow(dimRes,noDims-1);
coeffs[jj] = surrogate[idxL]*(1 - xd) + surrogate[idxU]*xd;
}
// Now we work our way down the dimensions using our computed
// coefficients to get the interpolated value.
for (int jj = 1; jj < noDims; jj++) {
// Get the current dimension x value
x0 = surrogate[jj*dimRes + lowerInd[jj]];
x1 = surrogate[jj*dimRes + lowerInd[jj] + 1];
// NOTE(review): this reads predictors[jj], not
// predictors[noDims*idx + jj] as in the bracketing loop above, so
// every thread uses point 0's coordinate here — looks like a bug;
// confirm against a host reference before changing.
xd = (predictors[jj] - x0)/(x1-x0);
for (int kk = 0; kk < (int)pow(2,jj); kk++) {
// NOTE(review): for jj == noDims-1 this gives jump == 0, so
// coeffs[kk] blends with itself — verify the intended stride.
int jump = (int)pow(2,noDims - jj - 2);
coeffs[kk] = coeffs[kk]*(1 - xd) + coeffs[kk + jump]*xd;
}
}
// Fix: the result was previously read from coeffs AFTER free(coeffs)
// (use-after-free); store it before releasing the scratch buffers.
results[idx] = coeffs[0];
// Free variables
free(lowerInd);
free(coeffs);
free(upper);
free(lower);
}
} |
13,091 | #include<string.h>
#include<stdio.h>
#include<iostream>
#include<math.h>
#include<fstream>
#include<sys/time.h>
#include<cuda.h>
#define INF 10000;
#define MAX_THREADS_PER_BLOCK 1024
using namespace std;
dim3 gridDimension;
dim3 blockDimension;
// Reads a temporal graph in CSR form from fileName: a "vCount eCount source"
// header, then vCount+1 row offsets, then eCount destination / departure /
// duration entries. Arrays are allocated here with new[]; always returns true.
// NOTE(review): no check that the file opened or that reads succeeded.
bool readInput(char *fileName, int &vCount, int &eCount, int *&vertex, int *&edge, int *&departure, int *&duration, int &source)
{
    std::ifstream fin(fileName);
    fin >> vCount >> eCount >> source;

    vertex = new int[vCount + 1];
    edge = new int[eCount];
    departure = new int[eCount];
    duration = new int[eCount];

    for (int i = 0; i < vCount + 1; i++)
        fin >> vertex[i];
    for (int i = 0; i < eCount; i++)
        fin >> edge[i];
    for (int i = 0; i < eCount; i++)
        fin >> departure[i];
    for (int i = 0; i < eCount; i++)
        fin >> duration[i];
    return true;
}
// Debug helper: dumps the CSR arrays to "csr2.txt", one array per line
// (offsets, destinations, departures, durations). Always returns true.
bool printInput(int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration)
{
    std::ofstream fout("csr2.txt");
    for (int i = 0; i < vCount + 1; i++)
        fout << vertex[i] << " ";
    fout << std::endl;
    for (int i = 0; i < eCount; i++)
        fout << edge[i] << " ";
    fout << std::endl;
    for (int i = 0; i < eCount; i++)
        fout << departure[i] << " ";
    fout << std::endl;
    for (int i = 0; i < eCount; i++)
        fout << duration[i] << " ";
    fout << std::endl;
    return true;
}
// Chooses a 1D launch configuration covering n threads: a single n-wide block
// when n fits, otherwise full MAX_THREADS_PER_BLOCK blocks with the grid
// rounded up (ceiling division).
void initConfiguration(dim3 &grid, dim3 &block, int n)
{
    int blocks = 1;
    int threadsPerBlock = n;
    if (n > MAX_THREADS_PER_BLOCK)
    {
        // Integer ceiling division — same result as ceil(n / (double)MTPB).
        blocks = (n + MAX_THREADS_PER_BLOCK - 1) / MAX_THREADS_PER_BLOCK;
        threadsPerBlock = MAX_THREADS_PER_BLOCK;
    }
    grid.x = blocks; grid.y = 1; grid.z = 1;
    block.x = threadsPerBlock; block.y = 1; block.z = 1;
}
//**should be initialized with specified time instead of zero
// Allocates an n-element array and fills it with the "unreached" sentinel
// (the INF macro's value, written as a literal here).
// The original author's note stands: callers overwrite the source entry with
// the actual start time afterwards.
void initArray(int *&X, int n)
{
    const int unreachable = 10000; // value of the INF macro
    X = new int[n];
    for (int i = 0; i < n; i++)
        X[i] = unreachable;
}
// Allocates n ints on the device and copies host array X into them; the
// device pointer is returned through cX.
// NOTE(review): cudaMalloc/cudaMemcpy return codes are ignored — a failure
// here would only surface as a later kernel fault; consider checking them.
void cudaCopyToDevice(int *X, int *&cX, int n)
{
cudaMalloc((void**)&cX, n*sizeof(int));
cudaMemcpy( cX, X, n*sizeof(int), cudaMemcpyHostToDevice);
}
// Experimental/dead kernel: one thread per edge slot i, but u is hard-coded
// to 0, so every edge is relaxed as if it departed from vertex 0 — presumably
// a first-level expansion assuming source == 0; verify before reviving it.
// Only referenced from a commented-out <<<100, 1024>>> launch in
// computeEarliestTimes, which is what the hard-coded 1024*100 bound matches.
// NOTE(review): reads edge[i]/departure[i]/duration[i] for all i < 1024*100
// with no eCount guard — unsafe if eCount < 1024*100.
__global__
void processVertex(int *vertex, int *edge, int *departure, int *duration, int *earliestTime,int *level)
{
int i,u,v,t, lambda;
i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if(i>=1024*100) return;
u = 0;
v = edge[i];
t = departure[i];
lambda = duration[i];
// Connection is catchable (we arrive at u by t) and improves v's label.
if(earliestTime[u]<=t && t+lambda < earliestTime[v])
{ //if(i==0){printf("first thread updating:after \n"); }
earliestTime[v]= t + lambda;
level[v]=1;
}
}
// One thread per vertex u: relax every outgoing edge (u -> v, departing at t,
// lasting lambda) and improve v's earliest-arrival time when the connection
// is catchable (earliestTime[u] <= t). Sets *dContinue whenever any label
// improves so the host keeps iterating to a fixed point (Bellman-Ford style).
// NOTE(review): concurrent updates of earliestTime[v] are not atomic; the
// host-side iteration loop is what drives the labels to the fixed point.
__global__
void processVertices(int iterations, int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration, int *earliestTime, bool *dContinue, int *level)
{
    int u = blockIdx.x * MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (u >= vCount) return;
    // CSR adjacency: edges of u occupy [vertex[u], vertex[u+1]).
    for (int e = vertex[u]; e < vertex[u + 1]; e++)
    {
        int v = edge[e];
        int t = departure[e];
        int lambda = duration[e];
        if (earliestTime[u] <= t && t + lambda < earliestTime[v])
        {
            earliestTime[v] = t + lambda;
            *dContinue = true;
        }
    }
}
// Repeatedly launches the edge-relaxation kernel until no arrival-time label
// changes; the device-side flag dContinue reports whether any thread improved
// a label in the last pass.
void computeEarliestTimes(int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration, int *earliestTime, int *level)
{
    int iterations = 1;
    bool hContinue;
    bool *dContinue;
    cudaMalloc((void**)&dContinue, sizeof(bool));
    do
    {
        // If no thread sets this flag, the fixed point has been reached.
        hContinue = false;
        cudaMemcpy(dContinue, &hContinue, sizeof(bool), cudaMemcpyHostToDevice);
        processVertices<<<gridDimension, blockDimension, 0>>>(iterations, vCount, eCount, vertex, edge, departure, duration, earliestTime, dContinue, level);
        // The blocking copy below also synchronizes with the kernel.
        cudaMemcpy(&hContinue, dContinue, sizeof(bool), cudaMemcpyDeviceToHost);
        iterations++;
    } while (hContinue);
    // Fix: the flag buffer was previously leaked.
    cudaFree(dContinue);
}
// Driver: loads a temporal graph from argv[1], computes earliest-arrival
// times from the source vertex on the GPU, prints one "vertex time" line per
// vertex, then the kernel wall time in milliseconds.
int main(int argc, char *argv[])
{
    int vCount, eCount, source;
    int *edge, *vertex, *departure, *duration, *earliestTime;
    int *cEdge, *cVertex, *cDeparture, *cDuration, *cEarliestTime;
    int *cLevel = NULL; // fix: was passed to the kernel uninitialized (unused there)
    char fileName[100];
    struct timeval start, stop;
    double time;
    // Fix: guard against a missing input-file argument (previously crashed).
    if (argc < 2)
    {
        cout << "Usage: " << argv[0] << " <input-file>" << endl;
        return 1;
    }
    strcpy(fileName, argv[1]);
    readInput(fileName, vCount, eCount, vertex, edge, departure, duration, source);
    initConfiguration(gridDimension, blockDimension, vCount);
    // Fix: the offsets array has vCount+1 entries; copying only vCount left
    // the kernel reading vertex[u+1] past the device buffer for the last u.
    cudaCopyToDevice(vertex, cVertex, vCount + 1);
    cudaCopyToDevice(edge, cEdge, eCount);
    cudaCopyToDevice(departure, cDeparture, eCount);
    cudaCopyToDevice(duration, cDuration, eCount);
    initArray(earliestTime, vCount);
    earliestTime[source] = 0; // the journey starts at the source at time 0
    cudaCopyToDevice(earliestTime, cEarliestTime, vCount);
    gettimeofday(&start, 0);
    computeEarliestTimes(vCount, eCount, cVertex, cEdge, cDeparture, cDuration, cEarliestTime, cLevel);
    gettimeofday(&stop, 0);
    time = (1000000.0*(stop.tv_sec-start.tv_sec) + stop.tv_usec-start.tv_usec)/1000.0;
    cudaMemcpy(earliestTime, cEarliestTime, vCount*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(cEarliestTime);
    cudaFree(cEdge);
    cudaFree(cVertex);
    cudaFree(cDeparture);
    cudaFree(cDuration);
    for (int i = 0; i < vCount; i++)
    {
        cout << i << " " << earliestTime[i] << endl;
    }
    cout << "Time is " << time << endl;
    // Fix: release the host-side arrays (previously leaked).
    delete[] vertex;
    delete[] edge;
    delete[] departure;
    delete[] duration;
    delete[] earliestTime;
    return 0;
}
|
13,092 | #include <stdio.h>
#define THREADS 64
// Elementwise a[i] += b[i], staged through a shared-memory tile holding one
// value per thread (blockDim.x must equal THREADS).
__global__ void vecSum(int *a, int *b, int size){
    __shared__ int tile[THREADS];
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    int lid = threadIdx.x;
    if (gid < size)
        tile[lid] = a[gid] + b[gid];
    __syncthreads();
    if (gid < size)
        a[gid] = tile[lid];
}
// Benchmark driver: adds two 2048*2048-element int vectors on the GPU ten
// times and reports the per-launch and average kernel time via CUDA events.
int main(int argc, char* argv[]){
//initialization code
int size,threads,blocks;
float total_time;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
size = 2048*2048;
blocks = size/THREADS;
threads = THREADS;
// Round the block count up until the grid covers every element
// (the integer division above may have truncated).
while(blocks*threads < size)
blocks++;
int totalSize = size * sizeof(int);
int *a,*b,*dev_a,*dev_b;
cudaMalloc((void**)&dev_a,totalSize);
cudaMalloc((void**)&dev_b,totalSize);
a = (int*) malloc(totalSize);
b = (int*) malloc(totalSize);
//end mallocs
int idx;
for(idx=0;idx<size;idx++){
a[idx] = idx;
b[idx] = idx*2;
}
//copy to dev
cudaMemcpy(dev_a,a,totalSize,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,totalSize,cudaMemcpyHostToDevice);
int iteration = 0;
float avg_time = 0.0;
// NOTE(review): vecSum writes its result back into dev_a, so each of the 10
// timed launches adds b again — after the loop a[i] == i + 10*(2*i).
// Confirm the accumulation is intended for this benchmark.
for(iteration=0;iteration<10;iteration++){
//call kernel and measure times
cudaEventRecord(start,0);
vecSum<<<blocks,threads>>>(dev_a,dev_b,size);
cudaEventRecord(stop,0);
// Block until the stop event completes so the elapsed time is valid.
cudaEventSynchronize(stop);
cudaEventElapsedTime(&total_time,start,stop);
printf("\n time for %i blocks of %i threads : %f \n",blocks,threads,total_time);
avg_time+=total_time;
}
avg_time/=10.0;
// NOTE(review): message says "mult" but the kernel performs addition.
printf("average time for %i size vector mult is %f ",size,avg_time);
//copy back and prints
cudaMemcpy(a,dev_a,totalSize,cudaMemcpyDeviceToHost);
for(idx=0;idx<size;idx+=size/5)
printf("\n a[%i]=%i\n",idx,a[idx]);
//free
free(a);
free(b);
cudaFree(dev_a);
cudaFree(dev_b);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
13,093 | // Last update: 24/12/2020
#include <stdio.h>
#include <stdint.h>
__device__ int bCount = 0;
volatile __device__ int bCount1 = 0;
#define CHECK(call)\
{\
const cudaError_t error = call;\
if (error != cudaSuccess)\
{\
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);\
fprintf(stderr, "code: %d, reason: %s\n", error,\
cudaGetErrorString(error));\
exit(1);\
}\
}
// RAII wrapper around a pair of CUDA events for millisecond-resolution GPU
// timing on the default stream. Usage: Start(); ...work...; Stop(); Elapsed().
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
// Records the start event and waits for it, so timing begins "now".
void Start()
{
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
// Records the stop event (asynchronous; Elapsed() synchronizes on it).
void Stop()
{
cudaEventRecord(stop, 0);
}
// Blocks until the stop event completes, then returns ms between events.
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Prints elements a[s..e) space-separated, newline-terminated.
void printArray(uint32_t * a, int s, int e)
{
// Fix: %u is the correct conversion for uint32_t — %i printed values
// above INT_MAX as negative numbers.
for (int i = s; i < e; i++)
printf("%u ", a[i]);
printf("\n");
}
// Prints elements a[s..e) space-separated, newline-terminated.
void printArrayInt(int * a, int s, int e)
{
    int i = s;
    while (i < e)
    {
        printf("%d ", a[i]);
        i++;
    }
    printf("\n");
}
// Sequential Radix Sort
// "const uint32_t * in" means: the memory region pointed by "in" is read-only
// Sequential LSD radix sort (1 bit per pass, 32 passes) of `in` into `out`.
// Each pass is a STABLE counting sort on the current bit, ping-ponging the
// data between a scratch buffer and `out`; after the (even) number of passes
// the scratch buffer holds the sorted keys, which are copied into `out`.
void sortByHost(const uint32_t * in, int n,
uint32_t * out)
{
    int * bit = (int *)malloc(n * sizeof(int));
    int * onesBefore = (int *)malloc(n * sizeof(int));
    uint32_t * buf = (uint32_t *)malloc(n * sizeof(uint32_t));
    uint32_t * bufToFree = buf; // the malloc'd pointer, regardless of swaps
    memcpy(buf, in, n * sizeof(uint32_t));

    uint32_t * src = buf;
    uint32_t * dst = out;

    for (int bitIdx = 0; bitIdx < (int)(sizeof(uint32_t) * 8); bitIdx++)
    {
        // Current bit of every key.
        for (int i = 0; i < n; i++)
            bit[i] = (src[i] >> bitIdx) & 1;

        // Exclusive prefix count of ones.
        onesBefore[0] = 0;
        for (int i = 1; i < n; i++)
            onesBefore[i] = onesBefore[i - 1] + bit[i - 1];

        // Stable scatter: zeros keep relative order at the front; ones
        // follow after the nZeros zeros, also in order.
        int nZeros = n - onesBefore[n - 1] - bit[n - 1];
        for (int i = 0; i < n; i++)
        {
            int rank = (bit[i] == 0) ? (i - onesBefore[i])
                                     : (nZeros + onesBefore[i]);
            dst[rank] = src[i];
        }

        // Ping-pong the buffers for the next pass.
        uint32_t * t = src;
        src = dst;
        dst = t;
    }

    // 32 passes is even, so the final data sits in src (the scratch buffer).
    memcpy(out, src, n * sizeof(uint32_t));

    free(bufToFree);
    free(bit);
    free(onesBefore);
}
// Writes bits[i] = bit `bitIdx` of src[i] for every i < n (one thread per
// element, grid sized by the caller with a ceil-divided 1D launch).
__global__ void extractBitKernel(const uint32_t * src, int n,
int * bits,
int bitIdx)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
bits[i] = int((src[i] >> bitIdx) & 1);
// Fix: removed a trailing __syncthreads() — the kernel uses no shared
// memory and no thread reads another thread's output, so the barrier at
// kernel exit only cost time.
}
// Single-kernel exclusive scan of `bits` into `nOnesBefore` using the
// chained-block ("domino") technique: blocks claim consecutive chunk indices
// via atomicAdd on bCount, each block scans its 2*blockDim.x chunk in shared
// memory (Blelloch-style up-sweep + down-sweep), and cross-chunk prefixes are
// propagated serially through bSums, gated by the volatile counter bCount1.
// Launch contract: dynamic shared memory of 2*blockDim.x ints; bSums may be
// NULL when there is a single block; bCount/bCount1 must be reset to 0
// between launches (the host does this via cudaMemcpyToSymbol).
__global__ void scanKernel(const int * bits, int n, int * nOnesBefore,
volatile int * bSums)
{
// PARALELLIZED EXCLUSIVE SCAN ON BITS
extern __shared__ int s_data[]; // 2 * blockSize
__shared__ int bi; // this block's dynamically claimed chunk index
int tx = threadIdx.x;
if (tx == 0)
{
// Claim the next chunk in launch order — decouples hardware scheduling
// order from data order, which the serial bSums chain below relies on.
bi = atomicAdd(&bCount, 1);
}
__syncthreads();
int i1, i2;
i1 = bi * 2 * blockDim.x + tx;
i2 = i1 + blockDim.x;
// Load the input shifted right by one (element 0 becomes 0) so the
// inclusive sweeps below yield an EXCLUSIVE scan.
if (i1 < n)
s_data[threadIdx.x] = (tx == 0) ? 0 : bits[i1 - 1];
if (i2 < n)
s_data[threadIdx.x + blockDim.x] = bits[i2 - 1];
__syncthreads();
// NOTE(review): everything below, including several __syncthreads(), sits
// inside `if (i1 < n)`: in the final partial chunk, threads with i1 >= n
// skip the barriers other threads wait on — verify with synccheck.
if (i1 < n)
{
// Each block does scan with data on SMEM
// Reduction phase
for (int stride = 1; stride < 2 * blockDim.x; stride *= 2)
{
int s_dataIdx = (tx + 1) * 2 * stride - 1; // To avoid warp divergence
if (s_dataIdx < 2 * blockDim.x)
s_data[s_dataIdx] += s_data[s_dataIdx - stride];
__syncthreads();
}
// Post-reduction phase
for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
{
int s_dataIdx = (tx + 1) * 2 * stride - 1 + stride; // Wow
if (s_dataIdx < 2 * blockDim.x)
s_data[s_dataIdx] += s_data[s_dataIdx - stride];
__syncthreads();
}
// Compute bSums
if (bSums != NULL && threadIdx.x == 0)
{
// bSums[bi] is the last element of s_data
// plus
// the bit value at corresponding position
// NOTE(review): for the last partial chunk this reads
// bits[(bi+1)*2*blockDim.x - 1], which can lie past n — confirm the
// allocation rounds up, or guard this read.
bSums[bi] = s_data[2 * blockDim.x - 1] + bits[(bi + 1) * 2 * blockDim.x - 1];
}
if (tx == 0)
{
if (bi > 0)
{
// Spin until the previous chunk has published its running total.
while (bCount1 < bi) {}
bSums[bi] += bSums[bi - 1];
__threadfence(); // publish bSums[bi] before releasing the next chunk
}
bCount1 += 1;
}
__syncthreads();
// Fold the preceding chunks' total into this chunk's local scan.
if (bi > 0)
{
s_data[tx] += bSums[bi - 1];
if(i2 < n)
s_data[tx + blockDim.x] += bSums[bi - 1];
}
__syncthreads();
nOnesBefore[i1] = s_data[tx];
if(i2 < n)
nOnesBefore[i2] = s_data[tx + blockDim.x];
__syncthreads();
}
}
// Scatters each key of src into dst at its stable radix rank for the current
// bit: zeros land in [0, nZeros) in order, ones in [nZeros, n). Each thread
// handles two elements (i1 and i1 + blockDim.x); the per-element one-counts
// are staged in dynamic shared memory (2 * blockDim.x ints) before use.
// NOTE(review): the __syncthreads() below sits inside the `if (i1 < n)`
// branch, as in the original — threads past n skip the barrier.
__global__ void rank_n_resultKernel(uint32_t * src, int n, uint32_t * dst,
const int * bits,
const int * nOnesBefore,
const int nZeros)
{
    extern __shared__ int s_ones[];
    int tx = threadIdx.x;
    int i1 = blockIdx.x * 2 * blockDim.x + tx;
    int i2 = i1 + blockDim.x;
    if (i1 < n)
    {
        s_ones[tx] = nOnesBefore[i1];
        if (i2 < n)
            s_ones[tx + blockDim.x] = nOnesBefore[i2];
        __syncthreads();
        int rank = (bits[i1] == 0) ? (i1 - s_ones[tx])
                                   : (nZeros + s_ones[tx]);
        dst[rank] = src[i1];
        if (i2 < n)
        {
            rank = (bits[i2] == 0) ? (i2 - s_ones[tx + blockDim.x])
                                   : (nZeros + s_ones[tx + blockDim.x]);
            dst[rank] = src[i2];
        }
    }
}
// Parallel Radix Sort
// Parallel LSD radix sort (1 bit per pass, 32 passes). Per pass: extract the
// current bit, exclusive-scan it on the device (scanKernel), then scatter by
// rank (rank_n_resultKernel). blockSize is threads per block; the scan and
// scatter kernels each process 2*blockSize elements per block.
void sortByDevice(const uint32_t * in, int n, uint32_t * out, int blockSize)
{
    // Data sizes
    int blkDataSize = 2 * blockSize;
    size_t nBytes = n * sizeof(uint32_t);
    size_t helperBytes = n * sizeof(int);
    size_t smem = blkDataSize * sizeof(int);
    int gridSizeBits = (n - 1) / blockSize + 1;
    int gridSize = (n - 1) / blkDataSize + 1;
    // Value used to reset the device block counters between passes.
    const int default_b = 0;
    uint32_t * src, * d_src, * d_dst;
    int * d_bits, * d_nOnesBefore, * d_bSums;
    int lastBit, nZeros, nOnes;
    // Host allocation
    src = (uint32_t *)malloc(nBytes);
    uint32_t * originalSrc = src; // To free memory later
    memcpy(src, in, nBytes);
    // Device allocation
    CHECK(cudaMalloc(&d_src, nBytes));
    CHECK(cudaMalloc(&d_dst, nBytes));
    CHECK(cudaMalloc(&d_bits, helperBytes));
    CHECK(cudaMalloc(&d_nOnesBefore, helperBytes));
    if (gridSize > 1)
    {
        CHECK(cudaMalloc(&d_bSums, gridSize * sizeof(int)));
    }
    else
    {
        d_bSums = NULL; // single block: scanKernel skips cross-block sums
    }
    CHECK(cudaMemcpy(d_src, src, nBytes, cudaMemcpyHostToDevice));
    // One stable counting-sort pass per bit, LSB to MSB.
    for (int bitIdx = 0; bitIdx < (int)(sizeof(uint32_t) * 8); bitIdx++)
    {
        // Extract bits
        extractBitKernel<<<gridSizeBits, blockSize>>>(d_src, n, d_bits, bitIdx);
        cudaDeviceSynchronize();
        CHECK(cudaGetLastError());
        CHECK(cudaMemcpy(&lastBit, &d_bits[n - 1], sizeof(int), cudaMemcpyDeviceToHost));
        // Exclusive scan -> number of ones before each position
        scanKernel<<<gridSize, blockSize, smem>>>(d_bits, n, d_nOnesBefore, d_bSums);
        cudaDeviceSynchronize();
        CHECK(cudaGetLastError());
        CHECK(cudaMemcpy(&nOnes, &d_nOnesBefore[n - 1], sizeof(int), cudaMemcpyDeviceToHost));
        // Compute ranks and scatter
        nZeros = n - lastBit - nOnes;
        rank_n_resultKernel<<<gridSize, blockSize, smem>>>(d_src, n, d_dst, d_bits, d_nOnesBefore, nZeros);
        cudaDeviceSynchronize();
        CHECK(cudaGetLastError());
        // Reset bCount and bCount1 for the next pass
        CHECK(cudaMemcpyToSymbol(bCount, &default_b, sizeof(int), 0, cudaMemcpyHostToDevice));
        CHECK(cudaMemcpyToSymbol(bCount1, &default_b, sizeof(int), 0, cudaMemcpyHostToDevice));
        // Fix: swap the buffer POINTERS instead of copying buffer contents.
        // The previous version performed three whole-array device-to-device
        // memcpys per pass (96 copies in total) and leaked its temp buffer.
        uint32_t * t = d_src;
        d_src = d_dst;
        d_dst = t;
    }
    // After the even number of passes the sorted data sits in d_src.
    CHECK(cudaMemcpy(src, d_src, nBytes, cudaMemcpyDeviceToHost));
    memcpy(out, src, nBytes);
    // Free host memory
    free(originalSrc);
    // Free device memory (cudaFree(NULL) is a documented no-op)
    CHECK(cudaFree(d_src));
    CHECK(cudaFree(d_dst));
    CHECK(cudaFree(d_bits));
    CHECK(cudaFree(d_nOnesBefore));
    CHECK(cudaFree(d_bSums));
}
// Radix Sort
// Dispatches to the host or device radix sort and reports the elapsed time.
// blockSize is only meaningful when useDevice is true.
void sort(const uint32_t * in, int n,
uint32_t * out,
bool useDevice=false, int blockSize=1)
{
    GpuTimer timer;
    timer.Start();
    if (useDevice)
    {
        printf("\nRadix Sort by device\n");
        sortByDevice(in, n, out, blockSize);
    }
    else
    {
        printf("\nRadix Sort by host\n");
        sortByHost(in, n, out);
    }
    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
    // Surface any asynchronous kernel error before returning.
    cudaDeviceSynchronize();
    CHECK(cudaGetLastError());
}
// Queries device 0 and prints a summary of its compute capability, SM count,
// and memory sizes (global, per-SM shared, per-block shared).
void printDeviceInfo()
{
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
// Compares out against correctOut elementwise and prints a single verdict
// line ("CORRECT :)" or "INCORRECT :(").
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
    bool allEqual = true;
    for (int i = 0; i < n && allEqual; i++)
        allEqual = (out[i] == correctOut[i]);
    if (allEqual)
        printf("CORRECT :)\n");
    else
        printf("INCORRECT :(\n");
}
// Entry point: generates (1<<24)+1 random keys, sorts them on the host and
// on the device, and verifies the device result against the host result.
// Optional argv[1] overrides the device block size (default 512).
int main(int argc, char ** argv)
{
    printDeviceInfo();

    // Deliberately not a power of two, to exercise edge handling.
    int n = (1 << 24) + 1;
    printf("\nInput size: %d\n", n);

    size_t bytes = n * sizeof(uint32_t);
    uint32_t * in = (uint32_t *)malloc(bytes);
    uint32_t * out = (uint32_t *)malloc(bytes);        // Device result
    uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result

    for (int i = 0; i < n; i++)
        in[i] = rand();

    int blockSize = 512; // Default
    if (argc == 2)
        blockSize = atoi(argv[1]);

    sort(in, n, correctOut);           // host reference
    sort(in, n, out, true, blockSize); // device under test
    checkCorrectness(out, correctOut, n);

    free(in);
    free(out);
    free(correctOut);
    return EXIT_SUCCESS;
}
|
13,094 | #include <stdio.h>
#include <stdlib.h>
#include <string.h> /* strcmp/atoi helpers in parse_paramaters relied on transitive includes */
#include <math.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#define MAX_THREADS_PER_BLOCK 1024
#define MAX_BLOCKS_PER_GRID 65535
// Minimal double-precision complex number (re + im*i).
struct Complex{
double re;
double im;
};
// Complex product: (a+bi)(c+di) = (ac - bd) + (bc + ad)i.
__device__ Complex multiply_complex(Complex c1, Complex c2){
    Complex out;
    out.re = c1.re * c2.re - c1.im * c2.im;
    out.im = c1.im * c2.re + c1.re * c2.im;
    return out;
}
// Componentwise absolute value (|re|, |im|) of a complex number.
// (Not referenced by the Mandelbrot kernel in this file.)
__device__ Complex abs_complex(Complex c){
    Complex out;
    out.re = abs(c.re);
    out.im = abs(c.im);
    return out;
}
// |c|^2 = re^2 + im^2 — compared against 4 in the escape test, avoiding sqrt.
__device__ double magnitude_squared(Complex c){
return c.re * c.re + c.im * c.im;
}
// Maps an escape-iteration count to a packed color int (R in the low byte,
// then G, then B — matching the unpacking in save_as_png).
// The +2 brightens the image; the 2/4/10 channel multipliers are arbitrary
// aesthetic choices.
__device__ int get_color_value(int iter){
    int v = iter + 2;
    return (v * 2) | ((v * 4) << 8) | ((v * 10) << 16);
}
// One thread per pixel: iterates z = z^2 + c for the pixel's point c and
// stores a packed color (or 0 for points that never escape) into results.
// The view spans 4 units of the complex plane across the image width,
// centered on the origin.
__global__ void render_mandelbrot_k(int h, int w, int max_iter, int *results){
    long my_index = blockIdx.x * blockDim.x + threadIdx.x;
    // Fix: the grid is rounded up to a multiple of the block size, so the
    // trailing threads used to write past the end of `results`.
    if (my_index >= (long)h * (long)w) return;
    double a, b;
    Complex c1, c2;
    int x = my_index % w;
    int y = my_index / w;
    a = ((double)x - ((double)w / 2.0)) * 4.0 / (double)w;
    b = ((double)y - ((double)h / 2.0)) * 4.0 / (double)w;
    c1 = {0.0, 0.0};
    c2 = {a, b};
    int iter = 0;
    while (magnitude_squared(c1) < 4 && iter++ < max_iter){
        c1 = multiply_complex(c1, c1);
        c1.re += c2.re;
        c1.im += c2.im;
    }
    if (iter < max_iter) results[my_index] = get_color_value(iter);
    else results[my_index] = 0;
}
// Prints the message (plus a newline) to stdout and terminates with status -1.
__host__ void error_exit(const char *error_message){
    puts(error_message);
    exit(-1);
}
// Writes pixel_values (one packed color int per pixel, row-major; R in the
// low byte, then G, then B) to `filename` as an 8-bit RGB PNG using libpng's
// row-at-a-time API.
// NOTE(review): fopen/png_create_write_struct/png_create_info_struct results
// are not NULL-checked, and the setjmp landing site has no error branch — if
// libpng longjmps back here on error, execution falls straight through into
// png_init_io again instead of cleaning up. Consider `if (setjmp(...)) ...`.
__host__ void save_as_png(char *filename, int width, int height, int* pixel_values) {
FILE *fp = fopen(filename, "wb");
png_bytep png_row = NULL;
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
png_infop info_ptr = png_create_info_struct(png_ptr);
setjmp(png_jmpbuf(png_ptr));
png_init_io(png_ptr, fp);
png_set_IHDR(png_ptr, info_ptr, width, height,
8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
png_text title_text;
title_text.compression = PNG_TEXT_COMPRESSION_NONE;
title_text.key = "Title";
title_text.text = "Mandelbrot set visualization";
png_set_text(png_ptr, info_ptr, &title_text, 1);
png_write_info(png_ptr, info_ptr);
// One reusable row buffer of 3 bytes (RGB) per pixel.
png_row = (png_bytep) malloc(sizeof(png_byte) * width * 3);
for (int row = 0; row < height; row++){
for(int col = 0; col < width; col++){
png_row[col * 3] = (png_byte)(pixel_values[row * width + col] & 0xFF); // R
png_row[col * 3 + 1] = (png_byte)((pixel_values[row * width + col] >> 8) & 0xFF); // G
png_row[col * 3 + 2] = (png_byte)((pixel_values[row * width + col] >> 16) & 0xFF); // B
}
png_write_row(png_ptr, png_row);
}
png_write_end(png_ptr, info_ptr);
fclose(fp);
png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
free(png_row);
}
// Parses optional CLI flags: -o <file>, -w <width>, -h <height>, -iter <n>.
// Unknown flags, missing values, or non-positive numbers terminate the
// process via error_exit. Relies on argv[argc] == NULL to detect a trailing
// flag with no value.
__host__ void parse_paramaters(int argc, char* argv[], int* height, int* width, int* max_iter, char** filename){
    for (int i = 1; i < argc; i += 2){
        // Value check comes first, matching the original message ordering.
        if (argv[i+1] == NULL || argv[i+1][0] == '-')
            error_exit("No value supplied for argument");
        if (strcmp(argv[i], "-o") == 0){
            *filename = argv[i+1];
        }
        else if (strcmp(argv[i], "-w") == 0){
            int w = atoi(argv[i+1]);
            if (w <= 0) error_exit("Invalid width value.");
            *width = w;
        }
        else if (strcmp(argv[i], "-h") == 0){
            int h = atoi(argv[i+1]);
            if (h <= 0) error_exit("Invalid height value.");
            *height = h;
        }
        else if (strcmp(argv[i], "-iter") == 0){
            int it = atoi(argv[i+1]);
            if (it <= 0) error_exit("Invalid number of iterations.");
            *max_iter = it;
        }
        else {
            error_exit("Invalid input paramaters");
        }
    }
}
// Renders the Mandelbrot set on the GPU (one thread per pixel) and writes it
// as a PNG. Flags: -o <file> -w <width> -h <height> -iter <max_iterations>.
int main (int argc, char* argv[]){
    struct timeval start, end;
    // Default values, overridable from the command line.
    int width = 1024;
    int height = 1024;
    int max_iter = 1000;
    char* filename = (char*)"Mandelbrot.png";
    parse_paramaters(argc, argv, &height, &width, &max_iter, &filename);
    long long pixel_count = (long long)width * (long long)height;
    // One thread computes one pixel, so the pixel count must fit in one grid.
    if (pixel_count > (MAX_THREADS_PER_BLOCK * MAX_BLOCKS_PER_GRID)){
        error_exit("Requested resolution is not supported. Try lowering height and/or width values.");
    }
    int threads_in_block = MAX_THREADS_PER_BLOCK;
    int blocks_in_grid = (int)ceil((double)(height * width) / (double)threads_in_block);
    gettimeofday(&start, NULL);
    // Fix: the result buffers hold ints; sizeof(double) allocated and copied
    // twice the bytes actually needed.
    long size = (long)height * (long)width * sizeof(int);
    int* host_result = (int*)malloc(size);
    if (!host_result){
        error_exit("Error while allocating memory on the host, exiting.");
    }
    int* device_result = NULL;
    if (cudaSuccess != cudaMalloc((void **)&device_result, size)){
        error_exit("Error while allocating memory on the device, exiting.");
    }
    render_mandelbrot_k<<<blocks_in_grid, threads_in_block>>>(height,
                                                              width,
                                                              max_iter,
                                                              device_result);
    // Surface launch-configuration errors; the blocking memcpy below both
    // synchronizes with the kernel and surfaces execution faults.
    if (cudaSuccess != cudaGetLastError()){
        error_exit("Error while launching the kernel, exiting.");
    }
    if (cudaSuccess != cudaMemcpy(host_result, device_result, size, cudaMemcpyDeviceToHost)){
        error_exit("Error while copying results from device to host, exiting.");
    }
    gettimeofday(&end, NULL);
    long long elapsed = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
    printf("GPU calculations took %lld microseconds.\n", elapsed);
    save_as_png(filename, width, height, host_result);
    free(host_result);
    if (cudaSuccess != cudaFree(device_result)){
        error_exit("Error while deallocating memory on the device, exiting.");
    }
}
|
13,095 | // Simple CUDA example by Ingemar Ragnemalm 2009. Simplest possible?
// Assigns every element in an array with its index.
// nvcc simple.cu -L /usr/local/cuda/lib -lcudart -o simple
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
const float input[16] = { 1.f, 2.f, 4.f, 8.f,
16.f, 32.f, 64.f, 128.f,
256.f, 512.f, 1024.f, 2048.f,
4096.f, 8192.f, 16284.f, 32568.f };
// Replaces each of the first blockDim.x elements of c with its square root.
// Launched with a single block; threadIdx.x indexes the element directly.
__global__
void simple(float *c)
{
    float v = c[threadIdx.x];
    c[threadIdx.x] = sqrtf(v);
}
// Copies the constant input array to the GPU, takes elementwise square roots
// there, and prints the device results above a CPU reference row.
int main()
{
    float *c = new float[N];
    float *cd;
    const int size = N*sizeof(float);
    cudaMalloc( (void**)&cd, size );
    cudaMemcpy(cd, (void*)input, size, cudaMemcpyHostToDevice);
    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );
    simple<<<dimGrid, dimBlock>>>(cd);
    // Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is its direct replacement.
    cudaDeviceSynchronize();
    cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
    cudaFree( cd );
    // Device results.
    for (int i = 0; i < N; i++)
        printf("%f ", c[i]);
    printf("\n");
    // CPU reference for visual comparison.
    for (int i = 0; i < N; i++)
        printf("%f ", sqrtf(input[i]));
    printf("\n"); // fix: the reference row previously ran into "done"
    delete[] c;
    printf("done\n");
    // Fix: return 0 directly — EXIT_SUCCESS needs <stdlib.h>, which this
    // file does not include.
    return 0;
}
|
13,096 | #include <stdio.h>
#include "sys/time.h"
#include "string.h"
#include <stdbool.h>
#include <math.h>
#include <thrust/host_vector.h>
// All Constants, Globals, Enums
// Used for low level chess functions, such as adding a move, determining if a piece is under attack, etc.
// Some functions are used on the device, so they have a device version
// Side to move: white / black.
enum side
{
w,
b
};
// Piece codes: 1-6 are white pawn..king, 9-14 black pawn..king. Bit 3
// (value 8, also `offBoard`) distinguishes black pieces, so color tests can
// mask on it.
enum pieces
{
emSq,
wP,
wN,
wB,
wR,
wQ,
wK,
offBoard = 8,
bP,
bN,
bB,
bR,
bQ,
bK
};
// Castling-rights bit flags: white kingside/queenside (K/Q), black (k/q).
enum castling
{
K = 1,
Q = 2,
k = 4,
q = 8
};
// Device-side mirror of the castling flags (same values, dev_ prefix).
__device__ enum dev_castling { dev_K = 1,
dev_Q = 2,
dev_k = 4,
dev_q = 8 };
// Square indices on a 0x88 board: each rank occupies 16 slots (hence the
// jumps of 16 between ranks), files a-h use the low 8 of each row; a square
// is off-board iff (index & 0x88) != 0. noSq is the "no square" sentinel.
enum squares
{
a1 = 0,
b1,
c1,
d1,
e1,
f1,
g1,
h1,
a2 = 16,
b2,
c2,
d2,
e2,
f2,
g2,
h2,
a3 = 32,
b3,
c3,
d3,
e3,
f3,
g3,
h3,
a4 = 48,
b4,
c4,
d4,
e4,
f4,
g4,
h4,
a5 = 64,
b5,
c5,
d5,
e5,
f5,
g5,
h5,
a6 = 80,
b6,
c6,
d6,
e6,
f6,
g6,
h6,
a7 = 96,
b7,
c7,
d7,
e7,
f7,
g7,
h7,
a8 = 112,
b8,
c8,
d8,
e8,
f8,
g8,
h8,
noSq = -99
};
// Move-generation filter: all pseudo-legal moves, or captures only.
enum moveFlags
{
allPos,
captures
};
// attack directions
// Offsets in 0x88 coordinates: +-16 steps one rank, +-1 one file, so 15/17
// are the forward diagonals (pawn captures / bishop rays), 31/33/14/18 the
// knight jumps, and the king/rook sets combine single rank/file steps.
const int pawnAttacks[4] = {15, 17, -15, -17};
const int knightAttacks[8] = {31, 33, 14, 18, -31, -33, -14, -18};
const int kingAttacks[8] = {1, 15, 16, 17, -1, -15, -16, -17};
const int bishopAttacks[4] = {15, 17, -15, -17};
const int rookAttacks[4] = {1, 16, -1, -16};
// Device-side copies of the attack-direction tables (same values).
__device__ const int dev_pawnAttacks[4] = {15, 17, -15, -17};
__device__ const int dev_knightAttacks[8] = {31, 33, 14, 18, -31, -33, -14, -18};
__device__ const int dev_kingAttacks[8] = {1, 15, 16, 17, -1, -15, -16, -17};
__device__ const int dev_bishopAttacks[4] = {15, 17, -15, -17};
__device__ const int dev_rookAttacks[4] = {1, 16, -1, -16};
// tracking whether kings or rooks moved
// castling[sq] is the mask of castling rights that SURVIVE a move touching
// sq: e1 = 12 drops K|Q (king moved), a1 = 13 drops Q, h1 = 14 drops K, and
// 3/7/11 on the 8th rank do the same for black; 15 leaves rights untouched
// and 8 fills the off-board halves of each 16-wide 0x88 rank.
const int castling[128] =
{
13, 15, 15, 15, 12, 15, 15, 14, 8, 8, 8, 8, 8, 8, 8, 8,
15, 15, 15, 15, 15, 15, 15, 15, 8, 8, 8, 8, 8, 8, 8, 8,
15, 15, 15, 15, 15, 15, 15, 15, 8, 8, 8, 8, 8, 8, 8, 8,
15, 15, 15, 15, 15, 15, 15, 15, 8, 8, 8, 8, 8, 8, 8, 8,
15, 15, 15, 15, 15, 15, 15, 15, 8, 8, 8, 8, 8, 8, 8, 8,
15, 15, 15, 15, 15, 15, 15, 15, 8, 8, 8, 8, 8, 8, 8, 8,
15, 15, 15, 15, 15, 15, 15, 15, 8, 8, 8, 8, 8, 8, 8, 8,
7, 15, 15, 15, 3, 15, 15, 11, 8, 8, 8, 8, 8, 8, 8, 8};
// Device-side copy of the castling-rights table above.
__device__ const int dev_castling[128] =
{
13, 15, 15, 15, 12, 15, 15, 14, 8, 8, 8, 8, 8, 8, 8, 8,
15, 15, 15, 15, 15, 15, 15, 15, 8, 8, 8, 8, 8, 8, 8, 8,
15, 15, 15, 15, 15, 15, 15, 15, 8, 8, 8, 8, 8, 8, 8, 8,
15, 15, 15, 15, 15, 15, 15, 15, 8, 8, 8, 8, 8, 8, 8, 8,
15, 15, 15, 15, 15, 15, 15, 15, 8, 8, 8, 8, 8, 8, 8, 8,
15, 15, 15, 15, 15, 15, 15, 15, 8, 8, 8, 8, 8, 8, 8, 8,
15, 15, 15, 15, 15, 15, 15, 15, 8, 8, 8, 8, 8, 8, 8, 8,
7, 15, 15, 15, 3, 15, 15, 11, 8, 8, 8, 8, 8, 8, 8, 8};
// material weight of pieces
// Indexed by piece code: white pieces positive, black negative (centipawn
// scale; kings carry +-10000 so king value dominates any exchange).
const int materialWeight[15] =
{
0, 100, 300, 350, 525, 1000, 10000, 0,
0, -100, -300, -350, -525, -1000, -10000};
// Device-side copy of the material table.
__device__ const int dev_materialWeight[15] =
{
0, 100, 300, 350, 525, 1000, 10000, 0,
0, -100, -300, -350, -525, -1000, -10000};
// piece placement tables, as pieces have different values depending on their position
const int Pawns[128] =
{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, -10, -10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0,
5, 5, 5, 20, 20, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0,
10, 10, 10, 20, 20, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
10, 10, 10, 20, 20, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
20, 20, 20, 30, 30, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const int Knights[128] =
{
0, -10, 0, 0, 0, 0, -10, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 10, 20, 20, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5, 10, 15, 20, 20, 15, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0,
5, 10, 10, 20, 20, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const int Bishops[128] =
{
0, 0, -10, 0, 0, -10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 10, 15, 15, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 10, 20, 20, 20, 20, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 10, 15, 20, 20, 15, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 10, 15, 15, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const int Rooks[128] =
{
0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
25, 25, 25, 25, 25, 25, 25, 25, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const int Kings[128] =
{
5, 5, 0, -10, -10, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0,
0, 5, 5, -10, -10, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 5, 10, 10, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 5, 20, 20, 20, 20, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 5, 20, 20, 20, 20, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 5, 10, 10, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// Square-mirroring table: Mirror[sq] flips a square vertically (rank 1 <-> 8)
// so black pieces can reuse the white piece-square tables above.
// The a8..h1 identifiers are square constants declared elsewhere in the file.
const int Mirror[128] =
{
    a8, b8, c8, d8, e8, f8, g8, h8, 0, 0, 0, 0, 0, 0, 0, 0,
    a7, b7, c7, d7, e7, f7, g7, h7, 0, 0, 0, 0, 0, 0, 0, 0,
    a6, b6, c6, d6, e6, f6, g6, h6, 0, 0, 0, 0, 0, 0, 0, 0,
    a5, b5, c5, d5, e5, f5, g5, h5, 0, 0, 0, 0, 0, 0, 0, 0,
    a4, b4, c4, d4, e4, f4, g4, h4, 0, 0, 0, 0, 0, 0, 0, 0,
    a3, b3, c3, d3, e3, f3, g3, h3, 0, 0, 0, 0, 0, 0, 0, 0,
    a2, b2, c2, d2, e2, f2, g2, h2, 0, 0, 0, 0, 0, 0, 0, 0,
    a1, b1, c1, d1, e1, f1, g1, h1, 0, 0, 0, 0, 0, 0, 0, 0};
// Device copy of the pawn piece-square table (GPU kernels cannot read the
// host-side const arrays, hence the duplicated dev_* tables).
__device__ const int dev_Pawns[128] =
{
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, -10, -10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0,
    5, 5, 5, 20, 20, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0,
    10, 10, 10, 20, 20, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
    10, 10, 10, 20, 20, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0,
    20, 20, 20, 30, 30, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// Device copy of the knight piece-square table.
__device__ const int dev_Knights[128] =
{
    0, -10, 0, 0, 0, 0, -10, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 10, 20, 20, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    5, 10, 15, 20, 20, 15, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0,
    5, 10, 10, 20, 20, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// Device copy of the bishop piece-square table.
__device__ const int dev_Bishops[128] =
{
    0, 0, -10, 0, 0, -10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 10, 15, 15, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 10, 20, 20, 20, 20, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 10, 15, 20, 20, 15, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 10, 15, 15, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// Device copy of the rook piece-square table.
__device__ const int dev_Rooks[128] =
{
    0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    25, 25, 25, 25, 25, 25, 25, 25, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 5, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// Device copy of the king piece-square table.
__device__ const int dev_Kings[128] =
{
    5, 5, 0, -10, -10, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 5, 5, -10, -10, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 5, 10, 10, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 5, 20, 20, 20, 20, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 5, 20, 20, 20, 20, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 5, 10, 10, 10, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// Device copy of the square-mirroring table (vertical flip for black's use
// of the white piece-square tables).
__device__ const int dev_Mirror[128] =
{
    a8, b8, c8, d8, e8, f8, g8, h8, 0, 0, 0, 0, 0, 0, 0, 0,
    a7, b7, c7, d7, e7, f7, g7, h7, 0, 0, 0, 0, 0, 0, 0, 0,
    a6, b6, c6, d6, e6, f6, g6, h6, 0, 0, 0, 0, 0, 0, 0, 0,
    a5, b5, c5, d5, e5, f5, g5, h5, 0, 0, 0, 0, 0, 0, 0, 0,
    a4, b4, c4, d4, e4, f4, g4, h4, 0, 0, 0, 0, 0, 0, 0, 0,
    a3, b3, c3, d3, e3, f3, g3, h3, 0, 0, 0, 0, 0, 0, 0, 0,
    a2, b2, c2, d2, e2, f2, g2, h2, 0, 0, 0, 0, 0, 0, 0, 0,
    a1, b1, c1, d1, e1, f1, g1, h1, 0, 0, 0, 0, 0, 0, 0, 0};
// Structs used for representing the board, search state, and possible moves
// A single move: the encoded move integer (bit layout defined by SetMove
// below) plus a score used for move ordering in SortMoves.
typedef struct
{
    int move;
    int score;
} Move;
// Fixed-capacity list of generated moves for one position (256 is an upper
// bound on the number of pseudo-legal moves).
typedef struct
{
    Move moves[256];
    int moveCount; // number of valid entries in moves[]
} Movelist;
// Complete game state for one position.
typedef struct
{
    int position[128]; // 0x88 board: only squares with (sq & 0x88) == 0 are real
    int side;          // side to move (0 = white, 1 = black; see macros below)
    int enPassant;     // en-passant target square, or noSq when unavailable
    int castle;        // castling-rights bitmask (tested with K/Q/k/q flags)
    int kingSq[2];     // current king square for each color
    int ply;           // half-move depth counter used by the search
}
Chessboard;
// Per-search bookkeeping and result.
typedef struct
{
    long nodes;     // nodes visited during the search
    double fhf, fh; // presumably fail-high-first / fail-high counters; they are
                    // zeroed in InitSearch but never updated in this chunk — confirm
    int bestMove;   // best move found so far (encoded, see SetMove)
    int bestScore;  // score associated with bestMove
}
Search;
// Global host-side vectors holding every board position (and the matching
// Search record) that gets sent to the GPU.
// They are global because the author hit bugs passing them by reference;
// misuse/security is not a concern in this program.
thrust::host_vector<Chessboard> boards;
thrust::host_vector<Search> searches;
// The following macros clean up code presentation considerably. Note that
// several of them expand unparenthesized arguments and reference local
// variables (board, fromSq, fen, ...) by name, so they are only safe at the
// call sites used in this file.
#define MirrorSq(sq) Mirror[sq]
#define DevMirrorSq(sq) dev_Mirror[sq]
// 0x88 math: a square index is on the board iff (sq & 0x88) == 0
#define IsOnBoard(sq) (!(sq & 0x88))
// NOTE(review): fr2sq only produces valid squares together with FileLoop
// below (file counted 16 down to 9 maps to offsets 0..7) — confirm intended.
#define fr2sq(file, rank) (rank * 16 - file)
#define parse2sq(file, rank) ((rank - 1) * 16 + file)
#define GetFile(sq) (sq & 7)
#define GetRank(sq) (sq >> 4)
// These two read a local `fromSq` at the expansion site (used in move gen)
#define rank_7 (fromSq >= a7 && fromSq <= h7)
#define rank_2 (fromSq >= a2 && fromSq <= h2)
// Conversions to printable file/rank characters
#define GetFileChar(sq) (GetFile(sq) + 'a')
#define GetRankChar(sq) (GetRank(sq) + '1')
// Char classification (isPieceChar reads a local `fen` pointer)
#define isDigit(char) (char >= '0' && char <= '9')
#define isPieceChar(piece) ((*fen >= 'a' && *fen <= 'z') || ((*fen >= 'A' && *fen <= 'Z')))
// Piece-color tests: isBlack/isWhite span N..Q (no pawns/kings),
// isBlackPiece/isWhitePiece span P..K (all pieces). All read a local `board`.
#define isBlack(toSq) (board->position[toSq] >= bN && board->position[toSq] <= bQ)
#define isWhite(toSq) (board->position[toSq] >= wN && board->position[toSq] <= wQ)
#define isBlackPiece(toSq) (board->position[toSq] >= bP && board->position[toSq] <= bK)
#define DevisBlackPiece(toSq) (board->position[toSq] >= bP && board->position[toSq] <= bK)
#define isWhitePiece(toSq) (board->position[toSq] >= wP && board->position[toSq] <= wK)
#define DevisWhitePiece(toSq) (board->position[toSq] >= wP && board->position[toSq] <= wK)
// Quick board access — all assume a local `board` pointer in scope.
// `side`, `enPassant`, `castle`, `ply` shadow those identifiers file-wide.
#define pos(sq) board->position[sq]
#define side board->side
#define enPassant board->enPassant
#define castle board->castle
#define kingSq(col) board->kingSq[col]
#define DevkingSq(col) board->kingSq[col]
#define ply board->ply
// Board loops (FileLoop's 16..1 range pairs with fr2sq above)
#define LoopBoard for (int sq = 0; sq < 128; ++sq)
#define RankLoop for (int rank = 8; rank >= 1; rank--)
#define FileLoop for (int file = 16; file >= 1; file--)
// Board square accessors
#define SetSq(sq, piece) (pos(sq) = piece)
#define DevSetSq(sq, piece) (pos(sq) = piece)
#define GetSq(sq) pos(sq)
#define DevGetSq(sq) pos(sq)
// Prints a square as e.g. "e4", or "no" for the -99 sentinel
#define PrintSquare(sq) \
    if (sq == -99) \
        printf("no"); \
    else \
        printf("%c%c", GetFileChar(sq), GetRankChar(sq));
// #define PrintPromotedPiece(piece) printf("%c", promotedPieceChar[piece])
// Init board: clear every on-board square to emSq, mark the rest offBoard
#define ResetPosition(board) \
    LoopBoard { IsOnBoard(sq) ? SetSq(sq, emSq) : SetSq(sq, offBoard); }
#define ResetStats(board) \
    side = 0; \
    enPassant = noSq; \
    castle = 0; \
    ply = 0;
#define ResetBoard(board) \
    ResetPosition(board); \
    ResetStats(board)
// Print board: dump each rank, skipping squares whose value is 8 (offBoard
// padding, presumably — confirm against the piece encoding)
#define PrintPosition(board) \
    printf("\n"); \
    RankLoop \
    { \
        printf(" %d", rank); \
        FileLoop \
        { \
            if (GetSq(fr2sq(file, rank)) != 8) \
                printf(" %i", GetSq(fr2sq(file, rank))); \
        } \
        printf("\n"); \
    }
// Prints side to move, en-passant square and castling rights
#define PrintStats(board) \
    printf("\n a b c d e f g h\n\n"); \
    printf(" Side: %s\n", side ? "black" : "white"); \
    printf(" EnPassant: "); \
    PrintSquare(enPassant); \
    printf("\n"); \
    printf(" Castling: %c%c%c%c\n", \
           castle &K ? 'K' : '-', \
           castle &Q ? 'Q' : '-', \
           castle &k ? 'k' : '-', \
           castle &q ? 'q' : '-'); \
    printf("\n\n");
#define PrintBoard(board) \
    PrintPosition(board); \
    PrintStats(board);
// Device-side duplicates of the printing macros (identical expansions;
// device printf works but is for debugging only)
#define DevPrintPosition(board) \
    printf("\n"); \
    RankLoop \
    { \
        printf(" %d", rank); \
        FileLoop \
        { \
            if (GetSq(fr2sq(file, rank)) != 8) \
                printf(" %i", GetSq(fr2sq(file, rank))); \
        } \
        printf("\n"); \
    }
#define DevPrintStats(dev_board) \
    printf("\n a b c d e f g h\n\n"); \
    printf(" Side: %s\n", side ? "black" : "white"); \
    printf(" EnPassant: "); \
    PrintSquare(enPassant); \
    printf("\n"); \
    printf(" Castling: %c%c%c%c\n", \
           castle &K ? 'K' : '-', \
           castle &Q ? 'Q' : '-', \
           castle &k ? 'k' : '-', \
           castle &q ? 'q' : '-'); \
    printf("\n\n");
#define DevPrintBoard(dev_board) \
    DevPrintPosition(dev_board); \
    DevPrintStats(dev_board);
// Move encoding (host and device variants are identical):
//   bits 0-6   from square      bits 7-13  to square
//   bits 14-17 promoted piece   bit 18     capture flag
//   bit 19     pawn double-start flag
//   bit 20     en-passant flag  bit 21     castle flag
#define SetMove(f, t, prom, cap, pawn, e, cas) \
    ((f) | (t << 7) | (prom << 14) | (cap << 18) | (pawn << 19) | (e << 20) | (cas << 21))
#define DevSetMove(f, t, prom, cap, pawn, e, cas) \
    ((f) | (t << 7) | (prom << 14) | (cap << 18) | (pawn << 19) | (e << 20) | (cas << 21))
// Field extractors for the encoding above
#define GetMoveSource(move) (move & 0x7f)
#define DevGetMoveSource(move) (move & 0x7f)
#define GetMoveTarget(move) ((move >> 7) & 0x7f)
#define DevGetMoveTarget(move) ((move >> 7) & 0x7f)
#define GetMovePromPiece(move) ((move >> 14) & 0xf)
#define DevGetMovePromPiece(move) ((move >> 14) & 0xf)
#define GetMoveCaptureFlag(move) ((move >> 18) & 1)
#define DevGetMoveCaptureFlag(move) ((move >> 18) & 1)
#define GetMovePawnStartFlag(move) ((move >> 19) & 1)
#define DevGetMovePawnStartFlag(move) ((move >> 19) & 1)
#define GetMoveEnPassantFlag(move) ((move >> 20) & 1)
#define DevGetMoveEnPassantFlag(move) ((move >> 20) & 1)
#define GetMoveCastleFlag(move) ((move >> 21) & 1)
#define DevGetMoveCastleFlag(move) ((move >> 21) & 1)
// Swaps the highest-scoring remaining move into slot moveNum
// (reads locals `moveNum` and `list` at the expansion site)
#define SortMoves \
    for (int nextMove = moveNum + 1; nextMove < list->moveCount; ++nextMove) \
    { \
        if (list->moves[moveNum].score < list->moves[nextMove].score) \
        { \
            int tempScore = list->moves[moveNum].score; \
            int tempMove = list->moves[moveNum].move; \
            list->moves[moveNum].score = list->moves[nextMove].score; \
            list->moves[nextMove].score = tempScore; \
            list->moves[moveNum].move = list->moves[nextMove].move; \
            list->moves[nextMove].move = tempMove; \
        } \
    }
// Prints a move as source square followed by target square
#define PrintMove(move) \
    printf(" "); \
    PrintSquare(GetMoveSource(move)); \
    PrintSquare(GetMoveTarget(move));
#define LoopMoves for (int moveCount = 0; moveCount < list->moveCount; ++moveCount)
#define PrintMoveList(list) \
    LoopMoves \
    { \
        PrintMove(list->moves[moveCount].move); \
        printf(" SCORE: %d\n", list->moves[moveCount].score); \
    } \
    printf("\n Total moves: %d\n\n", list->moveCount);
// Undo a move by restoring a previously saved full-board copy
#define TakeBack(board, boardStored) board[0] = boardStored[0];
#define DevTakeBack(board, boardStored) board[0] = boardStored[0];
// True if sideToMove's king square is attacked by the opposite side
#define InCheck(board, sideToMove) \
    IsSquareAttacked(board, sideToMove ? kingSq(b) : kingSq(w), sideToMove ^ 1)
#define DevInCheck(board, sideToMove) \
    DevIsSquareAttacked(board, sideToMove ? kingSq(b) : kingSq(w), sideToMove ^ 1)
/*****The real fun begins here!*****/
// Function that determines whether a piece on a given square is being attacked
// The function works by checking all possible positions that a given piece type can attack the given piece.
// Returns 1 if square `sq` is attacked by side `attSide` (0 = white,
// 1 = black), else 0. Checks every piece type's attack pattern from `sq`
// outwards: pawn diagonals, knight jumps, bishop/rook rays (which also cover
// queens), and king steps.
// Fix: the knight and king loops previously read GetSq(dir) BEFORE checking
// (dir & 0x88). For off-board `dir` values that is an out-of-bounds read of
// board->position[128]; the read is now performed only after the bounds check.
static inline int IsSquareAttacked(Chessboard *board, int sq, int attSide)
{
    // by pawns (white pawns attack from below, black from above)
    if (!attSide)
    {
        if (!((sq - 15) & 0x88) && (GetSq(sq - 15) == wP))
            return 1;
        if (!((sq - 17) & 0x88) && (GetSq(sq - 17) == wP))
            return 1;
    }
    else
    {
        if (!((sq + 15) & 0x88) && (GetSq(sq + 15) == bP))
            return 1;
        if (!((sq + 17) & 0x88) && (GetSq(sq + 17) == bP))
            return 1;
    }
    // by knights
    for (int i = 0; i < 8; ++i)
    {
        int dir = sq + knightAttacks[i];
        if (!(dir & 0x88))
        {
            int delta = GetSq(dir); // safe: dir verified on-board above
            if (attSide ? delta == bN : delta == wN)
                return 1;
        }
    }
    // by bishops and queens (slide along each diagonal until blocked)
    for (int i = 0; i < 4; ++i)
    {
        int dir = sq + bishopAttacks[i];
        while (!(dir & 0x88))
        {
            int delta = GetSq(dir);
            if (attSide ? (delta == bB) || (delta == bQ) : (delta == wB) || (delta == wQ))
                return 1;
            else if (delta != 0)
                break; // any other piece blocks the ray
            dir += bishopAttacks[i];
        }
    }
    // by rooks and queens (slide along each rank/file until blocked)
    for (int i = 0; i < 4; ++i)
    {
        int dir = sq + rookAttacks[i];
        while (!(dir & 0x88))
        {
            int delta = GetSq(dir);
            if (attSide ? (delta == bR) || (delta == bQ) : (delta == wR) || (delta == wQ))
                return 1;
            else if (delta != 0)
                break;
            dir += rookAttacks[i];
        }
    }
    // by kings
    for (int i = 0; i < 8; ++i)
    {
        int dir = sq + kingAttacks[i];
        if (!(dir & 0x88))
        {
            int delta = GetSq(dir); // safe: dir verified on-board above
            if (attSide ? delta == bK : delta == wK)
                return 1;
        }
    }
    return 0;
}
// device version of the function above
__device__ static inline int DevIsSquareAttacked(Chessboard *board, int sq, int attSide)
{
// by pawns
if (!attSide)
{
if (!((sq - 15) & 0x88) && (GetSq(sq - 15) == wP))
return 1;
if (!((sq - 17) & 0x88) && (GetSq(sq - 17) == wP))
return 1;
}
else
{
if (!((sq + 15) & 0x88) && (GetSq(sq + 15) == bP))
return 1;
if (!((sq + 17) & 0x88) && (GetSq(sq + 17) == bP))
return 1;
}
// by knights
for (int i = 0; i < 8; ++i)
{
int dir = sq + dev_knightAttacks[i];
int delta = GetSq(dir);
if (!(dir & 0x88))
{
if (attSide ? delta == bN : delta == wN)
return 1;
}
}
// by bishops and queens
for (int i = 0; i < 4; ++i)
{
int dir = sq + dev_bishopAttacks[i];
while (!(dir & 0x88))
{
int delta = GetSq(dir);
if (attSide ? (delta == bB) || (delta == bQ) : (delta == wB) || (delta == wQ))
return 1;
else if (delta != 0)
break;
dir += dev_bishopAttacks[i];
}
}
// by rooks and queens
for (int i = 0; i < 4; ++i)
{
int dir = sq + dev_rookAttacks[i];
while (!(dir & 0x88))
{
int delta = GetSq(dir);
if (attSide ? (delta == bR) || (delta == bQ) : (delta == wR) || (delta == wQ))
return 1;
else if (delta != 0)
break;
dir += dev_rookAttacks[i];
}
}
// by kings
for (int i = 0; i < 8; ++i)
{
int dir = sq + dev_kingAttacks[i];
int delta = GetSq(dir);
if (!(dir & 0x88))
{
if (attSide ? delta == bK : delta == wK)
return 1;
}
}
return 0;
}
// Function that adds a move to our list of moves.
static inline void AddMove(Chessboard *board, Search *info, Movelist *list, int move)
{
list->moves[list->moveCount].move = move;
list->moveCount++;
}
// device version of the function above
__device__ static inline void DevAddMove(Chessboard *board, Search *info, Movelist *list, int move)
{
//printf("here");
list->moves[list->moveCount].move = move;
list->moveCount++;
}
// Generates all possible moves, some of which are illegal
// The function works by looping through all squares and on each square checking
// the type of piece it is and then from there it adds all moves to our search and list
// Generates all pseudo-legal moves for the side to move into `list`.
// Moves that would leave the own king in check are still included; they are
// rejected later by MakeMove. Works by scanning every on-board 0x88 square
// and dispatching on the piece found there: pawn pushes / captures /
// promotions / en passant, castling, then the knight, bishop/queen,
// rook/queen and king patterns. `side`, `enPassant`, `castle` and the
// rank_2 / rank_7 / isWhitePiece / isBlackPiece tests are macros reading
// the local `board` / `fromSq`.
static inline void GenerateMoves(Chessboard *board, Search *info, Movelist *list)
{
    list->moveCount = 0;
    for (int sq = 0; sq < 128; ++sq)
    {
        if (!(sq & 0x88))
        {
            // skip empty squares
            if (!GetSq(sq))
                continue;
            int fromSq = sq;
            if (!side)
            {
                // ---- white to move ----
                if (GetSq(fromSq) == wP)
                {
                    // pawn quiet move: single push (promoting from rank 7),
                    // plus a double push from rank 2 when both squares are empty
                    if (!((fromSq + 16) & 0x88) && !GetSq(fromSq + 16))
                    {
                        if (rank_7 && !GetSq(fromSq + 16))
                        {
                            AddMove(board, info, list, SetMove(fromSq, fromSq + 16, wN, 0, 0, 0, 0));
                            AddMove(board, info, list, SetMove(fromSq, fromSq + 16, wB, 0, 0, 0, 0));
                            AddMove(board, info, list, SetMove(fromSq, fromSq + 16, wR, 0, 0, 0, 0));
                            AddMove(board, info, list, SetMove(fromSq, fromSq + 16, wQ, 0, 0, 0, 0));
                        }
                        else
                        {
                            AddMove(board, info, list, SetMove(fromSq, fromSq + 16, 0, 0, 0, 0, 0));
                            if (rank_2 && !GetSq(fromSq + 32))
                                AddMove(board, info, list, SetMove(fromSq, fromSq + 32, 0, 0, 1, 0, 0));
                        }
                    }
                    // pawn capture moves: positive pawnAttacks offsets are the
                    // white (upward) diagonals
                    for (int i = 0; i < 4; ++i)
                    {
                        int dir = fromSq + pawnAttacks[i];
                        // en passant move (capture flag and en-passant flag set)
                        if (pawnAttacks[i] > 0 && !(dir & 0x88))
                        {
                            if (enPassant != noSq)
                            {
                                if (dir == enPassant)
                                    AddMove(board, info, list, SetMove(fromSq, dir, 0, 1, 0, 1, 0));
                            }
                        }
                        if ((pawnAttacks[i] > 0) && !(dir & 0x88) && isBlackPiece(dir))
                        {
                            if (rank_7)
                            {
                                // capture with promotion
                                AddMove(board, info, list, SetMove(fromSq, dir, wN, 1, 0, 0, 0));
                                AddMove(board, info, list, SetMove(fromSq, dir, wB, 1, 0, 0, 0));
                                AddMove(board, info, list, SetMove(fromSq, dir, wR, 1, 0, 0, 0));
                                AddMove(board, info, list, SetMove(fromSq, dir, wQ, 1, 0, 0, 0));
                            }
                            else
                            {
                                AddMove(board, info, list, SetMove(fromSq, dir, 0, 1, 0, 0, 0));
                            }
                        }
                    }
                }
                // castling: squares between king and rook empty, king's path not attacked
                if (GetSq(fromSq) == wK)
                {
                    if (castle & K)
                    {
                        if (!GetSq(f1) && !GetSq(g1))
                        {
                            if (!IsSquareAttacked(board, e1, b) && !IsSquareAttacked(board, f1, b))
                                AddMove(board, info, list, SetMove(e1, g1, 0, 0, 0, 0, 1));
                        }
                    }
                    if (castle & Q)
                    {
                        if (!GetSq(d1) && !GetSq(c1) && !GetSq(b1))
                        {
                            if (!IsSquareAttacked(board, e1, b) && !IsSquareAttacked(board, d1, b))
                                AddMove(board, info, list, SetMove(e1, c1, 0, 0, 0, 0, 1));
                        }
                    }
                }
            }
            else
            {
                // ---- black to move (mirror of the white pawn/castle logic) ----
                if (GetSq(fromSq) == bP)
                {
                    // pawn quiet move (promoting from rank 2, double push from rank 7)
                    if (!((fromSq - 16) & 0x88) && !GetSq(fromSq - 16))
                    {
                        if (rank_2 && !GetSq(fromSq - 16))
                        {
                            AddMove(board, info, list, SetMove(fromSq, fromSq - 16, bN, 0, 0, 0, 0));
                            AddMove(board, info, list, SetMove(fromSq, fromSq - 16, bB, 0, 0, 0, 0));
                            AddMove(board, info, list, SetMove(fromSq, fromSq - 16, bR, 0, 0, 0, 0));
                            AddMove(board, info, list, SetMove(fromSq, fromSq - 16, bQ, 0, 0, 0, 0));
                        }
                        else
                        {
                            AddMove(board, info, list, SetMove(fromSq, fromSq - 16, 0, 0, 0, 0, 0));
                            if (rank_7 && !GetSq(fromSq - 32))
                                AddMove(board, info, list, SetMove(fromSq, fromSq - 32, 0, 0, 1, 0, 0));
                        }
                    }
                    // pawn capture moves: negative pawnAttacks offsets are the
                    // black (downward) diagonals
                    for (int i = 0; i < 4; ++i)
                    {
                        int dir = fromSq + pawnAttacks[i];
                        // en passant move
                        // NOTE(review): the capture flag here is 0, while the white
                        // en-passant move above sets it to 1 — likely an
                        // inconsistency; confirm the intended encoding.
                        if (pawnAttacks[i] < 0 && !(dir & 0x88))
                        {
                            if (enPassant != noSq)
                            {
                                if (dir == enPassant)
                                    AddMove(board, info, list, SetMove(fromSq, dir, 0, 0, 0, 1, 0));
                            }
                        }
                        if ((pawnAttacks[i] < 0) && !(dir & 0x88) && isWhitePiece(dir))
                        {
                            if (rank_2)
                            {
                                // capture with promotion
                                AddMove(board, info, list, SetMove(fromSq, dir, bN, 1, 0, 0, 0));
                                AddMove(board, info, list, SetMove(fromSq, dir, bB, 1, 0, 0, 0));
                                AddMove(board, info, list, SetMove(fromSq, dir, bR, 1, 0, 0, 0));
                                AddMove(board, info, list, SetMove(fromSq, dir, bQ, 1, 0, 0, 0));
                            }
                            else
                            {
                                AddMove(board, info, list, SetMove(fromSq, dir, 0, 1, 0, 0, 0));
                            }
                        }
                    }
                }
                // castling
                if (GetSq(fromSq) == bK)
                {
                    if (castle & k)
                    {
                        if (!GetSq(f8) && !GetSq(g8))
                        {
                            if (!IsSquareAttacked(board, e8, w) && !IsSquareAttacked(board, f8, w))
                                AddMove(board, info, list, SetMove(e8, g8, 0, 0, 0, 0, 1));
                        }
                    }
                    if (castle & q)
                    {
                        if (!GetSq(d8) && !GetSq(c8) && !GetSq(b8))
                        {
                            if (!IsSquareAttacked(board, e8, w) && !IsSquareAttacked(board, d8, w))
                                AddMove(board, info, list, SetMove(e8, c8, 0, 0, 0, 0, 1));
                        }
                    }
                }
            }
            // knights: jump moves to empty squares or enemy-occupied squares
            if (side ? GetSq(fromSq) == bN : GetSq(fromSq) == wN)
            {
                for (int i = 0; i < 8; ++i)
                {
                    int dir = sq + knightAttacks[i];
                    int delta = GetSq(dir);
                    if (!(dir & 0x88))
                    {
                        if (side ? (!delta || isWhitePiece(dir)) : (!delta || isBlackPiece(dir)))
                        {
                            if (!delta)
                                AddMove(board, info, list, SetMove(fromSq, dir, 0, 0, 0, 0, 0));
                            else
                                AddMove(board, info, list, SetMove(fromSq, dir, 0, 1, 0, 0, 0));
                        }
                    }
                }
            }
            // bishops and queens: slide along diagonals until blocked
            if (side ? (GetSq(fromSq) == bB) || (GetSq(fromSq) == bQ) : (GetSq(fromSq) == wB) || (GetSq(fromSq) == wQ))
            {
                for (int i = 0; i < 4; ++i)
                {
                    int dir = sq + bishopAttacks[i];
                    while (!(dir & 0x88))
                    {
                        int delta = GetSq(dir);
                        // if hits own piece
                        if (side ? isBlackPiece(dir) : isWhitePiece(dir))
                            break;
                        // if hits opponent's piece
                        else if (side ? isWhitePiece(dir) : isBlackPiece(dir))
                        {
                            AddMove(board, info, list, SetMove(fromSq, dir, 0, 1, 0, 0, 0));
                            break;
                        }
                        // on empty square
                        else if (!delta)
                        {
                            AddMove(board, info, list, SetMove(fromSq, dir, 0, 0, 0, 0, 0));
                        }
                        dir += bishopAttacks[i];
                    }
                }
            }
            // rooks and queens: slide along ranks/files until blocked
            if (side ? (GetSq(fromSq) == bR) || (GetSq(fromSq) == bQ) : (GetSq(fromSq) == wR) || (GetSq(fromSq) == wQ))
            {
                for (int i = 0; i < 4; ++i)
                {
                    int dir = sq + rookAttacks[i];
                    while (!(dir & 0x88))
                    {
                        int delta = GetSq(dir);
                        // if hits own piece
                        if (side ? isBlackPiece(dir) : isWhitePiece(dir))
                            break;
                        // if hits opponent's piece
                        else if (side ? isWhitePiece(dir) : isBlackPiece(dir))
                        {
                            AddMove(board, info, list, SetMove(fromSq, dir, 0, 1, 0, 0, 0));
                            break;
                        }
                        // on empty square
                        else if (!delta)
                        {
                            AddMove(board, info, list, SetMove(fromSq, dir, 0, 0, 0, 0, 0));
                        }
                        dir += rookAttacks[i];
                    }
                }
            }
            // kings: one-step moves to empty or enemy-occupied squares
            if (side ? GetSq(fromSq) == bK : GetSq(fromSq) == wK)
            {
                for (int i = 0; i < 8; ++i)
                {
                    int dir = sq + kingAttacks[i];
                    int delta = GetSq(dir);
                    if (!(dir & 0x88))
                    {
                        if (side ? (!delta || isWhitePiece(dir)) : (!delta || isBlackPiece(dir)))
                        {
                            if (!delta)
                                AddMove(board, info, list, SetMove(fromSq, dir, 0, 0, 0, 0, 0));
                            else
                                AddMove(board, info, list, SetMove(fromSq, dir, 0, 1, 0, 0, 0));
                        }
                    }
                }
            }
        }
    }
}
// device version of the function above
__device__ static void DevGenerateMoves(Chessboard *board, Search *info, Movelist *list)
{
list->moveCount = 0;
//printf("some\n");
for (int sq = 0; sq < 128; ++sq)
{
if (!(sq & 0x88))
{
// skip empty squares
if (!DevGetSq(sq))
continue;
int fromSq = sq;
if (!side)
{
if (DevGetSq(fromSq) == wP)
{
// pawn quiet move
if (!((fromSq + 16) & 0x88) && !DevGetSq(fromSq + 16))
{
if (rank_7 && !DevGetSq(fromSq + 16))
{
DevAddMove(board, info, list, DevSetMove(fromSq, fromSq + 16, wN, 0, 0, 0, 0));
DevAddMove(board, info, list, DevSetMove(fromSq, fromSq + 16, wB, 0, 0, 0, 0));
DevAddMove(board, info, list, DevSetMove(fromSq, fromSq + 16, wR, 0, 0, 0, 0));
DevAddMove(board, info, list, DevSetMove(fromSq, fromSq + 16, wQ, 0, 0, 0, 0));
}
else
{
DevAddMove(board, info, list, DevSetMove(fromSq, fromSq + 16, 0, 0, 0, 0, 0));
if (rank_2 && !DevGetSq(fromSq + 32))
DevAddMove(board, info, list, DevSetMove(fromSq, fromSq + 32, 0, 0, 1, 0, 0));
}
}
// pawn capture move
for (int i = 0; i < 4; ++i)
{
int dir = fromSq + dev_pawnAttacks[i];
// en passant move
if (dev_pawnAttacks[i] > 0 && !(dir & 0x88))
{
if (enPassant != noSq)
{
if (dir == enPassant)
DevAddMove(board, info, list, DevSetMove(fromSq, dir, 0, 1, 0, 1, 0));
}
}
if ((dev_pawnAttacks[i] > 0) && !(dir & 0x88) && isBlackPiece(dir))
{
if (rank_7)
{
DevAddMove(board, info, list, DevSetMove(fromSq, dir, wN, 1, 0, 0, 0));
DevAddMove(board, info, list, DevSetMove(fromSq, dir, wB, 1, 0, 0, 0));
DevAddMove(board, info, list, DevSetMove(fromSq, dir, wR, 1, 0, 0, 0));
DevAddMove(board, info, list, DevSetMove(fromSq, dir, wQ, 1, 0, 0, 0));
}
else
{
DevAddMove(board, info, list, DevSetMove(fromSq, dir, 0, 1, 0, 0, 0));
}
}
}
}
// castling
if (DevGetSq(fromSq) == wK)
{
if (castle & K)
{
if (!DevGetSq(f1) && !DevGetSq(g1))
{
if (!DevIsSquareAttacked(board, e1, b) && !DevIsSquareAttacked(board, f1, b))
DevAddMove(board, info, list, DevSetMove(e1, g1, 0, 0, 0, 0, 1));
}
}
if (castle & Q)
{
if (!DevGetSq(d1) && !DevGetSq(c1) && !DevGetSq(b1))
{
if (!DevIsSquareAttacked(board, e1, b) && !DevIsSquareAttacked(board, d1, b))
DevAddMove(board, info, list, DevSetMove(e1, c1, 0, 0, 0, 0, 1));
}
}
}
}
else
{
if (DevGetSq(fromSq) == bP)
{
// pawn quiet move
if (!((fromSq - 16) & 0x88) && !DevGetSq(fromSq - 16))
{
if (rank_2 && !DevGetSq(fromSq - 16))
{
DevAddMove(board, info, list, DevSetMove(fromSq, fromSq - 16, bN, 0, 0, 0, 0));
DevAddMove(board, info, list, DevSetMove(fromSq, fromSq - 16, bB, 0, 0, 0, 0));
DevAddMove(board, info, list, DevSetMove(fromSq, fromSq - 16, bR, 0, 0, 0, 0));
DevAddMove(board, info, list, DevSetMove(fromSq, fromSq - 16, bQ, 0, 0, 0, 0));
}
else
{
DevAddMove(board, info, list, DevSetMove(fromSq, fromSq - 16, 0, 0, 0, 0, 0));
if (rank_7 && !DevGetSq(fromSq - 32))
DevAddMove(board, info, list, DevSetMove(fromSq, fromSq - 32, 0, 0, 1, 0, 0));
}
}
// pawn capture move
for (int i = 0; i < 4; ++i)
{
int dir = fromSq + dev_pawnAttacks[i];
// en passant move
if (dev_pawnAttacks[i] < 0 && !(dir & 0x88))
{
if (enPassant != noSq)
{
if (dir == enPassant)
DevAddMove(board, info, list, DevSetMove(fromSq, dir, 0, 0, 0, 1, 0));
}
}
if ((dev_pawnAttacks[i] < 0) && !(dir & 0x88) && isWhitePiece(dir))
{
if (rank_2)
{
DevAddMove(board, info, list, DevSetMove(fromSq, dir, bN, 1, 0, 0, 0));
DevAddMove(board, info, list, DevSetMove(fromSq, dir, bB, 1, 0, 0, 0));
DevAddMove(board, info, list, DevSetMove(fromSq, dir, bR, 1, 0, 0, 0));
DevAddMove(board, info, list, DevSetMove(fromSq, dir, bQ, 1, 0, 0, 0));
}
else
{
DevAddMove(board, info, list, DevSetMove(fromSq, dir, 0, 1, 0, 0, 0));
}
}
}
}
// castling
if (DevGetSq(fromSq) == bK)
{
if (castle & k)
{
if (!DevGetSq(f8) && !DevGetSq(g8))
{
if (!DevIsSquareAttacked(board, e8, w) && !DevIsSquareAttacked(board, f8, w))
DevAddMove(board, info, list, DevSetMove(e8, g8, 0, 0, 0, 0, 1));
}
}
if (castle & q)
{
if (!DevGetSq(d8) && !DevGetSq(c8) && !DevGetSq(b8))
{
if (!DevIsSquareAttacked(board, e8, w) && !DevIsSquareAttacked(board, d8, w))
DevAddMove(board, info, list, DevSetMove(e8, c8, 0, 0, 0, 0, 1));
}
}
}
}
// knights
if (side ? DevGetSq(fromSq) == bN : DevGetSq(fromSq) == wN)
{
for (int i = 0; i < 8; ++i)
{
int dir = sq + dev_knightAttacks[i];
int delta = DevGetSq(dir);
if (!(dir & 0x88))
{
if (side ? (!delta || isWhitePiece(dir)) : (!delta || isBlackPiece(dir)))
{
if (!delta)
DevAddMove(board, info, list, DevSetMove(fromSq, dir, 0, 0, 0, 0, 0));
else
DevAddMove(board, info, list, DevSetMove(fromSq, dir, 0, 1, 0, 0, 0));
}
}
}
}
// bishops and queens
if (side ? (DevGetSq(fromSq) == bB) || (DevGetSq(fromSq) == bQ) : (DevGetSq(fromSq) == wB) || (DevGetSq(fromSq) == wQ))
{
for (int i = 0; i < 4; ++i)
{
int dir = sq + dev_bishopAttacks[i];
while (!(dir & 0x88))
{
int delta = DevGetSq(dir);
// if hits own piece
if (side ? isBlackPiece(dir) : isWhitePiece(dir))
break;
// if hits opponent's piece
else if (side ? isWhitePiece(dir) : isBlackPiece(dir))
{
DevAddMove(board, info, list, DevSetMove(fromSq, dir, 0, 1, 0, 0, 0));
break;
}
// on empty square
else if (!delta)
{
DevAddMove(board, info, list, DevSetMove(fromSq, dir, 0, 0, 0, 0, 0));
}
dir += dev_bishopAttacks[i];
}
}
}
// rooks and queens
if (side ? (DevGetSq(fromSq) == bR) || (DevGetSq(fromSq) == bQ) : (DevGetSq(fromSq) == wR) || (DevGetSq(fromSq) == wQ))
{
for (int i = 0; i < 4; ++i)
{
int dir = sq + dev_rookAttacks[i];
while (!(dir & 0x88))
{
int delta = DevGetSq(dir);
// if hits own piece
if (side ? isBlackPiece(dir) : isWhitePiece(dir))
break;
// if hits opponent's piece
else if (side ? isWhitePiece(dir) : isBlackPiece(dir))
{
DevAddMove(board, info, list, DevSetMove(fromSq, dir, 0, 1, 0, 0, 0));
break;
}
// on empty square
else if (!delta)
{
DevAddMove(board, info, list, DevSetMove(fromSq, dir, 0, 0, 0, 0, 0));
}
dir += dev_rookAttacks[i];
}
}
}
// kings
if (side ? DevGetSq(fromSq) == bK : DevGetSq(fromSq) == wK)
{
for (int i = 0; i < 8; ++i)
{
int dir = sq + dev_kingAttacks[i];
int delta = DevGetSq(dir);
if (!(dir & 0x88))
{
if (side ? (!delta || isWhitePiece(dir)) : (!delta || isBlackPiece(dir)))
{
if (!delta)
DevAddMove(board, info, list, DevSetMove(fromSq, dir, 0, 0, 0, 0, 0));
else
DevAddMove(board, info, list, DevSetMove(fromSq, dir, 0, 1, 0, 0, 0));
}
}
}
}
}
}
}
// Function that makes a Move and returns a number based on whether or not the move is legal
// The function works by taking the move and checking whether or not its legal, and if it is
// then the function will change the board position
static inline int MakeMove(Chessboard *board, int move, int capFlag)
{
// if capFlag make only captures else make all
if (!capFlag)
{
ply++;
Chessboard boardStored[1];
boardStored[0] = board[0];
int fromSq = GetMoveSource(move);
int toSq = GetMoveTarget(move);
// move piece
GetSq(toSq) = GetSq(fromSq);
GetSq(fromSq) = emSq;
// promotions
if (GetMovePromPiece(move))
{
GetSq(toSq) = GetMovePromPiece(move);
GetSq(fromSq) = emSq;
}
// en passant flag
if (GetMoveEnPassantFlag(move))
{
side ? (GetSq(enPassant + 16) = 0) : (GetSq(enPassant - 16) = 0);
enPassant = noSq;
}
enPassant = noSq;
// pawn start flag
if (GetMovePawnStartFlag(move))
{
side ? (enPassant = toSq + 16) : (enPassant = toSq - 16);
}
// castling flag
if (GetMoveCastleFlag(move))
{
switch (toSq)
{
case g1:
GetSq(f1) = GetSq(h1);
GetSq(h1) = emSq;
break;
case c1:
GetSq(d1) = GetSq(a1);
GetSq(a1) = emSq;
break;
case g8:
GetSq(f8) = GetSq(h8);
GetSq(h8) = emSq;
break;
case c8:
GetSq(d8) = GetSq(a8);
GetSq(a8) = emSq;
break;
}
}
// update castling permission
castle &= castling[fromSq];
castle &= castling[toSq];
// update kingSq
if (GetSq(GetMoveTarget(move)) == wK || GetSq(GetMoveTarget(move)) == bK)
kingSq(side) = GetMoveTarget(move);
// change side
side ^= 1;
// take back if king is in check
if (InCheck(board, side ^ 1))
{
TakeBack(board, boardStored);
return 0;
}
else
return 1;
}
else
{
if (GetMoveCaptureFlag(move))
MakeMove(board, move, allPos);
else
return 0;
}
return 0;
}
// Device version of function above
__device__ static inline int DevMakeMove(Chessboard *board, int move, int capFlag)
{
// if capFlag make only captures else make all
if (!capFlag)
{
ply++;
Chessboard boardStored[1];
boardStored[0] = board[0];
int fromSq = DevGetMoveSource(move);
int toSq = DevGetMoveTarget(move);
// move piece
DevGetSq(toSq) = DevGetSq(fromSq);
DevGetSq(fromSq) = emSq;
// promotions
if (DevGetMovePromPiece(move))
{
DevGetSq(toSq) = DevGetMovePromPiece(move);
DevGetSq(fromSq) = emSq;
}
// en passant flag
if (DevGetMoveEnPassantFlag(move))
{
side ? (DevGetSq(enPassant + 16) = 0) : (DevGetSq(enPassant - 16) = 0);
enPassant = noSq;
}
enPassant = noSq;
// pawn start flag
if (DevGetMovePawnStartFlag(move))
{
side ? (enPassant = toSq + 16) : (enPassant = toSq - 16);
}
// castling flag
if (DevGetMoveCastleFlag(move))
{
switch (toSq)
{
case g1:
DevGetSq(f1) = DevGetSq(h1);
DevGetSq(h1) = emSq;
break;
case c1:
DevGetSq(d1) = DevGetSq(a1);
DevGetSq(a1) = emSq;
break;
case g8:
DevGetSq(f8) = DevGetSq(h8);
DevGetSq(h8) = emSq;
break;
case c8:
DevGetSq(d8) = DevGetSq(a8);
DevGetSq(a8) = emSq;
break;
}
}
// update castling permission
castle &= dev_castling[fromSq];
castle &= dev_castling[toSq];
// update kingSq
if (DevGetSq(DevGetMoveTarget(move)) == wK || DevGetSq(DevGetMoveTarget(move)) == bK)
DevkingSq(side) = DevGetMoveTarget(move);
// change side
side ^= 1;
// take back if king is in check
if (DevInCheck(board, side ^ 1))
{
DevTakeBack(board, boardStored);
return 0;
}
else
return 1;
}
else
{
if (DevGetMoveCaptureFlag(move))
DevMakeMove(board, move, allPos);
else
return 0;
}
return 0;
}
// Evaluates the position
// The function loops through all the pieces on the board and
// adds the material weight of every piece to a sum
// the material weight is defined by the tables at the top
// which are the stockfish analysis tables.
static inline int EvaluatePosition(Chessboard *board)
{
int score = 0;
for (int sq = 0; sq < 128; ++sq)
{
if (!(sq & 0x88) && GetSq(sq))
{
// evaluate material
score += materialWeight[GetSq(sq)];
// evaluate piece placement
switch (GetSq(sq))
{
case wP:
score += Pawns[sq];
break;
case wN:
score += Knights[sq];
break;
case wB:
score += Bishops[sq];
break;
case wR:
score += Rooks[sq];
break;
case wK:
score += Kings[sq];
break;
case bP:
score -= Pawns[MirrorSq(sq)];
break;
case bN:
score -= Knights[MirrorSq(sq)];
break;
case bB:
score -= Bishops[MirrorSq(sq)];
break;
case bR:
score -= Rooks[MirrorSq(sq)];
break;
case bK:
score -= Kings[MirrorSq(sq)];
break;
}
}
}
if (!side)
return score;
else
return -score;
}
// Device repeat function
__device__ static inline int DevEvaluatePosition(Chessboard *board)
{
int score = 0;
for (int sq = 0; sq < 128; ++sq)
{
if (!(sq & 0x88) && GetSq(sq))
{
// evaluate material
score += dev_materialWeight[GetSq(sq)];
// evaluate piece placement
switch (GetSq(sq))
{
case wP:
score += dev_Pawns[sq];
break;
case wN:
score += dev_Knights[sq];
break;
case wB:
score += dev_Bishops[sq];
break;
case wR:
score += dev_Rooks[sq];
break;
case wK:
score += dev_Kings[sq];
break;
case bP:
score -= dev_Pawns[DevMirrorSq(sq)];
break;
case bN:
score -= dev_Knights[DevMirrorSq(sq)];
break;
case bB:
score -= dev_Bishops[DevMirrorSq(sq)];
break;
case bR:
score -= dev_Rooks[DevMirrorSq(sq)];
break;
case bK:
score -= dev_Kings[DevMirrorSq(sq)];
break;
}
}
}
if (!side)
return score;
else
return -score;
}
// Sets all of the Search variables to 0
void InitSearch(Search *info)
{
info->nodes = 0;
info->fhf = 0;
info->fh = 0;
info->bestScore = 0;
}
// NegaMaxSearch that runs entirely on the CPU, for comparison purposes, explained in doc
static int RegNegaMaxSearch(Chessboard *board, Search *info, int depth)
{
int bestMove = 0;
int alpha = -50000;
int oldAlpha = alpha;
int score = -50000;
int legalMoves = 0;
info->nodes++;
if (depth == 0)
return EvaluatePosition(board);
Movelist list[1];
GenerateMoves(board, info, list);
// loops through all possible moves, recurssively calls function
for (int moveNum = 0; moveNum < list->moveCount; ++moveNum)
{
Chessboard boardStored[1];
boardStored[0] = board[0];
SortMoves;
if (!MakeMove(board, list->moves[moveNum].move, allPos))
continue;
legalMoves++;
score = -RegNegaMaxSearch(board, info, depth - 1);
TakeBack(board, boardStored);
if (score > alpha)
{
alpha = score;
bestMove = list->moves[moveNum].move;
}
}
if (!legalMoves)
{
if (InCheck(board, side))
return -49000 + ply; // on checkmate
else
return 0; // on stalemate
}
if (alpha != oldAlpha)
{
info->bestMove = bestMove;
}
return alpha;
}
// NegaMaxSearch used when CPU depth is above 2, function is not in use anywhere as CPUdepth > 2 is not allowed
static int EvalNegaMaxSearch(Chessboard *board, Search *info, Search *searchP, int depth, int &count)
{
int bestMove = 0;
int alpha = -50000;
int oldAlpha = alpha;
int score = -50000;
int legalMoves = 0;
info->nodes++;
if (depth == 0)
{
count++;
return searchP[count - 1].bestScore;
}
Movelist list[1];
GenerateMoves(board, info, list);
// the good loop
for (int moveNum = 0; moveNum < list->moveCount; ++moveNum)
{
Chessboard boardStored[1];
boardStored[0] = board[0];
SortMoves;
if (!MakeMove(board, list->moves[moveNum].move, allPos))
continue;
legalMoves++;
score = -EvalNegaMaxSearch(board, info, searchP, depth - 1, count);
TakeBack(board, boardStored);
if (score > alpha)
{
alpha = score;
bestMove = list->moves[moveNum].move;
}
}
if (!legalMoves)
{
if (InCheck(board, side))
return -49000 + ply; // on checkmate
else
return 0; // on stalemate
}
if (alpha != oldAlpha)
{
info->bestMove = bestMove;
}
return alpha;
}
// Same as CreateNegaMaxSearch except dcount is no longer here and since we aren't at the top depth validMoves are not being added
// Recursive board-enumeration helper below the top CPU ply: walks every move
// sequence down to depth 0 and, at each leaf, appends the board position and
// a fresh zeroed Search record to the global `boards` / `searches`
// collections for later GPU processing. `count` tracks how many leaves have
// been recorded so far.
static void ContinueNegaMaxSearch(Chessboard *board, Search *info, int depth, int &count)
{
    info->nodes++;
    // Leaf: record this position as one unit of GPU work
    if (depth == 0)
    {
        Search push;
        InitSearch(&push);
        boards[count] = board[0];
        searches.push_back(push);
        count++;
        return;
    }
    Movelist list[1];
    GenerateMoves(board, info, list);
    // the good loop
    for (int moveNum = 0; moveNum < list->moveCount; ++moveNum)
    {
        // Snapshot the board so the move can be undone afterwards
        Chessboard boardStored[1];
        boardStored[0] = board[0];
        if (!MakeMove(board, list->moves[moveNum].move, allPos))
            continue;
        ContinueNegaMaxSearch(board, info, depth - 1, count);
        // store the count in a vector
        TakeBack(board, boardStored);
    }
    return;
}
//NegaMaxSearch style searchwith the adding of the board positions and info to the vectors.
// Top-level board enumeration: for each legal root move, records the move in
// `valid`, recurses via ContinueNegaMaxSearch to collect all leaf positions,
// and stores the cumulative leaf count in moveCounterPtr[dcount] so the
// host can later map GPU results back to root moves (used when cpu_depth == 2).
static void CreateNegaMaxSearch(Chessboard *board, Search *info, Move *valid, int *moveCounterPtr, int depth, int &count, int &dcount)
{
    info->nodes++;
    // Leaf: only reached if called with depth == 0 directly
    if (depth == 0)
    {
        Search push;
        InitSearch(&push);
        boards[count] = board[0];
        // NOTE(review): this path inserts at the FRONT of `searches` while
        // ContinueNegaMaxSearch uses push_back, so the ordering relative to
        // `boards[count]` differs between the two paths — confirm this
        // branch is truly unreachable for depth >= 1 callers.
        searches.insert(searches.begin(), push);
        count++;
        return;
    }
    Movelist list[1];
    GenerateMoves(board, info, list);
    // Loops through all possible moves at this level and recursively calls itself with 1 less depth
    for (int moveNum = 0; moveNum < list->moveCount; ++moveNum)
    {
        // Snapshot the board so the move can be undone afterwards
        Chessboard boardStored[1];
        boardStored[0] = board[0];
        if (!MakeMove(board, list->moves[moveNum].move, allPos))
            continue;
        // Adds valid moves to array for analysis after GPU search
        valid[dcount].move = list->moves[moveNum].move;
        ContinueNegaMaxSearch(board, info, depth - 1, count);
        // Counter used for searching when CPUdepth = 2 to do a MiniMax search
        moveCounterPtr[dcount] = count;
        //moveCounter->push_back(count[0]);
        dcount++;
        TakeBack(board, boardStored);
    }
}
// NegaMaxSearch that the GPU should call, Same as RegNegaMax
// Device-side recursive negamax, the GPU counterpart of RegNegaMaxSearch.
// Each thread runs this independently on its own board. On an improved
// alpha it stores both the best move and the best score in `info` so the
// host can read the result back. Recursion depth drives the manual
// cudaLimitStackSize set by the host before launch.
__device__ static int SplitNegaMaxSearch(Chessboard *board, Search *info, int depth)
{
    int bestMove = 0;       // best move found at this node
    int alpha = -50000;     // running maximum score
    int oldAlpha = alpha;   // used to detect improvement
    int score = -50000;
    int legalMoves = 0;
    info->nodes++;
    // Leaf: static evaluation on the device
    if (depth == 0)
    {
        //printf("ep ");
        return DevEvaluatePosition(board);
    }
    Movelist list[1];
    DevGenerateMoves(board, info, list);
    // the good loop
    for (int moveNum = 0; moveNum < list->moveCount; ++moveNum)
    {
        // Snapshot the board for restoration after the recursive call
        Chessboard boardStored[1];
        boardStored[0] = board[0];
        // NOTE(review): bare `SortMoves;` — likely a no-op unless SortMoves
        // is a statement-like macro; confirm intent.
        SortMoves;
        if (!DevMakeMove(board, list->moves[moveNum].move, allPos))
            continue;
        legalMoves++;
        score = -SplitNegaMaxSearch(board, info, depth - 1);
        // this feels really inefficient as we could just DevMakeMove the same move backward
        DevTakeBack(board, boardStored);
        if (score > alpha)
        {
            alpha = score;
            bestMove = list->moves[moveNum].move;
        }
    }
    // No legal moves: checkmate or stalemate
    if (!legalMoves)
    {
        if (DevInCheck(board, side))
            return -49000 + ply; // on checkmate
        else
            return 0; // on stalemate
    }
    if (alpha != oldAlpha)
    {
        info->bestMove = bestMove;
        info->bestScore = alpha;
    }
    return alpha;
}
// Kernel function called that starts searching assuming the thread number is in our needed array of searches
// Kernel entry point: each thread runs an independent device-side negamax on
// its own leaf board/search slot. The grid is rounded up, so threads past
// dev_totalThreadCount[0] simply exit.
__global__ static void kernelSearch(Chessboard *dev_board, Search *dev_searches, int *dev_totalThreadCount, int *gpu_depth)
{
    // Fix: derive the flat thread index from blockDim.x instead of the
    // hard-coded 256, so the kernel stays correct if the host ever changes
    // its threads-per-block choice. (Behavior is identical for the current
    // launches: blockDim.x is 256 whenever more than one block is used.)
    int location = blockIdx.x * blockDim.x + threadIdx.x;
    int depth = gpu_depth[0];
    //printf("ker: %i\n", location);
    // Tail guard: only threads with real work run the search
    if (location < dev_totalThreadCount[0])
    {
        SplitNegaMaxSearch(&dev_board[location], &dev_searches[location], depth);
        //printf("done");
    }
}
//The Search algorithm that calls and manages everything
// Orchestrates the hybrid CPU/GPU search:
//   1. CPU enumerates all positions `cpu_depth` plies deep into `boards`.
//   2. Each position is searched `gpu_depth` plies further by one GPU thread.
//   3. The per-leaf results are combined on the CPU (max for cpu_depth 1,
//      minimax for cpu_depth 2) to choose the root move.
// Returns the root score and stores the chosen move in info->bestMove.
static int GPUNegaMaxSearch(Chessboard *board, Search *info, int cpu_depth, int gpu_depth)
{
    // Used for when cpuDepth is 2 (one cumulative leaf count per root move)
    int moveCounter[146];
    // NOTE(review): pow(100, 3) = 1e6 entries reserved up front — sized for
    // the worst case at cpu_depth 2; confirm this bound if depths grow.
    boards.reserve(pow(100, 3));
    searches.reserve(pow(100, 3));
    // initalize the variables to 0
    int totalThreadCount = 0;
    int dcount = 0;
    Movelist saveList[1];
    saveList->moveCount = 0;
    GenerateMoves(board, info, saveList);
    Move validMoves[256];
    Move *validMovePtr = validMoves;
    int *moveCounterPtr = moveCounter;
    // Creates and saves the boards we need to run on the GPU
    CreateNegaMaxSearch(board, info, validMovePtr, moveCounterPtr, cpu_depth, totalThreadCount, dcount);
    // Calculate the block count and total threads per block (ceil division)
    double result = (double)totalThreadCount / (double)256;
    int blockCount = (int)(ceil(result));
    //printf("blockCount: %i", blockCount);
    int threadsPerBlock = 256;
    if (totalThreadCount < threadsPerBlock)
        threadsPerBlock = totalThreadCount;
    //printf("TTC: %i\n", totalThreadCount);
    // Host staging buffers mirroring the device arrays
    Chessboard *boardP;
    Search *searchP;
    boardP = (Chessboard *)malloc(totalThreadCount * sizeof(Chessboard));
    searchP = (Search *)malloc(totalThreadCount * sizeof(Search));
    Chessboard *dev_boards;
    Search *dev_searches;
    int *dev_totalThreadCount;
    int *dev_gpuDepth;
    //allocate space for boards and searchers
    cudaMalloc((void **)&dev_boards, totalThreadCount * sizeof(Chessboard));
    cudaMalloc((void **)&dev_searches, totalThreadCount * sizeof(Search));
    cudaMalloc((void **)&dev_totalThreadCount, sizeof(int));
    cudaMalloc((void **)&dev_gpuDepth, sizeof(int));
    // copy vectors to arrays
    for (int i = 0; i < totalThreadCount; i++)
    {
        boardP[i] = boards[i];
        searchP[i] = searches[i];
    }
    // copy arrays to the GPU
    cudaMemcpy(dev_boards, boardP, totalThreadCount * sizeof(Chessboard), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_searches, searchP, totalThreadCount * sizeof(Search), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_totalThreadCount, &totalThreadCount, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_gpuDepth, &gpu_depth, sizeof(int), cudaMemcpyHostToDevice);
    // CUDA cannot calculate recursive stack size, so we have to set stack size manually
    size_t limit = 2616*(gpu_depth+1); //space taken up by going one deeper, plus the first depth
    cudaDeviceSetLimit(cudaLimitStackSize, limit);
    // create variables and then run our search
    dim3 grid(blockCount, 1, 1);
    kernelSearch<<<grid, threadsPerBlock>>>(dev_boards, dev_searches, dev_totalThreadCount, dev_gpuDepth);
    cudaDeviceSynchronize();
    // copy the data back to the arrays
    cudaMemcpy(boardP, dev_boards, totalThreadCount * sizeof(Chessboard), cudaMemcpyDeviceToHost);
    cudaMemcpy(searchP, dev_searches, totalThreadCount * sizeof(Search), cudaMemcpyDeviceToHost);
    cudaFree(dev_boards);
    cudaFree(dev_searches);
    // Fix: these two device allocations were never freed, leaking device
    // memory on every call to GPUNegaMaxSearch.
    cudaFree(dev_totalThreadCount);
    cudaFree(dev_gpuDepth);
    int retScore = -50000;
    int bestIndex = 0;
    if (cpu_depth == 1)
    {
        // Simple search: leaf scores are from the opponent's point of view,
        // so negate before taking the maximum
        int reverse = -1;
        for (int i = 0; i < totalThreadCount; i++)
        {
            if (reverse * searchP[i].bestScore > retScore)
            {
                retScore = reverse * searchP[i].bestScore;
                bestIndex = i;
            }
        }
        info->bestMove = validMoves[bestIndex].move;
    }
    else if (cpu_depth == 2)
    {
        // MiniMax-style pass: for each root move take the minimum over its
        // leaf group (opponent's best reply), then maximize over root moves.
        int lastIndex = 0;
        int maxVal = -90000;
        int minVal = 90000;
        for (int i = 0; i < dcount; i++)
        {
            minVal = 90000;
            for (int j = lastIndex; j < moveCounter[i]; j++)
            {
                if (searchP[j].bestScore < minVal)
                {
                    minVal = searchP[j].bestScore;
                }
            }
            if (minVal > maxVal)
            {
                maxVal = minVal;
                bestIndex = i;
            }
            lastIndex = moveCounter[i];
        }
        retScore = maxVal;
        info->bestMove = validMoves[bestIndex].move;
    }
    else
    {
        // This is logically sound as the evalutation function just becomes the array index of that position
        // but will not launch as the number of threads will be too high
        int count = 0;
        retScore = EvalNegaMaxSearch(board, info, searchP, cpu_depth, count);
    }
    free(boardP);
    free(searchP);
    return retScore;
}
// Function that runs our search and times our functions
// Runs one search (GPU-assisted when gpuDepth > 0, otherwise pure CPU),
// times it, prints the score/best move, and appends the elapsed time to
// out_file.txt. Returns silently on a mate score of 49000.
static inline void SearchPosition(Chessboard *board, Search *info, int cpuDepth, int gpuDepth)
{
    clock_t start, end;
    start = clock();
    int score;
    if (gpuDepth > 0)
    {
        score = GPUNegaMaxSearch(board, info, cpuDepth, gpuDepth);
    }
    else
    {
        score = RegNegaMaxSearch(board, info, cpuDepth);
    }
    end = clock();
    double time = (double)(end - start) / CLOCKS_PER_SEC;
    if (score == 49000)
        return;
    printf("info score cp %d depth %d depth %d\n", score, cpuDepth, gpuDepth);
    printf("Time taken to depth %f seconds\n", time);
    FILE *out_file = fopen("out_file.txt", "a"); // write only
    // Fix: the original dereferenced out_file without checking for fopen
    // failure; skip the log entry instead of crashing.
    if (out_file != NULL)
    {
        fprintf(out_file, "%f \n", time);
        fclose(out_file);
    }
    printf("bestmove ");
    PrintMove(info->bestMove);
    printf("\n");
    //printf("Move ordering: %.2f\n",(info->fhf/info->fh));
}
// Function that converts a FEN into a board
// Parses a FEN string into the board: piece placement, side to move,
// castling rights, and en-passant square. Halfmove/fullmove fields are not
// consumed. Relies on the RankLoop/FileLoop macros to drive rank/file and
// writes to the engine's global state (side, castle, enPassant, king squares).
void ParseFen(Chessboard *board, char *fen)
{
    ResetBoard(board);
    RankLoop{
        FileLoop{
            int sq = fr2sq(file, rank);
            // parse position
            if (IsOnBoard(sq))
            {
                if (isPieceChar(*fen))
                {
                    // Track king squares for in-check tests
                    if (*fen == 'K')
                        kingSq(w) = sq;
                    else if (*fen == 'k')
                        kingSq(b) = sq;
                    // Fix: replaced raw ASCII case labels (114, 110, ...)
                    // with the equivalent character literals — identical
                    // behavior, readable without an ASCII table.
                    // NOTE(review): the wR/bR labels look swapped relative to
                    // FEN convention (lowercase = black); the internal piece
                    // codes may simply differ — confirm against the piece enum.
                    switch (*fen)
                    {
                    case 'r':
                        SetSq(sq, 12);
                        break; // wR
                    case 'n':
                        SetSq(sq, 10);
                        break; // wN
                    case 'b':
                        SetSq(sq, 11);
                        break; // wB
                    case 'q':
                        SetSq(sq, 13);
                        break; // wQ
                    case 'p':
                        SetSq(sq, 9);
                        break; // wP
                    case 'R':
                        SetSq(sq, 4);
                        break; //bR
                    case 'N':
                        SetSq(sq, 2);
                        break; //bN
                    case 'B':
                        SetSq(sq, 3);
                        break; //bB
                    case 'Q':
                        SetSq(sq, 5);
                        break; //bQ
                    case 'P':
                        SetSq(sq, 1);
                        break; //bP
                    }
                    // Fix: was `*fen++;` — the dereference result was
                    // discarded; plain pointer increment is what was meant.
                    fen++;
                }
                if (isDigit(*fen))
                {
                    // A digit means `count` consecutive empty squares
                    int count = *fen - '0';
                    if (!GetSq(sq))
                        file++;
                    file -= count;
                    fen++; // fix: was `*fen++;`
                }
                if (*fen == '/')
                {
                    fen++; // fix: was `*fen++;`
                    file--;
                }
            }
        }
    }
    fen++; // fix: was `*fen++;` — skip the space after the placement field
    // parse stats
    side = (*fen == 'w') ? w : b;
    fen += 2;
    // Castling rights field: any combination of KQkq, or '-'
    while (*fen != ' ')
    {
        switch (*fen)
        {
        case 'K':
            castle |= K;
            break;
        case 'Q':
            castle |= Q;
            break;
        case 'k':
            castle |= k;
            break;
        case 'q':
            castle |= q;
            break;
        case '-':
            castle = 0;
        }
        fen++;
    }
    fen++;
    // En-passant target square, e.g. "e3"; '-' means none
    if (*fen != '-')
    {
        int file = fen[0] - 'a';
        int rank = fen[1] - '0';
        enPassant = parse2sq(file, rank);
    }
}
// Parses a move
// Converts a coordinate-notation move string (e.g. "e2e4", "e7e8q") into the
// engine's move encoding by matching it against the generated move list.
// Returns the encoded move, or 0 if no generated move matches.
int ParseMove(Chessboard *board, Search *info, char *moveStr)
{
    Movelist list[1];
    GenerateMoves(board, info, list);
    // Decode source/target squares from file letter + rank digit (0x88-style, 16 per rank)
    int parseFrom = (moveStr[0] - 'a') + (moveStr[1] - '0' - 1) * 16;
    int parseTo = (moveStr[2] - 'a') + (moveStr[3] - '0' - 1) * 16;
    int promPiece = 0;
    int move;
    for (int moveNum = 0; moveNum < list->moveCount; ++moveNum)
    {
        move = list->moves[moveNum].move;
        // Skip moves whose squares don't match the input
        if (GetMoveSource(move) != parseFrom || GetMoveTarget(move) != parseTo)
            continue;
        promPiece = GetMovePromPiece(move);
        // Non-promotion: squares alone identify the move
        if (!promPiece)
            return move;
        // Promotion: the fifth character must select the promoted piece
        char promChar = moveStr[4];
        if (((promPiece == wN || promPiece == bN) && promChar == 'n') ||
            ((promPiece == wB || promPiece == bB) && promChar == 'b') ||
            ((promPiece == wR || promPiece == bR) && promChar == 'r') ||
            ((promPiece == wQ || promPiece == bQ) && promChar == 'q'))
            return move;
    }
    return 0;
}
// Performance testing code when "test" is called, Search Position but loops through file for input
// Performance-testing driver: reads one FEN per line from test.txt and runs
// the same timed search/report sequence as SearchPosition on each.
static inline void TestSearchPosition(Chessboard *board, Search *info, int cpuDepth, int gpuDepth)
{
    char const *const fileName = "test.txt";
    FILE *file = fopen(fileName, "r");
    // Fix: the original passed a possibly-NULL FILE* straight to fgets
    if (file == NULL)
    {
        printf("could not open %s\n", fileName);
        return;
    }
    char line[256];
    while (fgets(line, sizeof(line), file))
    {
        ParseFen(board, line);
        InitSearch(info);
        PrintBoard(board);
        clock_t start, end;
        start = clock();
        int score;
        if (gpuDepth > 0)
        {
            score = GPUNegaMaxSearch(board, info, cpuDepth, gpuDepth);
        }
        else
        {
            score = RegNegaMaxSearch(board, info, cpuDepth);
        }
        end = clock();
        double time = (double)(end - start) / CLOCKS_PER_SEC;
        if (score == 49000)
        {
            fclose(file); // fix: don't leak the input file on early return
            return;
        }
        printf("info score cp %d depth %d depth %d\n", score, cpuDepth, gpuDepth);
        printf("Time taken to depth %f seconds\n", time);
        FILE *out_file = fopen("out_file.txt", "a"); // write only
        // Fix: check fopen before writing
        if (out_file != NULL)
        {
            fprintf(out_file, "%f \n", time);
            fclose(out_file);
        }
        printf("bestmove ");
        PrintMove(info->bestMove);
        printf("\n");
    }
    fclose(file); // fix: the input file was never closed
}
// Hammerhead main
// Hammerhead main: reads FENs from stdin in a loop and analyzes each at the
// configured CPU/GPU depths. "quit"/"done"/"over" exits; "test" runs the
// batch driver over test.txt.
int main(int argc, char *argv[])
{
    // Init everything
    Chessboard board[1];
    Search info[1];
    InitSearch(info);
    char inFen[87];
    int cpuDepth = 2;
    int gpuDepth = 3;
    // If user specifies depth, set depths
    if (argc == 3)
    {
        cpuDepth = atoi(argv[1]);
        gpuDepth = atoi(argv[2]);
    }
    // Larger cpuDepth is not yet implemented so this is a failsafe
    if (cpuDepth > 2){
        cpuDepth = 2;
    }
    // Loop through accepting and analyzing FENs
    while (1)
    {
        printf("Please enter a fen to analyze: \n");
        // Fix: the original ignored fgets' return value, so on EOF or a read
        // error it looped forever re-analyzing the stale buffer contents.
        if (fgets(inFen, 87, stdin) == NULL)
        {
            return 0;
        }
        if (std::strstr(inFen, "quit") || std::strstr(inFen, "done") || std::strstr(inFen, "over"))
        {
            return 0;
        }
        if (std::strstr(inFen, "test"))
        {
            TestSearchPosition(board, info, cpuDepth, gpuDepth);
            return 0;
        }
        ParseFen(board, inFen);
        PrintBoard(board);
        SearchPosition(board, info, cpuDepth, gpuDepth);
    }
    return 0;
}
|
13,097 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
// One step of a segmented max-scan over the edge list: for each edge i, if the
// edge `distance` positions earlier shares the same source vertex and carries
// a weight >= this edge's, adopt that edge's (weight, dst) candidate and set
// *madeChanges; otherwise carry this edge's own candidate forward unchanged.
__global__ void strongestNeighborScan_gpu(int * src, int * oldDst, int * newDst, int * oldWeight, int * newWeight, int * madeChanges, int distance, int numEdges) {
    // Grid-stride loop: total thread count may be smaller than numEdges
    const int stride = blockDim.x * gridDim.x;
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    for (int edge = first; edge < numEdges; edge += stride) {
        const int prev = edge - distance;
        // The earlier edge only competes if it exists and has the same source
        const bool sameSegment = (prev >= 0) && (src[edge] == src[prev]);
        if (sameSegment && oldWeight[edge] <= oldWeight[prev]) {
            // The earlier candidate wins (ties included): propagate it
            (*madeChanges) = 1;
            newWeight[edge] = oldWeight[prev];
            newDst[edge] = oldDst[prev];
        } else {
            // Keep this edge's own candidate
            newWeight[edge] = oldWeight[edge];
            newDst[edge] = oldDst[edge];
        }
    }
}
|
13,098 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Element-wise square: dst[i] = src[i]^2, one thread per element.
// The caller must launch exactly one thread per array element — there is no
// bounds guard.
__global__ void simpleKernel(float *dst, float *src)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const float value = src[i];
    dst[i] = value * value;
}
// Allocates host/device buffers for 64 floats, squares them on the current
// CUDA device via simpleKernel, and prints the results.
// Returns 0 on success, 1 on any allocation or launch failure (the original
// always returned 0 and performed no error checking; it also declared an
// unused `rsize` local, removed here).
int execute()
{
    const int n = 64;                     // element count
    const size_t size = sizeof(float) * n;
    //cpu buffers
    float *src = (float *)malloc(size);
    float *dst = (float *)malloc(size);
    //gpu buffers
    float *dsrc = NULL;
    float *ddst = NULL;
    int status = 1;                       // pessimistic default; cleared on success
    if (src == NULL || dst == NULL) {
        fprintf(stderr, "host allocation failed\n");
        goto cleanup;
    }
    if (cudaMalloc(&dsrc, size) != cudaSuccess ||
        cudaMalloc(&ddst, size) != cudaSuccess) {
        fprintf(stderr, "device allocation failed\n");
        goto cleanup;
    }
    for (int i = 0; i < n; ++i) {
        src[i] = (float)i;
    }
    cudaMemcpy(dsrc, src, size, cudaMemcpyHostToDevice);
    simpleKernel<<<1, n>>>(ddst, dsrc);
    {
        // Kernel launches don't return errors directly; poll explicitly
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
            goto cleanup;
        }
    }
    // Blocking device-to-host copy also synchronizes with the kernel
    cudaMemcpy(dst, ddst, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) {
        printf("%d: %f\n", i, dst[i]);
    }
    status = 0;
cleanup:
    // cudaFree/free both accept NULL, so this is safe on every path
    cudaFree(ddst);
    cudaFree(dsrc);
    free(src);
    free(dst);
    return status;
}
// Runs the squaring demo on up to two CUDA devices.
// Fix: the original unconditionally called cudaSetDevice(1), which fails
// (and leaves a sticky error) on single-GPU machines; query the device
// count first and only visit devices that exist.
int main()
{
    int deviceCount = 0;
    if (cudaGetDeviceCount(&deviceCount) != cudaSuccess || deviceCount < 1) {
        fprintf(stderr, "no CUDA devices available\n");
        return 1;
    }
    // Preserve the original behavior of exercising at most devices 0 and 1
    const int devicesToRun = (deviceCount < 2) ? deviceCount : 2;
    for (int dev = 0; dev < devicesToRun; ++dev) {
        printf("\nSetting device...");
        cudaSetDevice(dev);
        execute();
    }
    return 0;
}
|
13,099 | #include "includes.h"
#define SIZ 20
#define num_inp 4
using namespace std;
// A pair of integer endpoints describing one graph edge.
// NOTE(review): field semantics inferred from the type name only — confirm
// against the code that populates these.
typedef struct edge {
    int first, second;
} edges;
// Divides each element of a row-major (rows x size) matrix of accumulated
// score gradients by num_inputs, i.e. averages them over the batch.
// Launch layout: one block per row (blockIdx.x), one thread per column
// (threadIdx.x).
__global__ void dscore_cal_kernel(double * dscores, int num_inputs, int size)
{
    int i = blockIdx.x;   // row index
    int j = threadIdx.x;  // column index
    // Fix: guard the column index — the original indexed unconditionally, so
    // any launch with blockDim.x > size wrote past the end of the row.
    if (j < size)
        dscores[i*size + j] /= num_inputs;
}
13,100 | // nvcc pthread_cuda.cu -o pthread_cuda --cudart=shared -lpthread
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <unistd.h>
#include <sys/syscall.h>
#define ARR_SIZE 10
#define NUM_DEVICE 2
// Macro to catch CUDA errors in CUDA runtime calls
#define CUDA_CHECK(call) \
do { \
cudaError_t err = call; \
if (cudaSuccess != err) { \
fprintf (stderr, "Cuda error in file '%s' in line %i : %s.\n",\
__FILE__, __LINE__, cudaGetErrorString(err) ); \
exit(EXIT_FAILURE); \
} \
} while (0)
// Per-thread work descriptor handed to thread_func.
typedef struct {
    int *arr;         // host input array (ARR_SIZE ints)
    int *dev_arr;     // device copy of arr
    int *dev_result;  // device accumulator updated by kernel_fc via atomicAdd
    int *result;      // host buffer the device result is copied back into
    int num;          // device ordinal passed to cudaSetDevice
} cuda_st;
// Prints each element and atomically accumulates the array sum into
// *dev_result. Launched as <<<1, ARR_SIZE>>>: one thread per element.
__global__ void kernel_fc(int *dev_arr, int *dev_result)
{
    const int i = threadIdx.x;
    printf("dev_arr[%d] = %d\n", i, dev_arr[i]);
    // Atomic add: every thread targets the same accumulator
    atomicAdd(dev_result, dev_arr[i]);
}
// Worker-thread body: binds to the CUDA device given by data->num, uploads
// the host array, launches kernel_fc twice, and copies the accumulated sum
// back into data->result. Returns NULL.
void *thread_func(void* struc)
{
    cudaEvent_t start, stop;
    cuda_st * data = (cuda_st*)struc;
    printf("thread %d func start\n", data->num);
    printf("arr %d = ", data->num);
    // NOTE(review): hard-coded 10 — assumes ARR_SIZE == 10; use the macro if
    // the size ever changes.
    for(int i=0; i<10; i++) {
        printf("%d ", data->arr[i]);
    }
    printf("\n");
    // Two identical iterations; dev_result is never reset between them, so
    // the final value accumulates the sum from both launches.
    for(int i=0; i<2; i++) {
        CUDA_CHECK(cudaSetDevice(data->num));
        // NOTE(review): start/stop events are created and destroyed but never
        // recorded or queried — dead code unless timing is added.
        CUDA_CHECK(cudaEventCreate(&start));
        CUDA_CHECK(cudaEventCreate(&stop));
        CUDA_CHECK(cudaMemcpy(data->dev_arr, data->arr, sizeof(int)*ARR_SIZE, cudaMemcpyHostToDevice));
        kernel_fc<<<1,ARR_SIZE>>>(data->dev_arr, data->dev_result);
        // Blocking copy also synchronizes with the kernel launch above
        CUDA_CHECK(cudaMemcpy(data->result, data->dev_result, sizeof(int), cudaMemcpyDeviceToHost));
        CUDA_CHECK(cudaEventDestroy(start));
        CUDA_CHECK(cudaEventDestroy(stop));
    }
    printf("thread %ld func exit\n", syscall(SYS_gettid));
    return NULL;
}
// Spawns one worker thread per CUDA device; each thread sums its own array on
// its own device and the results are printed at the end.
int main(void)
{
    // Per-thread work descriptors
    cuda_st cuda[NUM_DEVICE];
    // Worker thread handles
    pthread_t pthread[NUM_DEVICE];
    // Host array memory allocation
    int *arr[NUM_DEVICE];
    for(int i=0; i<NUM_DEVICE; i++) {
        arr[i] = (int*)malloc(sizeof(int)*ARR_SIZE);
    }
    // Fill this host array up with specified data
    for(int i=0; i<NUM_DEVICE; i++) {
        for(int j=0; j<ARR_SIZE; j++) {
            arr[i][j] = i*ARR_SIZE+j;
        }
    }
    // To confirm host array data
    for(int i=0; i<NUM_DEVICE; i++) {
        printf("arr[%d] = ", i);
        for(int j=0; j<ARR_SIZE; j++) {
            printf("%d ", arr[i][j]);
        }
        printf("\n");
    }
    // Result memory allocation
    int *result[NUM_DEVICE];
    for(int i=0; i<NUM_DEVICE; i++) {
        result[i] = (int*)malloc(sizeof(int));
        // Fix: was memset(), used without including <string.h>; a direct
        // assignment is equivalent for a single int.
        *result[i] = 0;
    }
    // Device array memory allocation — fix: select each device before
    // allocating on it, so the buffers live where the worker thread
    // (which calls cudaSetDevice(num)) will use them.
    int *dev_arr[NUM_DEVICE];
    for(int i=0; i<NUM_DEVICE; i++) {
        CUDA_CHECK(cudaSetDevice(i));
        CUDA_CHECK(cudaMalloc(&dev_arr[i], sizeof(int)*ARR_SIZE));
    }
    // Device result memory allocation
    int *dev_result[NUM_DEVICE];
    for(int i=0; i<NUM_DEVICE; i++) {
        CUDA_CHECK(cudaSetDevice(i));
        CUDA_CHECK(cudaMalloc(&dev_result[i], sizeof(int)));
        CUDA_CHECK(cudaMemset(dev_result[i], 0, sizeof(int)));
    }
    // Connect these pointers with object
    for(int i=0; i<NUM_DEVICE; i++) {
        cuda[i].arr = arr[i];
        cuda[i].dev_arr = dev_arr[i];
        cuda[i].result = result[i];
        cuda[i].dev_result = dev_result[i];
        // Fix: was hard-coded 0, which sent every worker thread to device 0
        // and defeated the multi-device purpose of this demo.
        cuda[i].num = i;
    }
    // Create and excute pthread
    for(int i=0; i<NUM_DEVICE; i++) {
        pthread_create(&pthread[i], NULL, thread_func, (void*)&cuda[i]);
    }
    // Join pthread
    for(int i=0; i<NUM_DEVICE; i++) {
        pthread_join(pthread[i], NULL);
    }
    for(int i=0; i<NUM_DEVICE; i++) {
        printf("result[%d] = %d\n", i, (*cuda[i].result));
    }
    // Fix: release device and host memory (the original leaked everything)
    for(int i=0; i<NUM_DEVICE; i++) {
        CUDA_CHECK(cudaSetDevice(i));
        CUDA_CHECK(cudaFree(dev_arr[i]));
        CUDA_CHECK(cudaFree(dev_result[i]));
        free(arr[i]);
        free(result[i]);
    }
    return 0;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.