serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
5,601 | #include "includes.h"
// Identity fill: each of the first numData threads writes its own thread
// index into data[threadIdx.x]. blockIdx is never used, so this assumes a
// single 1-D block — TODO(review): confirm the launch uses one block; extra
// blocks would redundantly write the same slots.
__global__ void doGPUWork(int numData, int *data) {
if (threadIdx.x < numData) {
data[threadIdx.x] = threadIdx.x;
}
} |
5,602 | #include <stdio.h>
// CUDA to assign a value to each element of the array of integers A[256] using 256 threads.
// Each A[i] is assigned with the value of 2*i, for i = 0 to 255.
#define T 256 // As Threads
#define ArraySize 1314
// #define n 256
// Doubles every element of A in place. The ArraySize elements are split
// into 256 contiguous chunks, one per thread: thread t covers indices
// [t*ArraySize/256, (t+1)*ArraySize/256). Integer chunking handles
// ArraySize values that are not multiples of 256 (chunks differ by one).
// BUG FIX: the original computed end = upperBound - 1 and looped with
// `i < end`, which skipped the last element of every thread's chunk.
__global__ void vecMultiply(int *A) {
	int threadID = threadIdx.x;
	int start = (threadID * ArraySize) / 256;
	int end = ((threadID + 1) * ArraySize) / 256; // exclusive upper bound
	for (int i = start; i < end; i++) {
		A[i] = A[i] * 2;
	}
}
// Host driver: fills a[0..ArraySize-1] with i+1, doubles it on the GPU with
// one 256-thread block, and prints the array before and after.
int main (int argc, char *argv[]){
	int i;
	int size = ArraySize * sizeof(int); // transfer size in BYTES
	// BUG FIX: the original declared `int a[size]` — using the byte count as
	// an element count, allocating 4x the needed stack space.
	int a[ArraySize], *devA;
	for (i = 0; i < ArraySize; i++)
	{
		a[i] = i + 1;
	}
	cudaMalloc((void**)&devA, size);
	cudaMemcpy(devA, a, size, cudaMemcpyHostToDevice);
	// Print the untouched host copy first, then launch.
	printf("Before\n");
	for (i = 0; i < ArraySize; i++)
	{
		printf("%d ", a[i]);
	}
	printf("\n");
	vecMultiply<<<1, 256>>>(devA); // one block of 256 threads
	// Blocking copy back — also synchronizes with the kernel.
	cudaMemcpy(a, devA, size, cudaMemcpyDeviceToHost);
	cudaFree(devA);
	printf("After\n");
	for (i = 0; i < ArraySize; i++) {
		printf("%d ", a[i]);
	}
	printf("\n");
	return 0;
}
|
5,603 | /**************
* HydroCuda model
*
* HBV hydrological model
* using cuda
***************/
#include <math.h>
// One time step of the HBV rainfall-runoff model, fully vectorised: each
// thread updates element i of every state/parameter array. There is no
// bounds guard, so every launched thread must map to a valid array slot —
// TODO(review): confirm the launch configuration matches the array length.
// NOTE(review): pow/min/max on float arguments may promote through double
// overloads in device code; powf/fminf/fmaxf would avoid that, left
// unchanged here to preserve bit-exact behaviour.
__global__ void hbv_dynamic( float *q ,
float *etr ,
float *qin ,
float *qlz ,
float *qq ,
float *qi ,
float *qcf ,
float *sp ,
float *inet ,
float *qdr ,
float *pp ,
float *etp ,
float *sm ,
float *fc ,
float *vlz ,
float *vuz ,
float *cflux,
float *icf ,
float *beta ,
float *lp ,
float *perc ,
float *uzl ,
float *klz ,
float *ki ,
float *kq )
{
// Flattened global index for a 2-D grid of 2-D blocks.
const int i = blockDim.x*blockDim.y*blockIdx.y*gridDim.x +
blockDim.x*blockDim.y*blockIdx.x + blockDim.x*threadIdx.y + threadIdx.x;
// In flows: precipitation in excess of the interception capacity icf
// becomes net inflow qin; the rest is intercepted.
if (pp[i] > icf[i]){
qin[i] = pp[i] - icf[i];
}
else {
qin[i] = 0.f;
}
// Evaporation from interception storage, capped by the capacity.
etr[i] = min(icf[i], etp[i]);
// State variables: add inflow to soil moisture, spill anything above
// field capacity fc as direct runoff qdr.
sm[i] = sm[i] + qin[i];
qdr[i] = max(sm[i] - fc[i], 0.f);
sm[i] = sm[i] - qdr[i];
inet[i] = qin[i] - qdr[i];
// Nonlinear recharge: fraction (sm/fc)^beta of the net inflow leaves
// the soil store.
sp[i] = pow(sm[i] / fc[i], beta[i]) * inet[i];
sm[i] = sm[i] - sp[i];
// Evapotranspiration from soil: remaining demand, scaled by soil
// wetness below the lp*fc threshold, and never more than sm itself.
etp[i] = etp[i] - etr[i];
etr[i] = min(min(sm[i] * etp[i] / (lp[i] * fc[i]), etp[i]), sm[i]);
sm[i] = sm[i] - etr[i];
// Volume states: percolation (up to perc) feeds the lower zone vlz,
// the remainder feeds the upper zone vuz.
vlz[i] = vlz[i] + min(perc[i], qdr[i] + sp[i]);
vuz[i] = vuz[i] + max(0.f, qdr[i] + sp[i] - perc[i]);
// Capillary flow: upper-zone water drawn back into the soil store in
// proportion to the soil moisture deficit.
qcf[i] = cflux[i] * (fc[i] - sm[i]) / fc[i];
sm[i] = sm[i] + min(vuz[i], qcf[i]);
vuz[i] = max(vuz[i] - qcf[i], 0.f);
// Quick flow (above the uzl threshold) and interflow (below it).
qq[i] = max(kq[i] * (vuz[i] - uzl[i]), 0.f);
qi[i] = ki[i] * min(uzl[i], vuz[i]);
// Base flow: linear reservoir on the lower zone.
qlz[i] = klz[i] * vlz[i];
// Final flow: sum of the three runoff components.
q[i] = qlz[i] + qi[i] + qq[i];
}
|
5,604 | /*! \file
* \brief Custom GPU kernels used in the SAP algorithm
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include "kernels.cuh"
using namespace std;
__global__ void readVec(float * d_vector){
    /**
    Debug helper: every launched thread prints the entry of d_vector at its
    global index. Launch exactly as many threads as entries to inspect.
    @param d_vector Device array of floats whose entries are printed.
    */
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    printf("The entry at %d is %f\n", gid, d_vector[gid]);
}
__global__ void readVecInt(int * d_vector){
/**
Reads some number (specified by the number of threads at execution)
of entries from an array of ints on the device. Useful for debugging.
(Docstring fixed: the original said "floats"; this kernel reads ints.)
@param d_vector The name of the array of ints on the device
which the kernel prints entries from.
*/
int idx = blockIdx.x*blockDim.x + threadIdx.x;
printf("The entry at %d is %i\n",idx,d_vector[idx]);
}
// Calculate the secants for a collection of vectors stored as columns in a matrix, then normalize
// Calculate the secants for a collection of vectors stored as columns in a matrix, then normalize
__global__ void calculate_secants(float * d_secants, float * dpoints_in, int * d_int_constants){
    /**
    Calculates the normalized secant set for a set of points. One thread
    produces one secant (the flat thread index maps to an unordered pair of
    distinct points).
    @param d_secants The secant set for dpoints_in. Secants are stored as
    column vectors so the dimension of this matrix is (dim. of input data x
    number of secants). This matrix is the output of this kernel.
    @param dpoints_in The input points stored as column vectors. The dimension
    of this matrix is (dim. of input data x number of points in data set).
    @param d_int_constants An integer array which holds the input dimension
    (the first entry) and the number of points (the second entry).
    */
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    // Number of points
    int n = d_int_constants[1];
    // Input dimension
    int input_dim = d_int_constants[0];
    // Map the flat index to a pair of distinct point indices.
    int i = idx % (n-1);
    int j = (idx - i)/(n-1);
    int pair1;
    int pair2;
    if (i >= j){
        pair1 = i+1;
        pair2 = j;
    }else{
        pair1 = n-i-1;
        pair2 = n-j-1;
    }
    // Compute the secant coordinate by coordinate, accumulating its squared
    // norm in the same pass. (The original wrote the secant to global
    // memory, then re-read every coordinate in a second loop just to
    // compute the norm.)
    float norm = 0.0f;
    for (int p = 0; p < input_dim; p++){
        float diff = dpoints_in[input_dim*pair1 + p] - dpoints_in[input_dim*pair2 + p];
        d_secants[idx*input_dim + p] = diff;
        norm += diff * diff;
    }
    norm = sqrtf(norm);
    // As long as the norm is not zero (coincident points), normalize.
    if (norm != 0.0f){
        float inv = 1.0f / norm;
        for (int p = 0; p < input_dim; p++){
            d_secants[idx*input_dim + p] *= inv;
        }
    }
}
// Take a matrix and return a vector whose entries are the l2 norms of the matrix
// Take a matrix and return a vector whose entries are the l2 norms of the matrix
__global__ void calculate_col_norms(float * d_matrix, float * d_col_norms, int * d_int_constants){
    /**
    Computes the L2 norm of every column of a (column-major) matrix, one
    thread per column.
    @param d_matrix The matrix whose column norms are being calculated.
    @param d_col_norms Output row vector receiving one norm per column.
    @param d_int_constants Integer array of matrix dimensions; the third
    entry is the length of each column.
    */
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // Length of each column.
    int proj_dim = d_int_constants[2];
    // Sum the squares of this column's entries.
    float sum = 0;
    for (int r = 0; r < proj_dim; r++){
        sum = sum + powf(d_matrix[col*proj_dim + r], 2);
    }
    // The norm is the square root of the accumulated sum.
    d_col_norms[col] = sqrtf(sum);
}
__global__ void switch_columns(float * d_matrix, int * d_column_switch_indices){
    /**
    Swaps two columns of a column-major matrix in place. One thread handles
    one row, so launch with as many threads as the matrix has rows.
    @param d_matrix The matrix whose columns are being switched.
    @param d_column_switch_indices {number of rows, first column index,
    second column index}.
    */
    int row = blockIdx.x*blockDim.x + threadIdx.x;
    // Number of rows (the stride between columns).
    int rows = d_column_switch_indices[0];
    // The two columns to exchange.
    int index1 = d_column_switch_indices[1];
    int index2 = d_column_switch_indices[2];
    // Three-step swap of this row's entry in the two columns.
    float held = d_matrix[index1*rows + row];
    d_matrix[index1*rows + row] = d_matrix[index2*rows + row];
    d_matrix[index2*rows + row] = held;
}
__global__ void shift_first_column(float * d_new_matrix, float * d_old_matrix, float * d_new_column, float * d_algo_constants){
/**
Takes as input a matrix and shifts the first column by mixing it with a
new column; one thread handles one entry.
@param d_new_matrix The output matrix with shifted column.
@param d_old_matrix The input matrix whose first column is being shifted.
@param d_new_column The column vector that will be used to shift the first
column of d_matrix.
@param d_algo_constants A float array used to store the parameter controlling
the amount of shift.
NOTE(review): the original docstring promised a convex combination
(1-alpha)*old + alpha*new, but the expression below computes
(1-alpha)*old + alpha*(new - old) = old + alpha*(new - 2*old),
which only equals the convex combination when old == 0. Confirm which
formula the SAP algorithm intends before changing code or description.
*/
int idx = blockIdx.x*blockDim.x + threadIdx.x;
// The parameter that controls how much the new column will be shifted by
float alpha = d_algo_constants[0];
// Shift entries of the first column of the matrix
d_new_matrix[idx] = (1.0-alpha)*d_old_matrix[idx] + alpha*(d_new_column[idx] - d_old_matrix[idx]);
}
__global__ void normalize_first_column(float * d_matrix, int * d_int_constants){
    /**
    Normalizes the first column of a matrix to unit L2 norm. The column is
    processed serially and no thread index is used, so this kernel is meant
    to be launched with a single thread.
    @param d_matrix The matrix whose first column will be normalized.
    @param d_int_constants Integer array whose first entry is the number of
    rows of the matrix.
    */
    // Height of the column being normalized.
    int column_height = d_int_constants[0];
    // Accumulate the sum of squares of the first column's entries.
    float sum = 0;
    for (int r = 0; r < column_height; r++){
        sum = sum + powf(d_matrix[r], 2);
    }
    sum = sqrtf(sum);
    // Skip the division entirely for an all-zero column.
    if (sum != 0.0){
        for (int r = 0; r < column_height; r++){
            d_matrix[r] = (1/sum)*d_matrix[r];
        }
    }
}
__global__ void make_identity(float * d_matrix, int * d_int_constants){
    /**
    Overwrites a matrix with the identity: 1.0 on the diagonal and 0.0
    everywhere else. One thread writes one entry.
    @param d_matrix The matrix which will be set to the identity matrix.
    @param d_int_constants Integer array whose third entry is the dimension
    (column length) of d_matrix.
    */
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    int dim = d_int_constants[2];
    // Recover (row, column) from the flat column-major index.
    int col = idx / dim;
    int row = idx % dim;
    // Diagonal entries get 1, everything else 0.
    d_matrix[idx] = (row == col) ? 1.0f : 0.0f;
}
|
5,605 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <math.h>
#define TILE_WIDTH 16
// Compute C = A * B
// Tiled matrix multiplication C = A * B using TILE_WIDTH x TILE_WIDTH
// shared-memory tiles. Launch with blockDim = (TILE_WIDTH, TILE_WIDTH) and
// a grid covering C. Out-of-range tile loads are zero-padded, so dimensions
// need not be multiples of TILE_WIDTH.
__global__ void matrixMultiply(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
//@@ Insert code to implement matrix multiplication here
// Per-block tiles of A (ds_M) and B (ds_N).
__shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x, by = blockIdx.y,
tx = threadIdx.x, ty = threadIdx.y,
Row = by * TILE_WIDTH + ty,
Col = bx * TILE_WIDTH + tx;
// Running dot product for C[Row][Col].
float Pvalue = 0;
// Walk the TILE_WIDTH-wide strips of A's columns / B's rows (ceil-div).
for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) {
//load data to shared memory, zero-padding out-of-range entries
if (Row < numARows && m*TILE_WIDTH+tx < numAColumns)
ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx];
else
ds_M[ty][tx] = 0;
if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows)
ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col];
else
ds_N[ty][tx] = 0;
// Barrier: tiles fully loaded before any thread reads them.
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
Pvalue += ds_M[ty][k] * ds_N[k][tx];
// Barrier: all reads done before the next iteration overwrites tiles.
__syncthreads();
}
// Guard the final store — the grid may overhang C's edges.
if (Row < numCRows && Col < numCColumns)
C[Row*numCColumns+Col] = Pvalue;
}
// Fills an n x n matrix (array of row pointers) so that every entry in
// column j equals j, i.e. K[i][j] = (float)j. The unused locals (h, idx)
// and dead debug prints from the original have been removed.
void initialize_K(float **K, int n)
{
	for (int i = 0; i < n; i++)
	{
		for (int j = 0; j < n; j++)
		{
			K[i][j] = j;
		}
	}
}
// Flattens the n x n matrix host_K (array of row pointers) row-major into
// the contiguous buffer K, echoing every value to stdout as it is copied.
void load_value(int n, float **host_K, float *K)
{
	printf("input matrix\n");
	for (int i = 0; i < n; i++)
	{
		for (int j = 0; j < n; j++)
		{
			float v = host_K[i][j];
			K[i*n + j] = v;
			printf("%f ", v);
		}
		printf("\n");
	}
}
// Host driver: builds two n x n matrices, multiplies them on the GPU with
// the tiled matrixMultiply kernel, and prints the result.
// BUG FIXES vs. original: deviceA/deviceB were malloc'd on the host and
// then overwritten by cudaMalloc (host memory leak); deprecated
// cudaThreadSynchronize replaced by cudaDeviceSynchronize; the per-row
// allocations and the flattened host buffers were never freed.
int main(int argc, char ** argv) {
	int n = 4;
	float ** hostA;   // A as an array of row pointers
	float ** hostB;   // B as an array of row pointers
	float * hostA2;   // A flattened row-major for the device copy
	float * hostB2;   // B flattened row-major for the device copy
	float * hostC;    // The output C matrix
	float * deviceA;  // device pointers — cudaMalloc only
	float * deviceB;
	float * deviceC;
	int numARows = n, numAColumns = n;
	int numBRows = n, numBColumns = n;
	int numCRows, numCColumns;
	hostA = (float **)malloc(sizeof(float *) * n);
	for (int i = 0; i < n; i++) {
		hostA[i] = (float *)malloc(sizeof(float) * n);
	}
	hostB = (float **)malloc(sizeof(float *) * n);
	for (int i = 0; i < n; i++) {
		hostB[i] = (float *)malloc(sizeof(float) * n);
	}
	hostB2 = (float *)malloc(sizeof(float) * n * n);
	hostA2 = (float *)malloc(sizeof(float) * n * n);
	initialize_K(hostA, n);
	initialize_K(hostB, n);
	printf("reached here\n");
	// Flatten (and echo) both matrices for the device transfer.
	load_value(n, hostB, hostB2);
	load_value(n, hostA, hostA2);
	// C has A's row count and B's column count.
	numCRows = numARows;
	numCColumns = numBColumns;
	hostC = (float *)malloc(sizeof(float) * numCRows * numCColumns);
	// Allocate GPU memory.
	cudaMalloc(&deviceA, sizeof(float) * numARows * numAColumns);
	cudaMalloc(&deviceB, sizeof(float) * numBRows * numBColumns);
	cudaMalloc(&deviceC, sizeof(float) * numCRows * numCColumns);
	// Copy inputs to the GPU.
	cudaMemcpy(deviceA, hostA2, sizeof(float) * numARows * numAColumns, cudaMemcpyHostToDevice);
	cudaMemcpy(deviceB, hostB2, sizeof(float) * numBRows * numBColumns, cudaMemcpyHostToDevice);
	// Grid covers C in TILE_WIDTH x TILE_WIDTH tiles (ceil-div).
	dim3 dimGrid((numCColumns-1)/TILE_WIDTH+1, (numCRows-1)/TILE_WIDTH+1, 1);
	dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
	matrixMultiply<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC,
	                                      numARows, numAColumns,
	                                      numBRows, numBColumns,
	                                      numCRows, numCColumns);
	cudaDeviceSynchronize();
	// Copy the result back to the host.
	cudaMemcpy(hostC, deviceC, sizeof(float) * numCRows * numCColumns, cudaMemcpyDeviceToHost);
	printf("output matrix\n");
	for (int i = 0; i < n*n; i++) {
		printf("%f ", hostC[i]);
		if ((i+1) % n == 0)
			printf("\n");
	}
	// Free GPU memory and ALL host allocations.
	cudaFree(deviceA);
	cudaFree(deviceB);
	cudaFree(deviceC);
	for (int i = 0; i < n; i++) {
		free(hostA[i]);
		free(hostB[i]);
	}
	free(hostA);
	free(hostB);
	free(hostA2);
	free(hostB2);
	free(hostC);
	return 0;
}
|
5,606 | #include <stdio.h>
#include <stdint.h>
#include <cuda.h>
// Wraps a CUDA runtime call: on any error, prints the source location, the
// numeric error code and its string, then exits. Use as CHECK(cudaMalloc(...)).
#define CHECK(call) \
{ \
 const cudaError_t error = call; \
 if (error != cudaSuccess) \
 { \
 fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
 fprintf(stderr, "code: %d, reason: %s\n", error, \
 cudaGetErrorString(error)); \
 exit(EXIT_FAILURE); \
 } \
}
// Simple timer over CUDA events. Start()/Stop() record events on the
// default stream; Elapsed() blocks on the stop event and returns the time
// between the two events in milliseconds.
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
// Create the two timing events.
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
// Release the events.
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
// Record the start event on stream 0.
void Start()
{
cudaEventRecord(start, 0);
}
// Record the stop event on stream 0.
void Stop()
{
cudaEventRecord(stop, 0);
}
// Wait for the stop event, then return the elapsed milliseconds.
// NOTE: only valid after both Start() and Stop() have been called.
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Reads an ASCII PNM image (P2 grayscale or P3 RGB) from fileName.
// On success, allocates `pixels` with malloc (caller frees) and fills
// numChannels, width and height. Any open failure, unsupported type, or
// max value above 255 prints an error and exits the program.
void readPnm(char * fileName, int &numChannels, int &width, int &height, uint8_t * &pixels)
{
	FILE * f = fopen(fileName, "r");
	if (f == NULL)
	{
		printf("Cannot read %s\n", fileName);
		exit(EXIT_FAILURE);
	}
	char type[3];
	// Width-limited read ("%2s") so a malformed magic cannot overflow type[3].
	fscanf(f, "%2s", type);
	if (strcmp(type, "P2") == 0)
		numChannels = 1;
	else if (strcmp(type, "P3") == 0)
		numChannels = 3;
	else
	{
		fclose(f);
		printf("Cannot read %s\n", fileName); // In this exercise, we don't touch other types
		exit(EXIT_FAILURE);
	}
	fscanf(f, "%i", &width);
	fscanf(f, "%i", &height);
	// BUG FIX: the original read max_val into a uint8_t with %hhu, so the
	// "> 255" guard below could never fire (values wrapped modulo 256).
	// Read it as an int so oversized max values are actually rejected.
	int max_val;
	fscanf(f, "%i", &max_val);
	if (max_val > 255)
	{
		fclose(f);
		printf("Cannot read %s\n", fileName); // In this exercise, we assume 1 byte per value
		exit(EXIT_FAILURE);
	}
	pixels = (uint8_t *)malloc(width * height * numChannels);
	for (int i = 0; i < width * height * numChannels; i++)
		fscanf(f, "%hhu", &pixels[i]);
	fclose(f);
}
// Writes an ASCII PNM image (P2 for 1 channel, P3 for 3 channels) to
// fileName with a fixed max value of 255, one sample per line. Exits the
// program on open failure or an unsupported channel count.
void writePnm(char * fileName, int numChannels, int width, int height, uint8_t * pixels)
{
	FILE * f = fopen(fileName, "w");
	if (f == NULL)
	{
		printf("Cannot write %s\n", fileName);
		exit(EXIT_FAILURE);
	}
	// Pick the magic number, rejecting anything but 1 or 3 channels.
	const char *magic = (numChannels == 1) ? "P2" : (numChannels == 3) ? "P3" : NULL;
	if (magic == NULL)
	{
		fclose(f);
		printf("Cannot write %s\n", fileName);
		exit(EXIT_FAILURE);
	}
	fprintf(f, "%s\n", magic);
	fprintf(f, "%i\n%i\n255\n", width, height);
	int total = width * height * numChannels;
	for (int i = 0; i < total; i++)
		fprintf(f, "%hhu\n", pixels[i]);
	fclose(f);
}
// Compares two PNM files and prints either the first mismatching dimension
// or the mean absolute per-sample difference.
// BUG FIXES vs. original: the height-mismatch message was labelled "width",
// and both pixel buffers (malloc'd inside readPnm) were leaked on every path.
void compare2Pnms(char * fileName1, char * fileName2)
{
	int numChannels1, width1, height1;
	uint8_t * pixels1;
	readPnm(fileName1, numChannels1, width1, height1, pixels1);
	int numChannels2, width2, height2;
	uint8_t * pixels2;
	readPnm(fileName2, numChannels2, width2, height2, pixels2);
	if (numChannels1 != numChannels2)
	{
		printf("'%s' is DIFFERENT from '%s' (num channels: %i vs %i)\n", fileName1, fileName2, numChannels1, numChannels2);
	}
	else if (width1 != width2)
	{
		printf("'%s' is DIFFERENT from '%s' (width: %i vs %i)\n", fileName1, fileName2, width1, width2);
	}
	else if (height1 != height2)
	{
		printf("'%s' is DIFFERENT from '%s' (height: %i vs %i)\n", fileName1, fileName2, height1, height2);
	}
	else
	{
		// Mean absolute difference over every sample of every channel.
		float mae = 0;
		for (int i = 0; i < width1 * height1 * numChannels1; i++)
		{
			mae += abs((int)pixels1[i] - (int)pixels2[i]);
		}
		mae /= (width1 * height1 * numChannels1);
		printf("The average pixel difference between '%s' and '%s': %f\n", fileName1, fileName2, mae);
	}
	free(pixels1);
	free(pixels2);
}
// CPU reference: converts an interleaved RGB image to grayscale using the
// BT.601 luma weights (0.299 R + 0.587 G + 0.114 B); the float result is
// truncated on assignment to uint8_t.
void convertRgb2GrayByHost(uint8_t * inPixels, uint8_t * outPixels, int width, int height)
{
	int size = width * height;
	for (int i = 0; i < size; i++)
	{
		uint8_t r = inPixels[i*3];
		uint8_t g = inPixels[i*3 + 1];
		uint8_t b = inPixels[i*3 + 2];
		outPixels[i] = 0.299 * r + 0.114 * b + 0.587 * g;
	}
}
// GPU version: one thread per output pixel on a 2-D launch. Converts
// interleaved RGB to grayscale with the BT.601 weights; threads mapped
// outside the image do nothing.
__global__ void convertRgb2GrayByDevice(uint8_t * inPixels, uint8_t * outPixels, int width, int height)
{
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (row >= height || col >= width)
		return;
	int p = row * width + col;
	outPixels[p] = 0.299 * inPixels[p*3] +
	               0.114 * inPixels[p*3 + 2] +
	               0.587 * inPixels[p*3 + 1];
}
// Driver: argv = {inFile, outFile, referenceFile, "cpu"|"gpu"
// [, blockSizeX, blockSizeY]}. Converts the input image to grayscale on
// the chosen device, times it, writes the result, and compares against the
// reference. BUG FIXES vs. original: kernel-launch errors are now checked
// with cudaGetLastError, the compute capability prints major AND minor,
// and the device frees are error-checked.
int main(int argc, char ** argv)
{
	// -----READ INPUT DATA-----
	if (argc < 5 || argc > 7)
	{
		printf("The number of arguments is invalid\n");
		return EXIT_FAILURE;
	}
	int numChannels, width, height;
	uint8_t * inPixels;
	readPnm(argv[1], numChannels, width, height, inPixels);
	printf("Image size (width x height): %i x %i\n", width, height);
	// -----PROCESS INPUT DATA-----
	uint8_t * outPixels = (uint8_t *)malloc(width * height);
	GpuTimer timer;
	timer.Start();
	if (strcmp(argv[4], "cpu") == 0) // Use CPU
	{
		convertRgb2GrayByHost(inPixels, outPixels, width, height);
	}
	else // Use GPU
	{
		// Query and print GPU name and compute capability.
		cudaDeviceProp prop;
		CHECK(cudaGetDeviceProperties(&prop, 0));
		printf("GPU name: %s\n", prop.name);
		printf("GPU compute capability: %d.%d\n", prop.major, prop.minor);
		// Allocate device memories.
		uint8_t *d_inPixels, *d_outPixels;
		CHECK(cudaMalloc(&d_inPixels, width * height * numChannels));
		CHECK(cudaMalloc(&d_outPixels, width * height));
		// Copy input to the device.
		CHECK(cudaMemcpy(d_inPixels, inPixels, width * height * numChannels, cudaMemcpyHostToDevice));
		// Block size from argv when supplied, otherwise 32x32; ceil-div grid.
		dim3 blockSize(32, 32); // Default
		if (argc == 7)
		{
			blockSize.x = atoi(argv[5]);
			blockSize.y = atoi(argv[6]);
		}
		dim3 gridSize((width-1)/blockSize.x + 1, (height-1)/blockSize.y + 1);
		printf("Block size: %d x %d\n", blockSize.x, blockSize.y);
		printf("Grid size: %d x %d\n", gridSize.x, gridSize.y);
		convertRgb2GrayByDevice<<<gridSize, blockSize>>>(d_inPixels, d_outPixels, width, height);
		// Kernel launches return no error code — check explicitly.
		CHECK(cudaGetLastError());
		// Blocking copy back also synchronizes with the kernel.
		CHECK(cudaMemcpy(outPixels, d_outPixels, width * height, cudaMemcpyDeviceToHost));
		// Free device memories.
		CHECK(cudaFree(d_inPixels));
		CHECK(cudaFree(d_outPixels));
	}
	timer.Stop();
	float time = timer.Elapsed();
	printf("Processing time: %f ms\n", time);
	// -----WRITE OUTPUT DATA TO FILE-----
	writePnm(argv[2], 1, width, height, outPixels);
	free(inPixels);
	free(outPixels);
	// -----CHECK CORRECTNESS-----
	compare2Pnms(argv[2], argv[3]);
	return 0;
} |
5,607 | /* ==================================================================
Programmer: Yicheng Tu (ytu@cse.usf.edu)
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH in the rc machines
==================================================================
Steven Faulkner
U9616-1844
Summer 2018
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
unsigned long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of width */
atom * atom_list; /* list of all data points */
int BlockSize;
struct timezone Idunno;
struct timeval startTime, endTime;
// Aborts the program with a descriptive message when a CUDA runtime call
// reported an error; `op` names the operation for the log line.
void ErrorCheck( cudaError_t err, const char op[])
{
	if( err == cudaSuccess )
		return;
	printf("CUDA Error: %s, %s ", op, cudaGetErrorString(err));
	exit(EXIT_FAILURE);
}
/*
distance of two points in the atom_list
*/
/*
	Euclidean (L2) distance between the 3-D positions of two atoms.
*/
__device__ double
p2p_distance(atom A, atom B)
{
	double dx = A.x_pos - B.x_pos;
	double dy = A.y_pos - B.y_pos;
	double dz = A.z_pos - B.z_pos;
	return sqrt(dx*dx + dy*dy + dz*dz);
}
/* Baseline SDH kernel — the distance/histogram statements are commented
   out, so as written this kernel only spins through an empty loop.
   NOTE(review): the __syncthreads() calls sit inside a loop whose trip
   count (size - i) differs per thread, so threads of the same block would
   reach the barrier a different number of times — undefined behaviour /
   potential hang if the commented code is re-enabled as-is. */
__global__ void
PDH_baseline(bucket *histo_in, atom *list, double width, int size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
for(int x = i+1; x < size; ++x)
{
//double distance = p2p_distance(list,i,x);
// int pos = (int) (distance/width);
__syncthreads();
// histo_in[pos].d_cnt++;
__syncthreads();
}
}
/* Coalesced-access SDH variant — the working statements are commented out,
   so this kernel currently does nothing (grid-stride skeleton only).
   NOTE(review): the inner loop bound `j < stride` looks wrong for a pair
   enumeration (one would expect `j < size`); review before re-enabling the
   commented body. */
__global__ void
pdh_coal(bucket * histogram, atom * list, double width, int size)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(i < size-1){
for(int j = i+1; j < stride;++j){
// double distance = p2p_distance(list,i,j);
// int pos = (int) (distance/width);
// atomicAdd(&histogram[pos].d_cnt,1);
}
i+=stride;
}
}
/* Tiled SDH kernel: each block caches its own tile of atoms in shared
   memory, pairs it against every later block's tile, then handles its own
   intra-tile pairs, accumulating distance counts with atomicAdd.
   Launch with 2 * BlockSize * sizeof(atom) bytes of dynamic shared memory
   and blockDim.x == BlockSize.
   BUG FIXES vs. original:
   - the second tile was addressed via sizeof(private_atom) — the byte size
     of a POINTER used as an element offset — corrupting the first tile; it
     now starts at element BlockSize;
   - tile loads are guarded against reading past `size`;
   - a barrier was added before the next iteration overwrites the visiting
     tile while other threads may still be reading it. */
__global__ void
pdh_priv(bucket * histogram, atom * list, double width, int size, int BlockSize)
{
	int t = threadIdx.x;
	int b = blockIdx.x;
	unsigned int reg = t + b * blockDim.x;
	extern __shared__ atom smem[];
	atom * private_atom = &smem[0];          // this block's own tile
	atom * localBlock   = &smem[BlockSize];  // the visiting block's tile
	if (t < BlockSize && reg < size)
		private_atom[t] = list[reg];
	__syncthreads();
	// Pair this block's tile against every later block's tile (ceil-div so
	// a partial tail tile is not dropped).
	for (int i = b + 1; i < (size + BlockSize - 1) / BlockSize; ++i)
	{
		unsigned int tempIdx = t + i * blockDim.x;
		if (t < BlockSize && tempIdx < size)
			localBlock[t] = list[tempIdx];
		__syncthreads();
		for (int j = 0; j < BlockSize; ++j)
		{
			double distance = p2p_distance(private_atom[t], localBlock[j]);
			int pos = (int) (distance/width);
			atomicAdd(&histogram[pos].d_cnt, 1);
		}
		__syncthreads(); // all reads done before localBlock is overwritten
	}
	// Intra-tile pairs: each unordered pair counted exactly once.
	for (int i = t + 1; i < BlockSize; ++i)
	{
		double distance = p2p_distance(private_atom[t], private_atom[i]);
		int pos = (int) (distance/width);
		atomicAdd(&histogram[pos].d_cnt, 1);
	}
}
/* Prints the histogram five buckets per row, then the running total of all
   bucket counts (a sanity check on the number of pairwise distances). */
void output_histogram(bucket *histogram){
	long long total_cnt = 0;
	for (int i = 0; i < num_buckets; i++) {
		if (i % 5 == 0) /* we print 5 buckets in a row */
			printf("\n%02d: ", i);
		printf("%15lld ", histogram[i].d_cnt);
		total_cnt += histogram[i].d_cnt;
		/* after the last bucket, print the grand total */
		if (i == num_buckets - 1)
			printf("|\n T: %lld \n ", total_cnt);
		else
			printf("| ");
	}
}
// Driver: argv = {point count, bucket width, block size}. Generates a
// uniform random point cloud, computes the spatial distance histogram on
// the GPU, and prints it with the kernel time.
// BUG FIXES vs. original: the histogram was malloc'd (uninitialized) yet
// accumulated with atomicAdd on the device — now calloc'd to zero;
// PDH_acnt (long long) was printed with %d; ceil(PDH_acnt/BlockSize) did
// integer division before ceil() saw it, dropping the tail block; typo
// "recieved" in the usage message.
int main(int argc, char **argv)
{
	int i;
	if(argc == 4){
		PDH_acnt = atoi(argv[1]);
		PDH_res = atof(argv[2]);
		BlockSize = atoi(argv[3]);
		printf("Parameters are %lld, %.2f, %d", PDH_acnt, PDH_res, BlockSize);
	}
	else{
		int count = argc - 1;
		printf("Too Few Arguments to Function, required 3 only received %d\n\n", count);
		exit(EXIT_FAILURE);
	}
	// sqrt(3) * BOX_SIZE is the largest possible distance in the box.
	num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
	size_t hist_size = sizeof(bucket)*num_buckets;
	size_t atom_size = sizeof(atom)*PDH_acnt;
	histogram = (bucket *)calloc(num_buckets, sizeof(bucket)); // zeroed!
	atom_list = (atom *)malloc(atom_size);
	srand(1);
	/* generate data following a uniform distribution */
	for(i = 0; i < PDH_acnt; i++) {
		atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
		atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
		atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
	}
	bucket *dev_Histo = NULL;
	atom *dev_atomL = NULL;
	ErrorCheck(cudaMalloc((void**) &dev_Histo, hist_size), "Allocate Memory for Histogram");
	ErrorCheck(cudaMalloc((void**) &dev_atomL, atom_size), "Allocate Memory for Atom List");
	ErrorCheck(cudaMemcpy(dev_Histo, histogram, hist_size, cudaMemcpyHostToDevice), "Copying Histogram to Device");
	ErrorCheck(cudaMemcpy(dev_atomL, atom_list, atom_size, cudaMemcpyHostToDevice), "Copying Atom list to Device");
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	// Integer ceil-div keeps the tail block when PDH_acnt % BlockSize != 0.
	int nBlocks = (int)((PDH_acnt + BlockSize - 1) / BlockSize);
	pdh_priv <<<nBlocks, BlockSize, 2 * BlockSize * sizeof(atom)>>>(dev_Histo, dev_atomL, PDH_res, PDH_acnt, BlockSize);
	ErrorCheck(cudaGetLastError(), "Kernel Launch");
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	float elapsedTime;
	cudaEventElapsedTime(&elapsedTime, start, stop);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	ErrorCheck(cudaMemcpy(histogram, dev_Histo, hist_size, cudaMemcpyDeviceToHost), " Move Histogram back to Host");
	/* print out the histogram */
	output_histogram(histogram);
	printf("\n\n******* Total Running Time of Kernal = %.5f milliseconds *******\n\n", elapsedTime);
	ErrorCheck(cudaFree(dev_Histo), "Free Device Histogram");
	ErrorCheck(cudaFree(dev_atomL), "Free Device Atom List");
	ErrorCheck(cudaDeviceReset(), "Reset");
	free(histogram);
	free(atom_list);
	return 0;
}
|
5,608 | /*
* Example from "CUDA by Example"
* https://developer.download.nvidia.com/books/cuda-by-example/cuda-by-example-sample.pdf
*/
#include <stdio.h>
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Prints the CUDA error string with source file/line context and exits;
// used via the HANDLE_ERROR macro which supplies __FILE__/__LINE__.
static void HandleError(cudaError_t err, const char *file, int line)
{
	if (err == cudaSuccess)
		return;
	printf("%s in %s at line %d\n", cudaGetErrorString( err ), file, line);
	exit(EXIT_FAILURE);
}
#define N 10
// Element-wise vector addition c = a + b. Launched as add<<<N,1>>> — one
// single-thread block per element, indexed by blockIdx.x.
__global__ void add( int *a, int *b, int *c ) {
	int tid = blockIdx.x; // this block's element
	if (tid < N)
		c[tid] = b[tid] + a[tid];
}
// Adds two N-element vectors on the GPU and prints the sums.
// BUG FIX vs. original: kernel launches return no cudaError_t, so launch
// errors are now caught via cudaGetLastError; the cudaFree calls are also
// error-checked.
int main()
{
	int a[N], b[N], c[N];
	int *dev_a, *dev_b, *dev_c;
	// allocate the memory on the GPU
	HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N * sizeof(int) ) );
	HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N * sizeof(int) ) );
	HANDLE_ERROR( cudaMalloc( (void**)&dev_c, N * sizeof(int) ) );
	// fill the arrays 'a' and 'b' on the CPU
	for (int i=0; i<N; i++) {
		a[i] = -i;
		b[i] = i * i;
	}
	// copy the arrays 'a' and 'b' to the GPU
	HANDLE_ERROR( cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice ) );
	HANDLE_ERROR( cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice ) );
	add<<<N,1>>>( dev_a, dev_b, dev_c );
	// catch launch-configuration errors (a launch has no return value)
	HANDLE_ERROR( cudaGetLastError() );
	// copy the array 'c' back from the GPU to the CPU (synchronizes)
	HANDLE_ERROR( cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost ) );
	// display the results
	for (int i=0; i<N; i++) {
		printf( "%d + %d = %d\n", a[i], b[i], c[i] );
	}
	// free the memory allocated on the GPU
	HANDLE_ERROR( cudaFree( dev_a ) );
	HANDLE_ERROR( cudaFree( dev_b ) );
	HANDLE_ERROR( cudaFree( dev_c ) );
	return 0;
}
|
5,609 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <cuda.h>
#include <time.h>
#include <sys/types.h>
#include <sys/time.h>
#define VERBOSE 1
/* Counts occurrences of searchString in fileBuffer. Each thread scans its
   own numBytes-wide slice [idx*numBytes, (idx+1)*numBytes); matches are
   counted non-overlapping (the scan jumps past each found match). Per-
   thread counts are reduced in shared memory (launch with
   blockDim.x * sizeof(int) bytes of dynamic shared memory) and thread 0
   writes the block total to matchArray[blockIdx.x]. Each thread also
   records the start of its first match and the end of its last match, for
   host-side detection of matches straddling slice boundaries.
   NOTE(review): the inner comparison reads fileBuffer[i+j] with i up to
   rangeEnd-1 and j up to searchSize-1, so a match may straddle the slice
   boundary — but the final threads can read past the end of the buffer;
   confirm the buffer is padded by at least searchSize bytes.
   NOTE(review): the interleaved reduction (index = 2*s*tid) assumes
   blockDim.x is a power of two; other block sizes drop partial sums. */
__global__ void checkMatchOnDevice(char *fileBuffer, char* searchString, int* matchArray, int numBytes,size_t searchSize,int* matchStartArray, int* matchEndArray)
{
extern __shared__ int sdata[];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
// This thread's slice of the buffer.
int rangeStart;
int rangeEnd;
rangeStart = idx*numBytes;
rangeEnd = rangeStart + numBytes;
int i,j;
int numMatches;
int foundMatch;
int firstMatch = 1;
int firstMatchIndex = -1;
int lastMatchIndex = -1;
unsigned int tid = threadIdx.x;
for(numMatches=0, i = rangeStart; i < rangeEnd; i++)
{
// Compare the pattern against the buffer at offset i.
foundMatch = 1;
for(j = 0; j < searchSize; j++)
{
int index = i+j;
if(fileBuffer[index] != searchString[j])
{
foundMatch = 0;
break;
}
}
if(foundMatch)
{
numMatches++;
if(firstMatch)
{
firstMatchIndex = i;
firstMatch = 0;
}
lastMatchIndex = i+searchSize;
// Skip past the match so overlapping hits are not double counted.
i+=searchSize-1;
}
}
matchStartArray[idx] = firstMatchIndex;
matchEndArray[idx] = lastMatchIndex;
//matchArray[idx] = numMatches;
// Block-level shared-memory reduction of the per-thread counts.
sdata[tid] = numMatches;
__syncthreads();
for(unsigned int s=1;s<blockDim.x;s *= 2){
int index = s*2*tid;
if((index+s) < blockDim.x){
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// Thread 0 publishes the block's total.
if(tid==0) matchArray[blockIdx.x]=sdata[0];
}
/* Reduces matchArray (noOfThreads valid entries) block-by-block in shared
   memory, writing each block's partial sum back to matchArray[blockIdx.x];
   the host launches this repeatedly until a single block remains. Launch
   with blockDim.x * sizeof(int) bytes of dynamic shared memory.
   NOTE(review): the __syncthreads() sits inside the `if (i < noOfThreads)`
   branch, which diverges within a block when noOfThreads is not a multiple
   of blockDim.x — __syncthreads() in divergent code is undefined behaviour
   per the CUDA programming guide.
   NOTE(review): sdata2 entries for out-of-range threads are never written,
   yet the reduction guard tests `index+s < noOfThreads` (a global count)
   rather than this block's element count — verify multi-block results. */
__global__ void cumulateOnDevice(int* matchArray, int noOfThreads, int* outArray)
{
extern __shared__ int sdata2[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<noOfThreads){
sdata2[tid] = matchArray[i];
//__syncthreads();
// Interleaved tree reduction over this block's shared copy.
for(unsigned int s=1;s<blockDim.x;s*=2){
int index = s*2*tid;
__syncthreads();
if((index+s) < noOfThreads){
sdata2[index] += sdata2[index+s];
}
// __syncthreads();
}
// Thread 0 writes the block's partial sum back in place.
if(tid == 0) matchArray[blockIdx.x] = sdata2[0];
}
}
int main(int argc, char *argv[])
{
struct timeval cpuStart,cpuEnd;
char* searchString = (char*)malloc(sizeof(char*)*80);
char* fileBuffer = (char*)malloc(sizeof(char*)*10000000);
int nBlocks;
int threadsPerBlock;
if(argc != 4)
{
printf("Usage: stringSearch \"Key\" numBlocks threadsPerBlock < inputFile\n");
exit(0);
}
else
{
searchString = argv[1];
nBlocks = atoi(argv[2]);
threadsPerBlock = atoi(argv[3]);
#ifdef VERBOSE
printf("Search String: %s\n",searchString);
#endif
int ptr;
for(ptr = 0; !feof(stdin);)
{
ptr+= fread(&(fileBuffer[ptr]),1,1,stdin);
}
char *deviceFileBuffer; // pointer to device memory
char *deviceSearchBuffer; // pointer to device memory
int *matchArray;
int *outArray;
int *hostMatchArray;
int *hostMatchStartArray;
int *hostMatchEndArray;
int *matchStartArray;
int *matchEndArray;
int fileSize = ptr;
//printf("FileSize: %d",strlen(fileBuffer));
size_t searchSize = strlen(searchString);
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaThreadSynchronize();
gettimeofday(&cpuStart, NULL);
// allocate file buffer space on device
cudaMalloc((void **) &deviceFileBuffer, fileSize);
// allocate search string space on device
cudaMalloc((void **) &deviceSearchBuffer, searchSize);
// copy data from host to device
cudaMemcpy(deviceFileBuffer, fileBuffer, fileSize, cudaMemcpyHostToDevice);
cudaMemcpy(deviceSearchBuffer, searchString, searchSize, cudaMemcpyHostToDevice);
// do calculation on device:
// Part 1 of 2. Compute execution configuration
size_t numThreads = nBlocks*threadsPerBlock;
int numBytesPerThread = fileSize/numThreads;
//Allocate match array space on device
cudaMalloc((void **) &matchArray, sizeof(int)*numThreads);
cudaMalloc((void **) &outArray, sizeof(int)*numThreads);
hostMatchArray = (int*)malloc(sizeof(int)*numThreads);
cudaMalloc((void **) &matchStartArray, sizeof(int)*numThreads);
hostMatchStartArray = (int*)malloc(sizeof(int)*numThreads);
cudaMalloc((void **) &matchEndArray, sizeof(int)*numThreads);
hostMatchEndArray = (int*)malloc(sizeof(int)*numThreads);
//Init array to 0
int i;
for(i = 0; i < numThreads; i++)
{
hostMatchArray[i] = 0;
hostMatchStartArray[i] = -1;
hostMatchEndArray[i] = -1;
}
cudaMemcpy(matchArray, hostMatchArray, numThreads, cudaMemcpyHostToDevice);
cudaMemcpy(matchStartArray, hostMatchStartArray, numThreads, cudaMemcpyHostToDevice);
cudaMemcpy(matchEndArray, hostMatchEndArray, numThreads, cudaMemcpyHostToDevice);
//printf("Number of threads:%d, Number of blocks:%d, Num Threads Per Block:%d, Num Bytes Per Thread:%d\n",numThreads,nBlocks,threadsPerBlock,numBytesPerThread);
// Part 2 of 2. Call call checkMatchOnDevice kernel
cudaEventRecord( start, 0 );
checkMatchOnDevice <<< nBlocks, threadsPerBlock , threadsPerBlock*sizeof(int)>>> (deviceFileBuffer, deviceSearchBuffer, matchArray,numBytesPerThread,searchSize,matchStartArray,matchEndArray);
int newNBlocks=nBlocks,newNThreads;//printf("\nNew Blocks:%d",nBlocks);
cudaThreadSynchronize();
while(newNBlocks > 1){
newNThreads = newNBlocks;
newNBlocks = (newNBlocks/threadsPerBlock)+1;
cumulateOnDevice <<< newNBlocks, threadsPerBlock ,threadsPerBlock * sizeof(int)>>> (matchArray,newNThreads,outArray);
}
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaThreadSynchronize();
// Retrieve result from device and store in host array
cudaMemcpy(hostMatchArray, matchArray, sizeof(int)*numThreads, cudaMemcpyDeviceToHost);
cudaMemcpy(hostMatchStartArray, matchStartArray, sizeof(int)*numThreads, cudaMemcpyDeviceToHost);
cudaMemcpy(hostMatchEndArray, matchEndArray, sizeof(int)*numThreads, cudaMemcpyDeviceToHost);
int total = 0;
//for(i = 0; i < numThreads; i++)
//{
//total += hostMatchArray[i];
//printf("%d)%d\n",i,hostMatchArray[i]);
//}
total = hostMatchArray [0];
//Overlap check, commented out for hw2
/* for(i = 0; i < numThreads; i++)
{
if(hostMatchEndArray[i] != -1 && hostMatchStartArray[i+1] != -1)
{
if(hostMatchEndArray[i] - hostMatchStartArray[i+1] < 0)
total--;
}
//printf("%d)%d\n",i,hostMatchStartArray[i]);
//printf("start:%d,end:%d\n",hostMatchStartArray[i],hostMatchEndArray[i]);
}*/
gettimeofday(&cpuEnd, NULL);
// float totalTime = (cpuEnd - cpuStart);
//printf("Number of threads:%d, Number of blocks:%d, Num Threads Per Block:%d, Num Bytes Per Thread:%d\n",numThreads,nBlocks,threadsPerBlock,numBytesPerThread);
//printf("numOfThread: %4d matchCount: %4d CPUrunningTime: %8ld\n", blocksize, thread_num, sum, q.tv_usec - p.tv_usec + (q.tv_sec-p.tv_sec)*1000000);
#ifdef VERBOSE
printf("Completed Successfully! Number of blocks:%d Number of threads per block:%d Num Threads: %d Matches:%d CPU Time:%8ld GPU Time:%f\n\n",nBlocks,threadsPerBlock,numThreads,total,cpuEnd.tv_usec - cpuStart.tv_usec + (cpuEnd.tv_sec - cpuStart.tv_sec ),time);
#else
printf("%d %f\n\n",numThreads,time);
//printf("%d %8ld\n\n",numThreads,cpuEnd.tv_usec - cpuStart.tv_usec + (cpuEnd.tv_usec - cpuStart.tv_usec) );
#endif
cudaFree(matchArray);
cudaFree(outArray);
}
}
|
5,610 | #include <iostream>
#include <stdio.h>
#include <vector>
#define MAX_THREADS 256
#define SIZE 131072
#define __START__ cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0);
#define __STOP__(_V) cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); _V.push_back(time); cudaEventDestroy(start); cudaEventDestroy(stop);
#define __NEXT__(_V) __STOP__(_V) __START__
// Block-level sum reduction in shared memory; one atomicAdd per block.
// Launch: 1-D grid, blockDim.x a power of two and <= MAX_THREADS;
// d_result[0] must be zeroed before launch (done host-side via cudaMemset).
// Fix vs. original: the early `return` for i >= SIZE happened before the
// __syncthreads() inside the loop, putting a barrier in divergent control
// flow (undefined behavior / deadlock when SIZE is not a multiple of
// blockDim.x). All threads now stay in the block; out-of-range lanes
// contribute a zero pad instead.
__global__ void sumall_kernel_shared(float *d_vector, float *d_result)
{
    __shared__ float cache[MAX_THREADS];
    int cacheIdx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Zero-pad lanes past the end so every thread reaches every barrier.
    cache[cacheIdx] = i >= SIZE ? 0.f : d_vector[i];
    __syncthreads();
    // Tree reduction: halve the active range each pass.
    for (int padding = blockDim.x / 2; padding != 0; padding /= 2)
    {
        if (cacheIdx < padding)
            cache[cacheIdx] += cache[cacheIdx + padding]; // adding a zero pad is harmless
        __syncthreads();
    }
    if (cacheIdx == 0)
        atomicAdd(&d_result[0], cache[0]);
}
// Same tree reduction as sumall_kernel_shared, but done in place on the
// global-memory input (for timing comparison against the shared version).
// /!\ destroys d_vector: after the kernel, d_vector[block*blockDim] holds
// each block's partial sum and the other entries are clobbered.
// d_result[0] must be zeroed before launch.
// NOTE(review): the early return skips the __syncthreads() in the loop for
// tail threads; this is only safe here because SIZE (131072) is an exact
// multiple of the launch block size, so no thread actually takes that path
// within a partially-active block — confirm if SIZE or the launch changes.
__global__ void sumall_kernel_global(float *d_vector, float *d_result)
{ // /!\ it changes d_vector
int cacheIdx = threadIdx.x;
int deltaIdx = blockIdx.x * blockDim.x;
int i = deltaIdx + cacheIdx;
if (i >= SIZE)
return;
int padding = blockDim.x/2;
// Pairwise accumulate within the block's slice of d_vector; the
// __syncthreads() orders the read-modify-write passes between warps.
while (padding != 0)
{
if (cacheIdx < padding && i+padding < SIZE)
d_vector[i] += d_vector[i + padding];
__syncthreads();
padding /= 2;
}
// Thread 0 publishes the block's partial sum (slice head) to the total.
if (cacheIdx == 0)
atomicAdd(&d_result[0], d_vector[deltaIdx]);
}
// Prints the mean of the timing samples in v, converted from milliseconds
// (the unit of cudaEventElapsedTime) to microseconds.
// Fix vs. original: guard against an empty sample vector, which previously
// divided by zero.
void showMean(std::vector<float> v)
{
    if (v.empty())
    {
        std::cout << "no samples" << std::endl;
        return;
    }
    float sum(0);
    for (unsigned int i(0) ; i!=v.size() ; i++)
        sum += v[i];
    std::cout << 1000.*sum/v.size() << " microseconds" << std::endl;
}
// Benchmarks the shared-memory vs. in-place-global sum reductions over 1000
// runs each and prints the mean kernel time for both.
// Fixes vs. original:
//  - SIZE floats (512 KB) were a stack array; now heap-allocated.
//  - The D2H copy after sumall_kernel_global wrote the (clobbered) device
//    vector back into the input array, corrupting the data used by every
//    later iteration; results now land in a separate scratch buffer.
//  - Device buffers are allocated once instead of 2000 times (cudaMalloc is
//    expensive and was outside the timed region anyway).
//  - Deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize().
int main(int argc, char **argv)
{
    std::cout << "SIZE (vs Shared): " << SIZE << std::endl;
    cudaEvent_t start, stop;
    std::vector<float> sharedRun, globalRun;
    float time(0);
    cudaFree(0); // Force runtime API context establishment
    std::vector<float> h_vector(SIZE);   // pristine input, never overwritten
    for (unsigned int i(0) ; i!=SIZE ; i++)
        h_vector[i] = i;
    std::vector<float> h_scratch(SIZE);  // receives device output each run
    float h_result;
    float *d_vector, *d_result;
    cudaMalloc(&d_vector, SIZE*sizeof(float));
    cudaMalloc(&d_result, sizeof(float));
    for (unsigned int i(0) ; i!=1000 ; i++)
    {
        /* SHARED */
        cudaMemcpy(d_vector, h_vector.data(), SIZE*sizeof(float), cudaMemcpyHostToDevice);
        cudaMemset(d_result, 0, sizeof(float));
        __START__
        sumall_kernel_shared<<<(SIZE+MAX_THREADS-1)/MAX_THREADS, MAX_THREADS>>>(d_vector, d_result);
        cudaDeviceSynchronize(); // Block until the device is finished
        __STOP__(sharedRun);
        cudaMemcpy(h_scratch.data(), d_vector, SIZE*sizeof(float), cudaMemcpyDeviceToHost);
        cudaMemcpy(&h_result, d_result, sizeof(float), cudaMemcpyDeviceToHost);
        /* GLOBAL */
        // Re-upload the input: sumall_kernel_global destroys d_vector.
        cudaMemcpy(d_vector, h_vector.data(), SIZE*sizeof(float), cudaMemcpyHostToDevice);
        cudaMemset(d_result, 0, sizeof(float));
        __START__
        sumall_kernel_global<<<(SIZE+MAX_THREADS-1)/MAX_THREADS, MAX_THREADS>>>(d_vector, d_result);
        cudaDeviceSynchronize(); // Block until the device is finished
        __STOP__(globalRun);
        cudaMemcpy(h_scratch.data(), d_vector, SIZE*sizeof(float), cudaMemcpyDeviceToHost);
        cudaMemcpy(&h_result, d_result, sizeof(float), cudaMemcpyDeviceToHost);
    }
    cudaFree(d_vector);
    cudaFree(d_result);
    showMean(sharedRun);
    showMean(globalRun);
}
|
5,611 | #include <iostream>
#include <cstring>
#include <fstream>
#include <algorithm>
#include <cmath>
#include <ctime>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#define EPS 1e-3
//#define WRITE_TO_FILE
#define NX 32
#define NY 32
#define PHI_CPU(x,y) sin(M_PI*x)*sin(M_PI*y)
#define PHI_GPU(x,y) __sinf(M_PI*x)*__sinf(M_PI*y)
#define PSI_CPU(x,y) 0
#define PSI_GPU(x,y) 0
#define F_CPU(x,y,t) 0
#define F_GPU(x,y,t) 0
using namespace std;
//Обработчик ошибок
// Aborts the process with a file/line diagnostic when a CUDA call fails;
// a success status passes through silently. Used via the HANDLE_ERROR macro.
static void HandleError(cudaError_t err, const char *file, int line)
{
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err),file, line);
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR( error ) (HandleError( error, __FILE__, __LINE__ ))
// Computes the first time layer (t = tau) of the wave equation from the
// Taylor expansion: U1 = U0 + tau*PSI + 0.5*tau^2*(F + a^2 * Laplacian(PHI)),
// with the Laplacian discretized by central differences on steps h1, h2.
// Launch: 2-D grid covering the (N1n-2)x(N2n-2) interior; the +1 offsets
// skip the boundary row/column and the guard drops threads past the interior.
__global__ void first_layer_kernel(double *U,double *Uprev,double tau,double a, int N1n,int N2n, double h1, double h2)
{
int i=threadIdx.x+blockIdx.x*blockDim.x+1;
int j=threadIdx.y+blockIdx.y*blockDim.y+1;
if((i < N1n-1)&&(j<N2n-1))
U[i*N2n+j]=Uprev[i*N2n+j]+tau*PSI_GPU(i*h1,j*h2)+
tau*tau*0.5*F_GPU(i*h1,j*h2,0.0)+
a*a*tau*tau*0.5*((PHI_GPU((i+1)*h1,j*h2)-2.0*PHI_GPU(i*h1,j*h2)+PHI_GPU((i-1)*h1,j*h2))/(h1*h1)+(PHI_GPU(i*h1,(j+1)*h2)-2.0*PHI_GPU(i*h1,j*h2)+PHI_GPU(i*h1,(j-1)*h2))/(h2*h2));
}
// Builds the right-hand side F for the implicit step to layer t+tau from the
// two previous layers: F = Uprev/tau^2 - 2*U/tau^2 - f(x,y,t+tau).
// Interior nodes only (same indexing/guard convention as first_layer_kernel);
// boundary entries of F are left untouched and never read by iter_kernel.
__global__ void calc_F_kernel(double *U,double *Uprev,double *F,double tau,double t, int N1n,int N2n, double h1, double h2)
{
int i=threadIdx.x+blockIdx.x*blockDim.x+1;
int j=threadIdx.y+blockIdx.y*blockDim.y+1;
if((i < N1n-1)&&(j<N2n-1))
F[i*N2n+j]=Uprev[i*N2n+j]/(tau*tau)-2.0*U[i*N2n+j]/(tau*tau)-F_GPU(i*h1,j*h2,t+tau);
}
// One Jacobi sweep for the implicit layer:
//   Unext = Uprev + D^-1 * (F - A*Uprev),
// where M packs the 5-point stencil per node as
// [ (i-1,j), (i,j-1), diagonal, (i,j+1), (i+1,j) ] and M[...*5+2] is the
// diagonal D. The per-node update magnitude is stored in errdev so the host
// can take its maximum as the convergence criterion.
// NOTE(review): only interior entries of errdev are written here — the host
// must zero the buffer once before scanning it for the maximum.
__global__ void iter_kernel(double *Unext,double *Uprev,double *F,double *M,double *errdev, int N1n,int N2n)
{
int i=threadIdx.x+blockIdx.x*blockDim.x+1;
int j=threadIdx.y+blockIdx.y*blockDim.y+1;
if((i < N1n-1)&&(j<N2n-1))
{
Unext[i*N2n+j]=Uprev[i*N2n+j]+1.0/M[(i*N2n+j)*5+2]*(F[i*N2n+j]-M[(i*N2n+j)*5]*Uprev[(i-1)*N2n+j]-M[(i*N2n+j)*5+1]*Uprev[i*N2n+j-1]-M[(i*N2n+j)*5+2]*Uprev[i*N2n+j]-M[(i*N2n+j)*5+3]*Uprev[i*N2n+j+1]-M[(i*N2n+j)*5+4]*Uprev[(i+1)*N2n+j]);
errdev[i*N2n+j]=abs(Unext[i*N2n+j]-Uprev[i*N2n+j]);
}
// Solves the 2-D wave equation on [0,L1]x[0,L2] up to time T on the GPU.
// Layer 1 comes from an explicit Taylor step (first_layer_kernel); each later
// layer solves an implicit 5-point system by Jacobi iteration (iter_kernel)
// until the largest update, found with a thrust reduction over errdev, drops
// below EPS. Returns the event-measured GPU time in seconds.
// Fix vs. original: errdev is now zeroed after allocation — iter_kernel
// writes only interior entries, but the thrust::max_element scan reads almost
// the whole buffer, so the convergence test previously read uninitialized
// device memory.
double solveGPU(double a,double L1,double L2,double T,double tau,int N1,int N2)
{
double *Unext,*U,*Uprev,*Uloc,*errdev,*M,*Mdev,*F;
double h1=L1/N1,h2=L2/N2,maxerr=0;
int N1n=N1+1,N2n=N2+1;
double t=tau;
float gputime=0.0;
size_t size=N1n*N2n*sizeof(double);
// One thread per interior node; ceil-div the interior by the NXxNY block.
dim3 threads(NX,NY,1),blocks((N1-1)%NX==0?(N1-1)/NX:(N1-1)/NX+1,(N2-1)%NY==0?(N2-1)/NY:(N2-1)/NY+1,1);
Uloc=new double[N1n*N2n];
M=new double[N1n*N2n*5];
HANDLE_ERROR( cudaMalloc(&U,size) );
HANDLE_ERROR( cudaMalloc(&Unext,size) );
HANDLE_ERROR( cudaMalloc(&Uprev,size) );
HANDLE_ERROR( cudaMalloc(&Mdev,size*5) );
HANDLE_ERROR( cudaMalloc(&errdev,size) );
// Zero errdev so the untouched boundary entries compare as 0 in the
// convergence scan below (see function comment).
HANDLE_ERROR( cudaMemset(errdev,0,size) );
HANDLE_ERROR( cudaMalloc(&F,size) );
thrust::device_ptr<double> err_ptr = thrust::device_pointer_cast(errdev);
#ifdef WRITE_TO_FILE
ofstream ofile("../datagpu.dat");
ofile.precision(16);
#endif
// Layer t=0: initial condition U = PHI, built on the host.
for(int i=0;i<N1n;i++)
{
for(int j=0;j<N2n;j++)
{
Uloc[i*N2n+j]=PHI_CPU(i*h1,j*h2);
#ifdef WRITE_TO_FILE
ofile<<Uloc[i*N2n+j]<<' ';
#endif
}
#ifdef WRITE_TO_FILE
ofile<<endl;
#endif
}
#ifdef WRITE_TO_FILE
ofile<<endl;
ofile<<endl;
#endif
HANDLE_ERROR( cudaMemcpy(Uprev,Uloc,size,cudaMemcpyHostToDevice) );
HANDLE_ERROR( cudaMemcpy(U,Uprev,size,cudaMemcpyDeviceToDevice) );
HANDLE_ERROR( cudaMemcpy(Unext,Uprev,size,cudaMemcpyDeviceToDevice) );
// Layer t=tau (explicit Taylor step); also starts the timing window.
cudaEvent_t start,stop;
HANDLE_ERROR( cudaEventCreate(&start) );
HANDLE_ERROR( cudaEventCreate(&stop) );
HANDLE_ERROR( cudaEventRecord(start) );
first_layer_kernel<<<blocks,threads>>>(U,Uprev,tau,a,N1n,N2n,h1,h2/*,phi,psi,f*/);
HANDLE_ERROR( cudaGetLastError() );
HANDLE_ERROR( cudaDeviceSynchronize() );
#ifdef WRITE_TO_FILE
HANDLE_ERROR( cudaMemcpy(Uloc,U,size,cudaMemcpyDeviceToHost) );
for(int i=0;i<N1n;i++)
{
for(int j=0;j<N2n;j++)
ofile<<Uloc[i*N2n+j]<<' ';
ofile<<endl;
}
ofile<<endl;
ofile<<endl;
#endif
// Assemble the 5-point system matrix (identity rows on the boundary).
for(int i=0;i<N1n;i++)
for(int j=0;j<N2n;j++)
if((i==0)||(j==0)||(i==N1)||(j==N2))
{
M[(i*N2n+j)*5]=0.0;
M[(i*N2n+j)*5+1]=0.0;
M[(i*N2n+j)*5+2]=1.0;
M[(i*N2n+j)*5+3]=0.0;
M[(i*N2n+j)*5+4]=0.0;
}
else
{
M[(i*N2n+j)*5]=a*a/(h1*h1);
M[(i*N2n+j)*5+1]=a*a/(h2*h2);
M[(i*N2n+j)*5+2]=-2.0*a*a*(1.0/(h1*h1)+1.0/(h2*h2))-1.0/(tau*tau);
M[(i*N2n+j)*5+3]=a*a/(h2*h2);
M[(i*N2n+j)*5+4]=a*a/(h1*h1);
}
HANDLE_ERROR( cudaMemcpy(Mdev,M,5*size,cudaMemcpyHostToDevice) );
// Main time loop: build RHS, then Jacobi-iterate to convergence.
while(t<T-0.5*tau)
{
calc_F_kernel<<<blocks,threads>>>(U,Uprev,F,tau,t,N1n,N2n,h1,h2);
HANDLE_ERROR( cudaGetLastError() );
HANDLE_ERROR( cudaDeviceSynchronize() );
HANDLE_ERROR( cudaMemcpy(Uprev,U,size,cudaMemcpyDeviceToDevice) );
do{
iter_kernel<<<blocks,threads>>>(Unext,Uprev,F,Mdev,errdev,N1n,N2n);
HANDLE_ERROR( cudaGetLastError() );
HANDLE_ERROR( cudaDeviceSynchronize() );
thrust::device_ptr<double> max_ptr = thrust::max_element(err_ptr+1, err_ptr + N1n*N2n-1);
maxerr=max_ptr[0];
swap(Uprev,Unext);
}while(maxerr>EPS);
#ifdef WRITE_TO_FILE
HANDLE_ERROR( cudaMemcpy(Uloc,Unext,size,cudaMemcpyDeviceToHost) );
for(int i=0;i<N1n;i++)
{
for(int j=0;j<N2n;j++)
ofile<<Uloc[i*N2n+j]<<' ';
ofile<<endl;
}
ofile<<endl;
ofile<<endl;
#endif
t+=tau;
swap(Uprev,U);
}
// NOTE(review): after the swaps the converged layer lives in U, while Unext
// holds the second-to-last Jacobi iterate; Uloc is not used afterwards, but
// confirm which buffer is intended before using this copy for output.
HANDLE_ERROR( cudaMemcpy(Uloc,Unext,size,cudaMemcpyDeviceToHost) );
HANDLE_ERROR( cudaEventRecord(stop) );
HANDLE_ERROR( cudaEventSynchronize(stop) );
HANDLE_ERROR( cudaEventElapsedTime(&gputime,start,stop) );
HANDLE_ERROR( cudaFree(U) );
HANDLE_ERROR( cudaFree(Unext) );
HANDLE_ERROR( cudaFree(Uprev) );
HANDLE_ERROR( cudaFree(Mdev) );
HANDLE_ERROR( cudaFree(errdev) );
HANDLE_ERROR( cudaFree(F) );
HANDLE_ERROR( cudaEventDestroy(start) );
HANDLE_ERROR( cudaEventDestroy(stop) );
delete[] Uloc;
delete[] M;
#ifdef WRITE_TO_FILE
ofile.close();
#endif
return (double)gputime/1000.0;
}
// CPU reference solver for the same problem as solveGPU: layer 1 by an
// explicit Taylor step, later layers by Jacobi iteration on the implicit
// 5-point system until the largest update is below EPS.
// Returns elapsed CPU seconds (initialization before the first layer is
// excluded, mirroring the original timing window).
// Fixes vs. original: the clock() start tick was stored in a float, losing
// precision once clock() exceeds ~2^24 ticks — it is now kept in clock_t;
// a large block of dead commented-out code was removed.
float solveCPU(double a,double L1,double L2,double T,double tau,int N1,int N2)
{
#ifdef WRITE_TO_FILE
ofstream ofile("../datacpu.dat");
ofile.precision(16);
#endif
double *Uprev,*U,*Unext,*F;
double *M;
int N1n=N1+1,N2n=N2+1;
double h1=L1/N1,h2=L2/N2,t=tau;
Uprev=new double[N1n*N2n];
U=new double[N1n*N2n];
F=new double[N1n*N2n];
Unext=new double[N1n*N2n];
M=new double[N1n*N2n*5];
double maxerr;
// Layer t=0: initial condition U = PHI.
for(int i=0;i<N1n;i++)
{
for(int j=0;j<N2n;j++)
{
Uprev[i*N2n+j]=PHI_CPU(i*h1,j*h2);
#ifdef WRITE_TO_FILE
ofile<<Uprev[i*N2n+j]<<' ';
#endif
}
#ifdef WRITE_TO_FILE
ofile<<endl;
#endif
}
#ifdef WRITE_TO_FILE
ofile<<endl;
ofile<<endl;
#endif
clock_t start=clock(); // clock_t, not float: avoids precision loss
// Layer t=tau: explicit Taylor step; boundaries keep the initial values.
for(int i=0;i<N1n;i++)
{
for(int j=0;j<N2n;j++)
{
if((i==0)||(j==0)||(i==N1)||(j==N2))
{
U[i*N2n+j]=Uprev[i*N2n+j];
Unext[i*N2n+j]=Uprev[i*N2n+j];
}
else
{
U[i*N2n+j]=Uprev[i*N2n+j]+tau*PSI_CPU(i*h1,j*h2)+
tau*tau*0.5*F_CPU(i*h1,j*h2,0.0)+
a*a*tau*tau*0.5*((PHI_CPU((i+1)*h1,j*h2)-2.0*PHI_CPU(i*h1,j*h2)+PHI_CPU((i-1)*h1,j*h2))/(h1*h1)+(PHI_CPU(i*h1,(j+1)*h2)-2.0*PHI_CPU(i*h1,j*h2)+PHI_CPU(i*h1,(j-1)*h2))/(h2*h2));
}
}
}
#ifdef WRITE_TO_FILE
for(int i=0;i<N1n;i++)
{
for(int j=0;j<N2n;j++)
ofile<<U[i*N2n+j]<<' ';
ofile<<endl;
}
ofile<<endl;
ofile<<endl;
#endif
// Assemble the 5-point system matrix (identity rows on the boundary);
// same coefficients as the GPU path.
for(int i=0;i<N1n;i++)
for(int j=0;j<N2n;j++)
if((i==0)||(j==0)||(i==N1)||(j==N2))
{
M[(i*N2n+j)*5]=0.0;
M[(i*N2n+j)*5+1]=0.0;
M[(i*N2n+j)*5+2]=1.0;
M[(i*N2n+j)*5+3]=0.0;
M[(i*N2n+j)*5+4]=0.0;
}
else
{
M[(i*N2n+j)*5]=a*a/(h1*h1);
M[(i*N2n+j)*5+1]=a*a/(h2*h2);
M[(i*N2n+j)*5+2]=-2.0*a*a*(1.0/(h1*h1)+1.0/(h2*h2))-1.0/(tau*tau);
M[(i*N2n+j)*5+3]=a*a/(h2*h2);
M[(i*N2n+j)*5+4]=a*a/(h1*h1);
}
// Main time loop: build RHS, then Jacobi-iterate to convergence.
while(t<T-0.5*tau)
{
for(int i=1;i<N1n-1;i++)
for(int j=1;j<N2n-1;j++)
F[i*N2n+j]=Uprev[i*N2n+j]/(tau*tau)-2.0*U[i*N2n+j]/(tau*tau)-F_CPU(i*h1,j*h2,t+tau);
memcpy(Uprev,U,N1n*N2n*sizeof(double));
do{
maxerr=0;
for(int i=1;i<N1n-1;i++)
for(int j=1;j<N2n-1;j++)
Unext[i*N2n+j]=Uprev[i*N2n+j]+1.0/M[(i*N2n+j)*5+2]*(F[i*N2n+j]-M[(i*N2n+j)*5]*Uprev[(i-1)*N2n+j]-M[(i*N2n+j)*5+1]*Uprev[i*N2n+j-1]-M[(i*N2n+j)*5+2]*Uprev[i*N2n+j]-M[(i*N2n+j)*5+3]*Uprev[i*N2n+j+1]-M[(i*N2n+j)*5+4]*Uprev[(i+1)*N2n+j]);
for(int i=0;i<N1n*N2n;i++)
{
double err=abs(Unext[i]-Uprev[i]);
if(err>maxerr)maxerr=err;
}
swap(Uprev,Unext);
}while(maxerr>EPS);
t+=tau;
swap(Uprev,U);
#ifdef WRITE_TO_FILE
for(int i=0;i<N1n;i++)
{
for(int j=0;j<N2n;j++)
ofile<<U[i*N2n+j]<<' ';
ofile<<endl;
}
ofile<<endl;
ofile<<endl;
#endif
}
float cputime=(double)(clock()-start)/CLOCKS_PER_SEC;
#ifdef WRITE_TO_FILE
ofile.close();
#endif
delete[] Uprev;
delete[] U;
delete[] Unext;
delete[] M;
delete[] F;
return cputime;
}
// Runs the CPU and GPU solvers on an identical problem and reports the
// wall-clock times plus their speedup ratio.
int main()
{
    const float cpu = solveCPU(1.0,1.0,1.0,10,0.01,500,500);
    cout<<"CPU Time: "<<cpu<<endl;
    const float gpu = solveGPU(1.0,1.0,1.0,10,0.01,500,500);
    cout<<"GPU Time: "<<gpu<<endl;
    cout<<"Max ratio:"<<cpu/gpu<<endl;
    return 0;
}
|
5,612 | // Simulator a lottery system
//
// Author: Yili Zou
//
// For the GPU Programming class, NDSU Spring '14
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <curand_kernel.h>
#define FILE_CREATE_ERROR -1
#define Number_Max 9
#define Number_Min 0
#define THREADS_PER_BLOCK 10 // Setting the grid up
#define BLOCKS_PER_GRID 1
#define OFFSET 0 // No offset
// Seeds one curand generator per thread. The sequence number is the global
// thread id, so each thread draws from an uncorrelated stream.
__global__ void Setup_RNG(curandState *state, int seed)
{
    const int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
    curand_init(seed, id, OFFSET, &state[id]);
}
// Draws one lottery digit in [Number_Min, Number_Max] per thread.
// Fix vs. original: dividing curand()'s full 32-bit range by (RAND_MAX/5)
// produced values 0..10 (11 outcomes — off by one vs. the intended 0..9)
// and silently depended on the host C library's RAND_MAX. A modulo over the
// digit count yields exactly the documented range.
__global__ void RNG(curandState *state, int *result)
{
    int id_k = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; // global thread id, computed once
    curandState localState = state[id_k]; // local copy: fewer global-memory accesses
    result[id_k] = curand(&localState) % (Number_Max - Number_Min + 1) + Number_Min;
    state[id_k] = localState; // write the advanced state back so the next draw is uncorrelated
}
// One thread per position: writes 1 to Matching_numbers[t] when the drawn
// number equals the user's number at position t, otherwise 0. The host sums
// this array to count matches.
__global__ void Number_Matching(int *lucky_numbers, int *user_numbers, int *Matching_numbers)
{
    const int pos = threadIdx.x;
    Matching_numbers[pos] = (lucky_numbers[pos] == user_numbers[pos]) ? 1 : 0;
}
// Lottery simulator: draws 10 random digits on the GPU, reads 10 digits from
// the user, counts position-wise matches with a kernel, and reports them.
// Fixes vs. original: randomnumbers_d and states_d were never freed;
// non-numeric stdin made the validation loop spin forever (scanf return value
// was ignored); "Conflagrations" typo in the jackpot message.
int main()
{
    //the array to store users number
    int user_number[10];
    //the array to store the lucky number
    int price_number[10];
    //device buffer for the generated random numbers
    int *randomnumbers_d;
    //how many numbers are matching
    int numbers_matching[10];
    //device-side copies for the matching kernel
    int *price_number_d;
    int *user_number_d;
    int *numbers_matching_d;
    //RNG states
    curandState *states_d;
    // Allocate memory on the device
    cudaMalloc((void **)&randomnumbers_d, THREADS_PER_BLOCK*sizeof( int));
    cudaMalloc((void **)&states_d, THREADS_PER_BLOCK*sizeof(curandState));
    // Set up grid and block
    dim3 dimGrid(BLOCKS_PER_GRID);
    dim3 dimBlock(THREADS_PER_BLOCK);
    // Seed, then draw one digit per thread
    Setup_RNG<<<dimGrid, dimBlock>>>(states_d, time(NULL));
    RNG<<<dimGrid, dimBlock>>>(states_d, randomnumbers_d); // Launch RNG
    //copy results back (cudaMemcpy implicitly synchronizes with the kernels)
    cudaMemcpy(price_number, randomnumbers_d, THREADS_PER_BLOCK*sizeof(int), cudaMemcpyDeviceToHost);
    //these buffers are not needed any more — the original leaked both
    cudaFree(randomnumbers_d);
    cudaFree(states_d);
    //user interface
    printf("\nThe 10 lucky number have been generated, please input your lucky numbers!!(from 0 to 9)\n");
    int input; //the input by users
    for(int i=0; i<10; i++)
    {
        while(1)
        {
            if(scanf("%d", &input) != 1)
            {
                scanf("%*s"); // discard the non-numeric token so we don't loop forever
                printf("\n Please enter numbers within 0 to 9!\n");
                continue;
            }
            if(input<0 || input >9)
            {
                printf("\n Please enter numbers within 0 to 9!\n"); //encourage a valid number
            }
            else
                break;
        }
        user_number[i]=input;
    }
    printf("\nYour lucky numbers have been picked, waiting for results\n");
    // Allocate memory on the device
    cudaMalloc((void **)&numbers_matching_d, 10*sizeof( int));
    cudaMalloc((void **)&price_number_d, THREADS_PER_BLOCK*sizeof( int));
    cudaMalloc((void **)&user_number_d, THREADS_PER_BLOCK*sizeof( int));
    //copy the parameters to the device
    cudaMemcpy(user_number_d, user_number, THREADS_PER_BLOCK*sizeof(int ), cudaMemcpyHostToDevice);
    cudaMemcpy(price_number_d, price_number, THREADS_PER_BLOCK*sizeof(int ), cudaMemcpyHostToDevice);
    //Launch number matching kernel
    Number_Matching<<<dimGrid, dimBlock>>>(price_number_d, user_number_d, numbers_matching_d);
    //copy the result
    cudaMemcpy(numbers_matching, numbers_matching_d, 10*sizeof(int), cudaMemcpyDeviceToHost);
    //clean up memory
    cudaFree(numbers_matching_d);
    cudaFree(price_number_d);
    cudaFree(user_number_d);
    //how many numbers matching
    int nMatch=0;
    //show result
    printf("lucky numbers:\n");
    for(int i=0; i<10; i++)
    {
        nMatch+=numbers_matching[i]; // each entry is 0 or 1, so the sum is the match count
        printf("%d\n",price_number[i]);
    }
    printf("\n You have %d numbers matching!", nMatch);
    if(nMatch==10) //all ten match: jackpot (very unlikely)
    {
        printf("\n Congratulations! You have won 1 Million dollars! \n");
    }
    return EXIT_SUCCESS;
}
|
5,613 | // **********************************************************************************************************
// PURPOSE : Index calculation for array to access data from kernel. *
// LANGUAGE : CUDA C / CUDA C++ *
// ASSUMPTIONS : This code is part of CUDA fundamentals section. *
// DATE : 24 March 2020 *
// AUTHOR : Vaibhav BENDRE (vaibhav.bendre7520@gmail.com) *
// *
// **********************************************************************************************************
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
#include<stdlib.h>
#include<iostream>
// Prints each thread's threadIdx.x and the array element it indexes.
// Only meaningful for single-block launches: threadIdx.x alone is not a
// unique index across blocks.
__global__ void uniqueIdxCalcUsingThreadIdx(int* arr) {
	const unsigned int idx = threadIdx.x;
	const int value = arr[idx];
	printf("threadIdx : %d, value : %d \n", idx, value);
}
// Uploads a 16-element array to the device and has each of 16 threads print
// the element at its threadIdx.x.
// Fix vs. original: the device buffer was never cudaFree'd.
int main() {
	int arrSize{ 16 };
	int arrByteSize{ static_cast<int>(sizeof(int))* arrSize };
	int arrData[]{ 23,9,4,53,65,12,1,33,87,45,23,12,342,56,44,99 };
	// echo the host data first for comparison with the kernel output
	for ( int iCounter{ 0 }; iCounter < arrSize; ++iCounter) {
		std::cout << arrData[iCounter] << " ";
	}
	std::cout << "\n\n\n";
	int* data;
	cudaMalloc((void**)&data, arrByteSize);
	cudaMemcpy(data, arrData, arrByteSize, cudaMemcpyHostToDevice);
	dim3 block{ 16 };
	dim3 grid{ 1 };
	uniqueIdxCalcUsingThreadIdx <<< grid, block >>> (data);
	cudaDeviceSynchronize(); // flush device-side printf before exiting
	cudaFree(data); // release the device buffer (original leaked it)
	cudaDeviceReset();
	return 0;
}
5,614 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
//#include <chrono>
#include <time.h>
#include <cmath>
//#include <random>
using namespace std;
#define BLOCK_SIZE 16
//mt19937_64 rd;
typedef struct
{
int height, width;
float *elements;
}Matrix;
__global__ void MatAddKernel(const Matrix, const Matrix, Matrix);
// Verifies elementwise that C equals A + B within eps; returns false at the
// first element that disagrees.
bool check(const Matrix A, const Matrix B, const Matrix C)
{
    const float eps = 1e-10;
    const size_t total = A.height * A.width;
    for (size_t idx = 0; idx < total; ++idx)
    {
        const float diff = A.elements[idx] + B.elements[idx] - C.elements[idx];
        if (fabs(diff) > eps)
            return false;
    }
    return true;
}
// Adds two host matrices on the GPU: C = A + B.
// Copies A and B to the device, launches MatAddKernel on a 2-D grid, copies
// the result back, and prints the event-measured kernel time.
// Fixes vs. original: device buffers and both events are now released
// (all were leaked), and kernel launch errors surface via cudaGetLastError().
void MatAdd(const Matrix A, const Matrix B, Matrix C)
{
    Matrix d_A, d_B, d_C;
    size_t total_size = sizeof(float) * A.height * A.width;
    d_A.height = A.height; d_A.width = A.width;
    cudaError_t err = cudaMalloc(&d_A.elements, total_size);
    if (err != cudaSuccess)
    {
        printf("fail to malloc A: %s", cudaGetErrorString(err));
    }
    err = cudaMemcpy(d_A.elements, A.elements, total_size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        printf("fail to memcpy A: %s\n", cudaGetErrorString(err));
    }
    d_B.height = B.height; d_B.width = B.width;
    err = cudaMalloc(&d_B.elements, total_size);
    if (err != cudaSuccess)
    {
        printf("fail to malloc B: %s", cudaGetErrorString(err));
    }
    err = cudaMemcpy(d_B.elements, B.elements, total_size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        printf("fail to memcpy B: %s\n", cudaGetErrorString(err));
    }
    d_C.height = C.height; d_C.width = C.width;
    err = cudaMalloc(&d_C.elements, total_size);
    if (err != cudaSuccess)
    {
        printf("fail to malloc C: %s", cudaGetErrorString(err));
    }
    // One thread per element; ceil-div so ragged edges are covered.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((A.width + dimBlock.x - 1) / dimBlock.x,
    (A.height + dimBlock.y - 1) / dimBlock.y);
    // Time region begins
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    MatAddKernel<<<dimGrid, dimBlock>>> (d_A, d_B, d_C);
    err = cudaGetLastError(); // launch-configuration errors are only visible here
    if (err != cudaSuccess)
    {
        printf("fail to launch kernel: %s\n", cudaGetErrorString(err));
    }
    cudaEventRecord(stop);
    cudaMemcpy(C.elements, d_C.elements, total_size, cudaMemcpyDeviceToHost);
    err = cudaEventSynchronize(stop);
    if (err == cudaSuccess)
    {
        printf("computing done\n");
    }
    else
    {
        printf("fail to compute\n");
    }
    // Time region ends
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("total time: %.4f ms\n", milliseconds);
    // Release device memory and events (all leaked in the original).
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
// Elementwise matrix addition, one thread per element: C = A + B.
// The guard handles grids that overhang the matrix edges.
__global__ void MatAddKernel(const Matrix A, const Matrix B, Matrix C)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x; // column
    const int y = blockIdx.y * blockDim.y + threadIdx.y; // row
    if (x >= A.width || y >= A.height)
        return;
    C.elements[y * C.width + x] = A.elements[y * A.width + x] + B.elements[y * B.width + x];
}
// Fills every element of A with a pseudo-random value from rand()
// (implicitly converted to float).
void generate_matrix(Matrix &A)
{
    size_t total = A.height * A.width;
    for (size_t k = 0; k < total; ++k)
        A.elements[k] = rand();
}
// Parses HEIGHT WIDTH SEED, generates two random matrices, adds them on the
// GPU via MatAdd, and checks the result against a CPU reference.
// Fixes vs. original: the SEED argument was parsed but never used, so every
// run produced identical matrices (srand added); host buffers are now freed.
int main(int argc, char **argv)
{
    /* Usage:
    EXE HEIGHT WIDTH SEED
    */
    if (argc < 4)
    {
        printf("Usage: %s HEIGHT WIDTH SEED\n", argv[0]);
        return 0;
    }
    // To parse arguments
    int height = atoi(argv[1]);
    int width = atoi(argv[2]);
    int seed = atol(argv[3]);
    // Seed the C PRNG used by generate_matrix (was missing).
    srand(seed);
    // To set up and generate random matrices
    Matrix A, B, C;
    size_t matrix_size = sizeof(float) * height * width;
    A.height = height; A.width = width;
    A.elements = static_cast<float*>(malloc(matrix_size));
    generate_matrix(A);
    B.height = height; B.width = width;
    B.elements = static_cast<float*>(malloc(matrix_size));
    generate_matrix(B);
    C.height = height; C.width = width;
    C.elements = static_cast<float*>(malloc(matrix_size));
    printf("Matrices are generated.\n");
    MatAdd(A, B, C);
    if (check(A, B, C))
    {
        printf("check succeed\n");
    }
    else
    {
        printf("check failed\n");
    }
    // Release host buffers (original leaked them).
    free(A.elements);
    free(B.elements);
    free(C.elements);
    return 0;
}
|
5,615 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <math.h>
#include <time.h>
#include <curand_kernel.h>
#include <sys/time.h>
#define TRUE 1
unsigned int EMPTY = UINT_MAX;
char str[200];
int i, j, k, e, p, na;
int nb; // numero de blocos a serem usados no kernel
FILE *fp;
int line=1;
int nnos, idx_ni, nfol; // numero de nos, indice de nos internos, numero de folhas
int hnnos; // tamanho da tabela hash
int ennos; // tamanho do vetor com as distancias entre as especies (matriz triangular superior)
int pos_ins, n_ins; // posicao de insercoes e numero de insercoes
int *nz; // contem indice do no; para os nos a serem inseridos, contem o indice do ponto de insercao
// para os nos internos a serem usados na insercao, contem -2
float *nz_br; // distancia do ramo (branch)
float *nz_dr; // distancias ate o no raiz
float *nz_de; // distancias entre especies
int *nz_qf; // altura do no
int *nz_qe; // quantidade de especies abaixo do no
int *nz_p; // pai dono
int *nz_f1; // filho da esquerda do no
int *nz_f2; // filho da direita do no
unsigned int *nz_sig; // assinatura do no - da o caminho em bits ate o raiz
unsigned int *nz_hsig; // hash da assinatura do no
unsigned int *nz_hval; // indice do no na tabela hash
long long GPU_start_time;
long long GPU_time;
// pointers to GPU memory
int *nz_d;
float *nz_br_d;
float *nz_dr_d;
float *nz_de_d;
int *nz_qf_d;
int *nz_qe_d;
int *nz_p_d;
int *nz_f1_d;
int *nz_f2_d;
unsigned int *nz_sig_d;
unsigned int *nz_hsig_d;
unsigned int *nz_hval_d;
//int pos_ins_d, idx_ni_d;
//
char *symb, **nz_sy, **n_arq;
char str_tmp[100];
char str_float[30];
int nbint, nbuint, nbhuint, nbfloat, nbefloat; // tamanho em bytes dos tipos basicos
curandState *seed_d;
float zero = 0.0; // para facilitar impressao da matriz de distancias
char arquivo[100];
// Forward function declarations
long long start_timer();
long long stop_timer(long long start_time, char *name);
// print tree in newick format
char *toNewick(int raiz);
// find next prime number greater than n
int nextprime( int n );
// kernel
__global__ void Mutate_tree_Gpu(int nnos, int hnnos, int pos_ins, int idx_ni, int *nz, float *nz_br, float *nz_dr, float *nz_de, int *nz_qf, int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2, unsigned int *nz_sig, unsigned int *nz_hsig, unsigned int *nz_hval, curandState *states, unsigned long seed);
// auxiliary kernel functions
__device__ int quadratic_probing_insert(unsigned int *nz_hsig, unsigned int *nz_hval, unsigned int sig, int val, int hnnos);
__device__ int quadratic_probing_search(unsigned int *nz_hsig, unsigned int *nz_hval, unsigned int sig, int hnnos);
// Main program
int main()
{
symb = (char *) malloc(50);
fp = fopen("wellParser.out", "r");
if (fp == NULL) {
printf("\nCannot open file\n");
exit(0);
}
fscanf(fp,"%d %d", &nb, &nnos); // numero de (arvores) blocos a serem usados no kernel
printf("Num. arvores: %d, cada uma com %d nós\n", nb, nnos);
nfol = nnos / 2;
nz = (int *) malloc(nnos * sizeof(int));
nz_sy = (char **) malloc(nnos * sizeof(char *));
nz_dr = (float *) malloc(nnos * sizeof(float));
ennos = (nfol * (nfol - 1)) / 2;
nz_de = (float *) malloc(ennos * sizeof(float));
nz_br = (float *) malloc(nnos * sizeof(float));
nz_qf = (int *) malloc(nnos * sizeof(int));
nz_qe = (int *) malloc(nnos * sizeof(int));
nz_p = (int *) malloc(nnos * sizeof(int));
nz_f1 = (int *) malloc(nnos * sizeof(int));
nz_f2 = (int *) malloc(nnos * sizeof(int));
hnnos = nextprime(2*nnos);
nz_sig = (unsigned int *) malloc(hnnos * sizeof(unsigned int));
nz_hsig = (unsigned int *) malloc(hnnos * sizeof(unsigned int));
nz_hval = (unsigned int *) malloc(hnnos * sizeof(unsigned int));
n_arq = (char **) malloc(nb * sizeof(char *)); // guarda nome dos arquivos das arvores
nbint = nnos * sizeof(int);
nbuint = nnos * sizeof(unsigned int);
nbhuint = hnnos * sizeof(unsigned int);
nbfloat = nnos * sizeof(float);
nbefloat = ennos * sizeof(float);
cudaMalloc((void **)&nz_d, nb * nbint);
cudaMalloc((void **)&nz_br_d, nb * nbfloat);
cudaMalloc((void **)&nz_dr_d, nb * nbfloat);
cudaMalloc((void **)&nz_de_d, nb * nbefloat);
cudaMalloc((void **)&nz_qf_d, nb * nbint);
cudaMalloc((void **)&nz_qe_d, nb * nbint);
cudaMalloc((void **)&nz_p_d, nb * nbint);
cudaMalloc((void **)&nz_f1_d, nb * nbint);
cudaMalloc((void **)&nz_f2_d, nb * nbint);
cudaMalloc((void **)&nz_sig_d, nb * nbuint);
cudaMalloc((void **)&nz_hsig_d, nb * nbhuint);
cudaMalloc((void **)&nz_hval_d, nb * nbhuint);
cudaMalloc((void **)&seed_d, nb * nnos*sizeof(curandState));
if( nz_d==0 || nz_br_d==0 || nz_dr_d==0 || nz_de_d==0 || nz_qf_d==0 || nz_qe_d==0 || nz_p_d==0 || nz_f1_d==0 || nz_f2_d==0 || nz_sig_d==0 || nz_hsig_d==0 || nz_hval_d==0 ) {
printf("couldn't allocate memory\n");
return 1;
}
////
for(na=0; na < nb; na++) { // na = numero de arvores
fscanf(fp,"%s", symb);
if (na == 0) { printf("Arquivo: %s\n", symb); }
n_arq[na] = (char *) malloc(50);
strcpy(n_arq[na], symb);
fscanf(fp,"%d %d", &nnos, &idx_ni);
if (na == 0) { printf("No nos: %d, Indice no interno: %d\n", nnos, idx_ni); }
fscanf(fp,"%d %d", &pos_ins, &n_ins);
if (na == 0) { printf("Pos inic: %d, No insercoes: %d\n", pos_ins, n_ins); }
if (na == 0) { printf("Arvore: "); }
for(i=0; i<nnos; i++) {
fscanf(fp,"%d", &nz[i]);
if (na == 0) { printf("%d ", nz[i]); }
}
if (na == 0) { printf("\n"); }
if (na == 0) { printf("Simbolos: "); }
for(i=0; i<nnos; i++) {
fscanf(fp,"%s", symb);
nz_sy[i] = (char *) malloc(50);
strcpy(nz_sy[i], symb);
if (na == 0) { printf("%s ", nz_sy[i]); }
}
if (na == 0) { printf("\n"); }
if (na == 0) { printf("Ramos: "); }
for(i=0; i<nnos; i++) {
fscanf(fp,"%f", &nz_br[i]);
if (na == 0) { printf("%f ", nz_br[i]); }
}
if (na == 0) { printf("\n"); }
if (na == 0) { printf("No Filhos: "); }
for(i=0; i<nnos; i++) {
fscanf(fp,"%d", &nz_qf[i]);
if (na == 0) { printf("%d ", nz_qf[i]); }
}
if (na == 0) { printf("\n"); }
if (na == 0) { printf("No Especies: "); }
for(i=0; i<nnos; i++) {
fscanf(fp,"%d", &nz_qe[i]);
if (na == 0) { printf("%d ", nz_qe[i]); }
}
if (na == 0) { printf("\n"); }
if (na == 0) { printf("Pais: "); }
for(i=0; i<nnos; i++) {
fscanf(fp,"%d", &nz_p[i]);
if (na == 0) { printf("%d ", nz_p[i]); }
}
if (na == 0) { printf("\n"); }
if (na == 0) { printf("Filhos 1: "); }
for(i=0; i<nnos; i++) {
fscanf(fp,"%d", &nz_f1[i]);
if (na == 0) { printf("%d ", nz_f1[i]); }
}
if (na == 0) { printf("\n"); }
if (na == 0) { printf("Filhos 2: "); }
for(i=0; i<nnos; i++) {
fscanf(fp,"%d", &nz_f2[i]);
if (na == 0) { printf("%d ", nz_f2[i]); }
}
if (na == 0) { printf("\n"); }
for(i=0; i<hnnos; i++) {
nz_sig[i] = 0;
}
for(i=0; i<hnnos; i++) {
nz_hsig[i] = (unsigned int) EMPTY;
nz_hval[i] = (unsigned int) EMPTY;
}
if (na == 0) { toNewick(nnos-1); printf(";\n"); }
if (na > 0) {
for(j = 0; j < nnos; j++) {
if (nz[j] >= 0) nz[j] = nz[j] + na*nnos;
if (nz_p[j] >= 0) nz_p[j] = nz_p[j] + na*nnos;
if (nz_f1[j] >= 0) nz_f1[j] = nz_f1[j] + na*nnos;
if (nz_f2[j] >= 0) nz_f2[j] = nz_f2[j] + na*nnos;
}
nz[nfol] = -na;
}
// move data to GPU
cudaMemcpy(nz_d + (na*nnos), nz, nbint, cudaMemcpyHostToDevice);
cudaMemcpy(nz_br_d + (na*nnos), nz_br, nbfloat, cudaMemcpyHostToDevice);
cudaMemcpy(nz_dr_d + (na*nnos), nz_dr, nbfloat, cudaMemcpyHostToDevice);
cudaMemcpy(nz_de_d + (na*ennos), nz_de, nbefloat, cudaMemcpyHostToDevice);
cudaMemcpy(nz_qf_d + (na*nnos), nz_qf, nbint, cudaMemcpyHostToDevice);
cudaMemcpy(nz_qe_d + (na*nnos), nz_qe, nbint, cudaMemcpyHostToDevice);
cudaMemcpy(nz_p_d + (na*nnos), nz_p, nbint, cudaMemcpyHostToDevice);
cudaMemcpy(nz_f1_d + (na*nnos), nz_f1, nbint, cudaMemcpyHostToDevice);
cudaMemcpy(nz_f2_d + (na*nnos), nz_f2, nbint, cudaMemcpyHostToDevice);
cudaMemcpy(nz_sig_d + (na*nnos), nz_sig, nbuint, cudaMemcpyHostToDevice);
cudaMemcpy(nz_hsig_d + (na*hnnos), nz_hsig, nbhuint, cudaMemcpyHostToDevice);
cudaMemcpy(nz_hval_d + (na*hnnos), nz_hval, nbhuint, cudaMemcpyHostToDevice);
}
fclose(fp);
//
GPU_start_time = start_timer();
// call kernel
Mutate_tree_Gpu<<<nb, nfol>>>(nnos, hnnos, pos_ins, idx_ni, nz_d, nz_br_d, nz_dr_d, nz_de_d, nz_qf_d, nz_qe_d, nz_p_d, nz_f1_d, nz_f2_d, nz_sig_d, nz_hsig_d, nz_hval_d, seed_d, time(NULL));
cudaThreadSynchronize(); // this is only needed for timing purposes
GPU_time = stop_timer(GPU_start_time, "\t Total");
//
p = nb-(nb/2)+(nb/4)-1;
// copy data back to the CPU
cudaMemcpy(nz, nz_d+p*nnos, nbint, cudaMemcpyDeviceToHost);
cudaMemcpy(nz_br, nz_br_d+p*nnos, nbfloat, cudaMemcpyDeviceToHost);
cudaMemcpy(nz_dr, nz_dr_d+p*nnos, nbfloat, cudaMemcpyDeviceToHost);
cudaMemcpy(nz_de, nz_de_d+p*ennos, nbefloat, cudaMemcpyDeviceToHost);
cudaMemcpy(nz_qf, nz_qf_d+p*nnos, nbint, cudaMemcpyDeviceToHost);
cudaMemcpy(nz_qe, nz_qe_d+p*nnos, nbint, cudaMemcpyDeviceToHost);
cudaMemcpy(nz_p, nz_p_d+p*nnos, nbint, cudaMemcpyDeviceToHost);
cudaMemcpy(nz_f1, nz_f1_d+p*nnos, nbint, cudaMemcpyDeviceToHost);
cudaMemcpy(nz_f2, nz_f2_d+p*nnos, nbint, cudaMemcpyDeviceToHost);
cudaMemcpy(nz_sig, nz_sig_d+p*nnos, nbuint, cudaMemcpyDeviceToHost);
cudaMemcpy(nz_hsig, nz_hsig_d+p*hnnos, nbhuint, cudaMemcpyDeviceToHost);
cudaMemcpy(nz_hval, nz_hval_d+p*hnnos, nbhuint, cudaMemcpyDeviceToHost);
if (p > 0) {
for(j = 0; j < nnos; j++) {
if (nz[j] >= 0) nz[j] = nz[j] - p*nnos;
if (nz_p[j] >= 0) nz_p[j] = nz_p[j] - p*nnos;
if (nz_f1[j] >= 0) nz_f1[j] = nz_f1[j] - p*nnos;
if (nz_f2[j] >= 0) nz_f2[j] = nz_f2[j] - p*nnos;
}
}
printf("Arquivo: %s\n", n_arq[p]);
toNewick(nnos-1);
printf(";\n");
//
printf("Arvore: ");
for(i=0; i<nnos; i++) {
printf("%d ", nz[i]);
}
printf("\n");
//
printf("Pais: ");
for(i=0; i<nnos; i++) {
printf("%d ", nz_p[i]);
}
printf("\n");
printf("f1: ");
for(i=0; i<nnos; i++) {
printf("%d ", nz_f1[i]);
}
printf("\n");
printf("f2: ");
for(i=0; i<nnos; i++) {
printf("%d ", nz_f2[i]);
}
printf("\n");
printf("Dst Raiz: ");
for(i=0; i<nnos; i++) {
if (i == nfol) continue; // desconta o no da posicao nfol
printf("%.2f ", nz_dr[i]); // pois este nao e usado
}
printf("\n");
printf("Assinatura: ");
for(i=0; i<nnos; i++) {
if (i == nfol) continue;
printf("%u ", nz_sig[i]);
}
printf("\n");
printf("Hash Sign: ");
for(i=0; i<hnnos; i++) {
if (i == nfol) continue;
printf("%u ", nz_hsig[i]);
}
printf("\n");
printf("Hash Val: ");
for(i=0; i<hnnos; i++) {
if (i == nfol) continue;
printf("%u ", nz_hval[i]);
}
printf("\n");
e = 0; // indexa a matriz triangular superior (representada num array) que contem a distancia
// entre as especies
printf("Distancias: \n");
printf("%7s ", nz_sy[0]);
for(i=1; i<nfol; i++)
printf("%4s\t", nz_sy[i]);
printf("\n");
for(i=0; i<nfol; i++) {
for(j=0; j<=i; j++)
printf("%.5f\t", zero);
for(k=i+1; k<nfol; k++) {
printf("%.5f\t", nz_de[e]);
e++;
}
printf("\n");
}
printf("\n");
//
free(nz);
free(nz_br);
free(nz_dr);
free(nz_de);
free(nz_p);
free(nz_f1);
free(nz_f2);
free(nz_sig);
free(nz_hsig);
free(nz_hval);
free(symb);
free(nz_sy);
free(nz_qf);
free(nz_qe);
//
cudaFree(nz_d);
cudaFree(nz_br_d);
cudaFree(nz_dr_d);
cudaFree(nz_de_d);
cudaFree(nz_qf_d);
cudaFree(nz_qe_d);
cudaFree(nz_p_d);
cudaFree(nz_f1_d);
cudaFree(nz_f2_d);
cudaFree(nz_sig_d);
cudaFree(nz_hsig_d);
cudaFree(nz_hval_d);
//
return 0;
}
// Returns the current time in microseconds
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, char *name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
printf("%s: %.5f sec\n", name, ((float) (end_time - start_time)) / (1000 * 1000));
return end_time - start_time;
}
// Emits the subtree rooted at idRaiz in Newick format.
// Contract quirk (unchanged): a leaf builds its text in the shared global
// buffer str_tmp and RETURNS it, while an internal node PRINTS its text
// directly to stdout and returns "" — so callers must printf the result.
// Not reentrant: str_tmp / str_float are globals shared by every call.
char *toNewick(int idRaiz) {
  strcpy(str_tmp,"");
  strcpy(str_float,"");
  if (nz_f1[idRaiz] < 0) { // leaf: no children
    strcat (str_tmp, nz_sy[idRaiz]);   // species symbol
    strcat (str_tmp, ":");
    // sprintf(str_float,"%0.2f", nz_br[idRaiz]);
    sprintf(str_float,"%f", nz_br[idRaiz]); // branch length
    strcat (str_tmp, str_float);
    return str_tmp;
  } else { // internal node: has children
    printf("(");
    printf("%s", toNewick(nz_f1[idRaiz])); // left subtree
    printf(",");
    printf("%s", toNewick(nz_f2[idRaiz])); // right subtree
    printf(")");
    printf("%s", nz_sy[idRaiz]);
    printf(":");
    // sprintf(str_float,"%0.2f", nz_br[idRaiz]);
    sprintf(str_float,"%f", nz_br[idRaiz]);
    printf("%s", str_float);
    return "";
  }
}
// Returns the smallest prime >= n (and 2 for any n <= 2).
// Trial division by odd divisors up to sqrt(candidate); candidates advance by
// 2 so only odd numbers are tested after the initial adjustment.
// Improvement: the original depended on a project-local TRUE macro for its
// found-flag; plain int logic with an early return makes the routine
// self-contained with identical results.
int nextprime( int n ) {
  int candidate = n;
  if (candidate <= 2)
    return 2;
  if (candidate == 3)
    return 3;
  if (candidate % 2 == 0)
    candidate++;                 /* need an odd starting point */
  for ( ; ; candidate += 2) {
    int isPrime = 1;
    for (int divisor = 3; divisor * divisor <= candidate; divisor += 2) {
      if (candidate % divisor == 0) {
        isPrime = 0;             /* composite: try the next odd number */
        break;
      }
    }
    if (isPrime)
      return candidate;
  }
}
// Inserts (sig -> val) into this block's slice of the open-addressing hash
// table using quadratic probing. A slot is claimed atomically with atomicCAS
// against the EMPTY sentinel (UINT_MAX). Returns the global slot index, or -1
// if no free slot was found after hnnos probes.
// NOTE(review): nz_hval is written non-atomically after the CAS, so a search
// racing with an insert could observe the signature before the value — the
// kernel appears to separate the insert and search phases with __syncthreads,
// but confirm no other caller interleaves them.
__device__ int quadratic_probing_insert(unsigned int *nz_hsig, unsigned int *nz_hval, unsigned int sig, int val, int hnnos) {
  unsigned int j, hk, old;
  int ib = blockIdx.x; // block id: each block owns a table slice of hnnos slots
  j = 0;
  hk = sig % hnnos;
  while(j < hnnos) {
    old = atomicCAS(&nz_hsig[hk+ib*hnnos], UINT_MAX, sig); // claim the slot if it is empty (UINT_MAX = EMPTY)
    if (old == UINT_MAX) {
      nz_hval[hk+ib*hnnos] = val;
      return (hk+ib*hnnos);
    }
    j++;
    hk = (hk + j * j) % hnnos; // cumulative quadratic step (mirrored exactly by the search)
    // hk = (hk + j) % hnnos;
  }
  return (-1);
}
// Looks up `sig` in this block's slice of the hash table, following the same
// quadratic probe sequence as quadratic_probing_insert. Returns the stored
// value (a global node index) or -1 when the signature is not found within
// hnnos probes.
__device__ int quadratic_probing_search(unsigned int *nz_hsig, unsigned int *nz_hval, unsigned int sig, int hnnos) {
  unsigned int j, hk;
  int ib = blockIdx.x; // block id: each block owns a table slice of hnnos slots
  j = 0;
  hk = sig % hnnos;
  while(j < hnnos) {
    if (nz_hsig[hk+ib*hnnos] == sig) {
      return (nz_hval[hk+ib*hnnos]);
    }
    j++;
    hk = (hk + j * j) % hnnos; // same cumulative quadratic step as the insert
    // hk = (hk + j) % hnnos;
  }
  return (-1);
}
// These two functions map a linear array index onto the corresponding strictly
// upper-triangular matrix (no diagonal). For an n x n matrix the array holds n(n-1)/2 elements.
__host__ __device__ int row_index( int i, int M ){ // returns the row index
  // Closed-form inverse: solves the quadratic that relates a linear index i to
  // its row in the strictly upper-triangular (no-diagonal) layout of an M x M
  // matrix. The float equality test nudges exact-boundary roots down one row.
  // NOTE(review): float precision bounds how large M*i can get before the
  // sqrt argument loses the low bits — confirm acceptable for the tree sizes used.
  M--;
  float m = M;
  float row = (-2*m - 1 + sqrt( (4*m*(m+1) - 8*(float)i - 7) )) / -2;
  if( row == (float)(int) row ) row -= 1;
  return (int) row;
}
// Returns the column index of linear element i in the strictly
// upper-triangular (no-diagonal) layout of an M x M matrix; columns start one
// past the row's diagonal position.
__host__ __device__ int column_index( int i, int M ){
  int row = row_index(i, M);
  int m = M - 1;
  return 1 + (i - m * row + row * (row + 1) / 2);
}
// Per-block tree-evaluation kernel; launched <<<nb, nfol>>> — one block per
// tree copy, one thread per leaf. Phase 1: leaf threads walk to the root,
// accumulating their root distance (nz_dr) and a bit-path signature (nz_sig)
// registered in the per-block hash table. Phase 2: the same for internal
// nodes. Phase 3: threads share the pairwise leaf-distance work, locating
// each pair's common ancestor by its shared signature prefix via the hash
// table and writing the distance into nz_de.
// NOTE(review): the `states` and `seed` parameters are never used in this
// body — presumably leftovers of a curand-based mutation step; confirm.
// NOTE(review): signatures are 32-bit, so paths deeper than ~31 edges would
// overflow (the prefix comparison caps at 32 bits) — confirm depth bounds.
__global__ void Mutate_tree_Gpu(int nnos, int hnnos, int pos_ins, int idx_ni, int *nz, float *nz_br, float *nz_dr, float *nz_de, int *nz_qf,int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2, unsigned int *nz_sig, unsigned int *nz_hsig, unsigned int *nz_hval, curandState *states, unsigned long seed) {
  float y; // accumulates edge lengths
  int nfol; // number of leaves in the tree
  int j; // node index walked by the active thread
  int a, b; // used to compute each thread's range of (triangular-matrix) elements
  unsigned int sig1, sig2, sig3, sig4; // node signatures - the bit-encoded path up to the root
  int bit; // bit currently under inspection
  int ancc; // index of the common ancestor
  int nthreads; // number of active threads
  int r, c; // row and column in the upper-triangular matrix
  int bits; // counts how many bits match
  int i = threadIdx.x; // thread id
  int ib = blockIdx.x; // block id
  int it; // global access index for this thread
  int ennos; // size of the distance matrix
  nfol = nnos / 2; // leaves live in the lower half of the node arrays
  ennos = (nfol * (nfol - 1)) / 2;
  it = i + ib*nnos;
  if (i < nfol) { // leaf nodes compute their distance to the root and record
  // the bit-path (signature) up to the root
  y = 0;
  j = it; // map threads onto leaf nodes
  nz_sig[it] = 1;
  while (j != -1) {
  y = y + nz_br[j]; // accumulate the distance
  if (nz_p[j] == -1)
  break;
  nz_sig[it] <<= 1; // accumulate the path
  if (nz_f1[nz_p[j]] == j) // append 0 when coming from the right (f2)
  nz_sig[it]++; // or 1 when coming from the left (f1)
  j = nz_p[j];
  }
  quadratic_probing_insert(nz_hsig, nz_hval, nz_sig[it], it, hnnos);
  nz_dr[it] = y;
  }
  __syncthreads(); // wait for every thread to reach this point
  if (i < (nfol-1)) { // internal nodes compute their distance to the root and record
  // the bit-path (signature) up to the root
  y = 0;
  j = it+nfol+1; // map threads onto the internal nodes
  nz_sig[j] = 1;
  if (nz_p[j] == -1) j = -1;
  while (j != -1) {
  y = y + nz_br[j]; // accumulate the distance
  if (nz_p[j] == -1)
  break;
  nz_sig[it+nfol+1] <<= 1; // accumulate the path
  if (nz_f1[nz_p[j]] == j) // append 0 when coming from the right (f2)
  nz_sig[it+nfol+1]++; // or 1 when coming from the left (f1)
  j = nz_p[j];
  }
  quadratic_probing_insert(nz_hsig, nz_hval, nz_sig[it+nfol+1], (it+nfol+1), hnnos);
  nz_dr[it+nfol+1] = y;
  }
  __syncthreads(); // wait for every thread to reach this point
  // if nfol (number of species) is odd, use nfol threads
  // if nfol (number of species) is even, use nfol-1 threads
  // this avoids having to handle leftover elements
  if ( (nfol % 2) == 0) {
  nthreads = nfol - 1; // nfol even: each thread computes nfol/2 distances
  a = nfol / 2; // number of elements per thread
  } else {
  nthreads = nfol; // nfol odd: each thread computes (nfol-1)/2 distances
  a = (nfol - 1) / 2; // number of elements per thread
  }
  if (i < nthreads) {
  for( b = i*a; b < a+(i*a); b++) {
  r = row_index(b, nfol);
  c = column_index(b, nfol);
  sig1 = nz_sig[r+ib*nnos];
  sig2 = nz_sig[c+ib*nnos];
  sig3 = 1; // starts at 1 to distinguish it from the other signatures, i.e., 10, 100 etc
  bits = 0; // counts how many bits match
  sig4 = 1; // receives the reversed signature
  while ( (sig1 & 1) == (sig2 & 1) && bits < 32) { // compare the least-significant bits
  bit = (sig1 & 1);
  bits++;
  sig1 >>= 1; // advance to the next bit
  sig2 >>= 1; // advance to the next bit
  sig3 <<= 1; // store the matching bits - the path of the common ancestor
  if (bit)
  sig3++; // add the 1 or 0
  }
  while (bits>0) { // reverse the shared prefix, including a leading 1 marker
  sig4 <<= 1;
  if (sig3 & 1)
  sig4++;
  sig3 >>= 1;
  bits--;
  }
  // NOTE(review): ancc == -1 (signature not found) would index out of bounds
  // below — assumed unreachable for well-formed trees; confirm.
  ancc = quadratic_probing_search(nz_hsig, nz_hval, sig4, hnnos);
  nz_de[b+ib*ennos] = nz_dr[r+ib*nnos] + nz_dr[c+ib*nnos] - 2*nz_dr[ancc];
  }
  }
  }
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include <sys/time.h>
#include <chrono>
#include <dirent.h>
using namespace std::chrono;
using namespace std;
vector<int> G_timestamps;
// Current wall-clock time in milliseconds since the Unix epoch.
// Fix: the epoch-millisecond count (~1.7e12) does not fit in the original
// `int` return type; widened to long long. Callers that still store the
// result in an int keep their previous truncating behaviour (interval
// differences remain correct modulo 2^32), so the change is backward
// compatible, while new callers get the exact value.
long long getCurrentTime () {
    return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
}
// Pushes the current timestamp onto the measurement stack; pair each call
// with a later F_TIME_END.
void F_TIME_START () {
    int startedAt = getCurrentTime();
    G_timestamps.push_back(startedAt);
}
// Pops the most recent F_TIME_START timestamp and logs the elapsed time, in
// seconds, under the given label.
void F_TIME_END (string measuredName) {
    int startedAt = G_timestamps.back();
    int endedAt = getCurrentTime();
    float seconds = (endedAt - startedAt) / 1000.0;
    G_timestamps.pop_back();
    cout << endl << "## [" << measuredName << "]: " << seconds << "s" << endl << endl;
}
// Prints total / used / free GPU memory, in MB, via cudaMemGetInfo.
void coutGPUStatus () {
    size_t freeBytes, totalBytes;
    cudaMemGetInfo(&freeBytes, &totalBytes);
    float freeMb = freeBytes / 1048576.0;
    float totalMb = totalBytes / 1048576.0;
    float usedMb = totalMb - freeMb;
    printf ( "## Total: %f MB. Used %f MB. Free: %f MB. \n", totalMb, usedMb, freeMb);
}
// Logs the generation number together with the best fitness found so far.
void coutResult(int& generation, int& max_fitness_value) {
    cout << "Generation " << generation
         << ", currently best individual can activate "
         << max_fitness_value << " others" << endl;
}
// Pretty-prints every individual (one line per individual) in the population.
// Fix: the original branched on the magnitude of each value (<10, <100, <1000,
// <10000), but every one of those branches printed the identical ", "
// separator — the chain collapses to a single conditional with the same
// output: values below 10000 get ", ", larger values get "," (no space).
void coutPopulation (vector <vector<int>>& population) {
    cout << "Population:";
    for (int i=0; i<population.size(); i++) {
        cout << "\nIndiv: " << i << ": ";
        for (int j=0; j<population[i].size(); j++) {
            int value = population[i][j];
            cout << value << (value < 10000 ? ", " : ",");
        }
    }
    cout << "\n\n";
}
// Pretty-prints the genes of individual i.
// Fix: same collapse as coutPopulation — the magnitude branches below 10000
// all printed the identical ", " separator, so one conditional reproduces the
// output exactly (values >= 10000 get "," with no space).
void coutIndividual (vector <vector<int>>& population, int i) {
    cout << "Individual " << i << ":";
    for (int j=0; j<population[i].size(); j++) {
        int value = population[i][j];
        cout << value << (value < 10000 ? ", " : ",");
    }
    cout << "\n\n";
}
// Looks up the influence of node x on node y in the CSR-style triplet
// representation (inf_values / inf_col_ind / inf_row_ptr); returns 0 when no
// entry exists for (x, y).
// Fix: for the last row (x == N-1) the scan previously stopped at
// inf_values_size-1, so the final stored entry could never be found; the
// correct exclusive upper bound for the last row is inf_values_size.
// (Also renames the locals away from `min`/`max`, which shadowed std names.)
float getInfluenceValue (int N, int inf_values_size, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, int x, int y) {
    float infValue = 0;
    int rowBegin = inf_row_ptr[x];                                  // first entry of row x
    int rowEnd = (x == N-1) ? inf_values_size : inf_row_ptr[x+1];   // one past the last entry
    for (int i = rowBegin; i < rowEnd; i++) {
        if (inf_col_ind[i] == y) {
            infValue = inf_values[i];
            break;
        }
    }
    return infValue;
}
// One influence-spread step over every changed individual: a node becomes
// active when the summed influence of the individual's currently-active nodes
// on it reaches INFLUENCE_THRESHOLD. Works in place, so nodes activated early
// in the node scan contribute to later nodes within the same step (unchanged
// from the original).
void InfluenceSpreadPopulationStep (bool *dyn_activeNodesPerIndividual, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, int N, int nrOfChangedIndividuals, int inf_values_size, float INFLUENCE_THRESHOLD, vector<int>& changedIndividuals) {
    for (int c = 0; c < nrOfChangedIndividuals; c++) {
        int indiv = changedIndividuals[c];
        bool *activeRow = dyn_activeNodesPerIndividual + indiv * N; // activation flags of this individual
        for (int node = 0; node < N; node++) {
            float incoming = 0; // total influence arriving at `node`
            for (int src = 0; src < N; src++) {
                if (activeRow[src] && node != src) { // every active node other than `node` itself
                    incoming += getInfluenceValue(N, inf_values_size, inf_values, inf_col_ind, inf_row_ptr, src, node);
                }
            }
            if (incoming >= INFLUENCE_THRESHOLD) {
                activeRow[node] = true; // threshold met: activate the node
            }
        }
    }
}
// Builds the N x N influence matrix from a raw interaction log, one "src dst"
// pair per line: influence[i][j] = (#interactions i->j) / (total received by
// j), with -1 on the diagonal. Side effect: writes the compressed
// "influenceCounted" cache consumed by defineInfluenceArrayAndVectors.
// Node ids in non-.csv logs are treated as 1-based (shifted down by 1);
// .csv logs are used as-is.
vector <vector<float>> readData (string dataset_name, int N, string _EXPERIMENT_ID) {
    vector <vector<float>> influence;
    // initialization of the influence vector
    for (int i=0; i<N; i++) {
        cout << endl << i << " out of " << N << endl;
        vector<float> row(N, 0);
        influence.push_back(row);
        if ((i + 1) * N % (N * N / 10) == 0) { // progress log every 10%
            cout << "[Initialization of the influence matrix]: " << float((i + 1) * N) / (N * N) * 100 << "%" << endl;
        }
    }
    // total number of interactions received by every node
    vector<float> received(N, 0);
    ifstream infile("./experiments_" + _EXPERIMENT_ID + "/" + dataset_name);
    string line;
    int _csv_id_hack = -1; // default: 1-based ids, shift down by 1
    if (dataset_name.find(".csv") != std::string::npos) {
        _csv_id_hack = 0; // .csv files already use 0-based ids
    }
    if (infile.good()) {
        int line_nr = 0;
        while (getline(infile, line)) {
            cout << "Reading raw data file, line nr: " << line_nr << endl;
            //cout << line << endl;
            istringstream iss(line);
            int a, b;
            if (!(iss >> a >> b)) { cout << "ERROR" << endl; break; } // error
            // ignore self-loops and ids beyond the N-node window
            if (a != b && a + _csv_id_hack < N && b + _csv_id_hack < N) {
                influence[a + _csv_id_hack][b + _csv_id_hack] += 1; // temp inf_values, calculating the total number of iteractions from "i" to "j"
                received [b + _csv_id_hack] += 1;
                //cout << "message from " << a + _csv_id_hack << " to " << b + _csv_id_hack << endl;
            }
            line_nr++;
        }
        infile.close();
        cout << "File reading finished successfully." << endl;
        ofstream outfile ("./experiments-counted/" + dataset_name + "_influenceCounted_" + to_string(N));
        if (outfile.good()) {
            // Influence value calculated as the ratio of iteractions from "i" node to "j" node, to the total number of iteractions to the "j" node.
            for (int i=0; i<N; i++) {
                for (int j=0; j<N; j++) {
                    //cout << "Influence values calculations, step: " << i*N+(j+1) << "/" << N*N << endl;
                    if (i == j) {
                        outfile << i << " " << j << " " << -1 << "\n"; // diagonal marker
                        influence[i][j] = -1;
                    } else if (influence[i][j] > 0) {
                        if (received[j] != 0) {
                            influence[i][j] = influence[i][j] / received[j];
                        } else if (influence[i][j] != 0) {
                            // influence[i][j] > 0 implies received[j] > 0, so this is defensive only
                            cout << "Received array error";
                        }
                        /*cout << i << "'s influence on " << j << " equals: " << influence[i][j] << endl;*/
                        outfile << i << " " << j << " " << influence[i][j] << "\n";
                    } else {
                        influence[i][j] = 0;
                    }
                }
            }
            cout << "Compressed file saved successfully." << endl;
            outfile.close();
        } else {
            throw std::invalid_argument("readData - File " + dataset_name + " not saved.");
        }
    } else {
        throw std::invalid_argument("readData - File " + dataset_name + " not found.");
    }
    return influence;
}
// Fills the CSR-like triplet (inf_values / inf_col_ind / inf_row_ptr) for the
// influence matrix: from the "influenceCounted" cache file when present,
// otherwise by computing the matrix via readData() and compressing it here.
// NOTE(review): inf_row_ptr only receives an entry for rows that contain at
// least one nonzero (the push_back(-1) for empty rows is commented out), yet
// getInfluenceValue indexes inf_row_ptr by row id — for graphs with an
// empty row the two go out of sync; confirm inputs guarantee nonempty rows.
// Note: diagonal entries (stored as -1) pass the c != 0 test and are kept.
void defineInfluenceArrayAndVectors (string dataset_name, int N, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, string _EXPERIMENT_ID) {
    //cout << "File reading started." << endl;
    ifstream infile("./experiments-counted/" + dataset_name + "_influenceCounted_" + to_string(N));
    if (infile.good()) { // reading the already calculated influence values
        int line_nr = 0;
        string line;
        float last_a = -1; // row id of the previous line, to detect row starts
        while (getline(infile, line)) {
            cout << "Reading influence file, line nr: " << line_nr << endl;
            istringstream iss(line);
            float a, b, c; // row, column, influence value
            if (!(iss >> a >> b >> c)) { break; } // error
            if (c != 0) {
                if (a != last_a) { // first nonzero of a new row: record its offset
                    inf_row_ptr.push_back(inf_values.size());
                    //cout << "add row ptr: " << inf_values.size() << endl;
                    last_a = a;
                }
                inf_values.push_back(c);
                //cout << "add value: " << c << endl;
                inf_col_ind.push_back(b);
                //cout << "add col ind: " << b << endl;
            }
            line_nr++;
        }
        infile.close();
    } else { // calculating influence values
        infile.close();
        vector <vector<float>> influence = readData(dataset_name, N, _EXPERIMENT_ID);
        // inf_values, inf_col_ind, inf_row_ptr creation, based on the influence array
        for (int i=0; i<N; i++) {
            bool added = false; // has row i contributed its row-pointer yet?
            for (int j=0; j<N; j++) {
                //cout << "Influence of " << i << " on " << j << " is equal to: " << influence[i][j] << endl;
                if (influence[i][j] != 0) {
                    if (!added) {
                        inf_row_ptr.push_back(inf_values.size());
                        //cout << "add row ptr: " << inf_values.size() << endl;
                        added = true;
                    }
                    inf_values.push_back(influence[i][j]);
                    //cout << "add value: " << influence[i][j] << endl;
                    inf_col_ind.push_back(j);
                    //cout << "add col ind: " << j << endl;
                }
            }
            if (!added) {
                //inf_row_ptr.push_back(-1);
            }
        }
        /*cout << "\n\n size of influence array: " << sizeof(influence) + sizeof(float) * influence.capacity() * influence.capacity();
        cout << "\n\n Total size of vectors: "
        << sizeof(inf_values) + sizeof(float) * inf_values.capacity()
        + sizeof(inf_col_ind) + sizeof(float) * inf_col_ind.capacity()
        + sizeof(inf_row_ptr) + sizeof(float) * inf_row_ptr.capacity() << "\n\n";*/
    }
}
// Fills `population` with nrOfIndividuals random individuals, each a set of
// `toFind` distinct node ids drawn uniformly from [0, N).
void createPopulation (int nrOfIndividuals, int N, int toFind, vector <vector<int>>& population) {
    for (int i = 0; i < nrOfIndividuals; i++) {
        population.push_back(vector<int>());
        cout << "Creating individual " << i << " of " << nrOfIndividuals << endl;
        for (int j = 0; j < toFind; j++) {
            int candidate = rand() % N;
            // Re-roll until a full scan over the genes chosen so far finds no
            // duplicate. A re-roll happens mid-scan, so a fresh candidate is
            // still checked against the remaining genes on the next pass.
            bool collided = true;
            while (collided) {
                collided = false;
                for (int k = 0; k < population[i].size(); k++) {
                    if (population[i][k] == candidate) {
                        collided = true;
                        candidate = rand() % N;
                    }
                }
            }
            //cout << "pushing: " << candidate << endl;
            population[i].push_back(candidate);
        }
    }
}
// Appends one deterministic individual [0, 1, ..., toFind-1] to the
// population; used as a reference sample (e.g. for GPU vs CPU tests).
// nrOfIndividuals and N are kept for signature parity but are unused.
// Fix: the genes were appended to the hard-coded population[0] rather than
// the freshly pushed row, corrupting an existing first individual whenever
// the population was non-empty; the sample now goes into its own row.
void createPopulationSample (int nrOfIndividuals, int N, int toFind, vector <vector<int>>& population) {
    vector<int> sample;
    for (int x = 0; x < toFind; x++) {
        sample.push_back(x);
    }
    population.push_back(sample);
}
// Computes one fitness value per individual: the number of nodes its seed set
// eventually activates beyond the toFind seeds themselves. Influence is
// spread iteratively (via InfluenceSpreadPopulationStep) until no individual
// changes or STEPS_MAX iterations have run. THREADS_PER_BLOCK is accepted
// for signature parity with the GPU variant but unused here.
// Fixes: the activation matrix allocated with new[] was never freed (leak on
// every call), and `active` was a variable-length array (a GCC extension) —
// now a std::vector.
void setPopulationFitness (vector<vector<int>>& population, int nrOfIndividuals, int N, int inf_values_size, float& INFLUENCE_THRESHOLD, int STEPS_MAX, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, int toFind, vector<int>& fitness, int THREADS_PER_BLOCK) {
    // Row i = activation flags of individual i over all N nodes.
    bool *dyn_activeNodesPerIndividual = new bool[nrOfIndividuals*N];
    for (int i=0; i<nrOfIndividuals; i++) {
        for (int j=0; j<N; j++) {
            dyn_activeNodesPerIndividual[N * i + j] = false;
        }
        for (int j=0; j<toFind; j++) {
            dyn_activeNodesPerIndividual[N * i + population[i][j]] = true; // seed nodes start active
        }
    }
    vector<int> active(nrOfIndividuals, toFind); // current active-node count per individual
    vector<int> changedIndividuals;              // individuals that changed in the last step
    for (int i=0; i<nrOfIndividuals; i++) {
        changedIndividuals.push_back(i);         // everyone spreads on the first step
    }
    int step_counter = 0;
    while (step_counter < STEPS_MAX && changedIndividuals.size() > 0) {
        int nrOfChangedIndividuals = changedIndividuals.size();
        cout << "nrOfChangedIndividuals " << nrOfChangedIndividuals << endl;
        F_TIME_START();
        InfluenceSpreadPopulationStep (dyn_activeNodesPerIndividual, inf_values, inf_col_ind, inf_row_ptr, N, nrOfChangedIndividuals, inf_values_size, INFLUENCE_THRESHOLD, changedIndividuals);
        F_TIME_END("host functions");
        changedIndividuals.clear();
        for (int i=0; i<nrOfIndividuals; i++) {
            int curr_active = 0;
            for (int j=0; j<N; j++) {
                if (dyn_activeNodesPerIndividual[N * i + j]) {
                    curr_active++;
                }
            }
            if (curr_active != active[i]) { // still spreading: process again next step
                changedIndividuals.push_back(i);
            }
            active[i] = curr_active;
        }
        step_counter++;
    }
    for (int i = 0; i < nrOfIndividuals; i++) {
        int individualFitness = 0;
        for (int j = 0; j < N; j++) {
            if (dyn_activeNodesPerIndividual[N * i + j]) {
                individualFitness++;
            }
        }
        // Fitness excludes the seeds themselves; crossover/mutation can
        // duplicate a seed node, so slightly negative values are tolerated
        // upstream as an acceptable "error".
        fitness.push_back(individualFitness-toFind);
    }
    delete[] dyn_activeNodesPerIndividual; // was leaked before
}
// Tournament selection: repeatedly draws a random group of groupSize
// distinct individuals and copies the fittest one into the next population,
// while tracking the best individual seen overall in
// max_fitness_individual/max_fitness_value.
// Precondition (unchanged): groupSize <= nrOfIndividuals, otherwise the
// distinct-draw loop cannot terminate.
// Fixes: `alreadyAdded[randIndiv]` was never set to true after a draw, so
// tournaments could contain the same individual several times — now marked;
// the VLA `bool alreadyAdded[nrOfIndividuals]` (a GCC extension) is now a
// std::vector, reinitialized per tournament exactly as before.
// NOTE(review): if every fitness in a group is below -1, curr_best_id stays
// -1 and indexes out of bounds — unchanged from the original; confirm
// fitness >= -1 holds upstream.
void performPopulationSelection (vector<vector<int>>& population, int& nrOfIndividuals, int N, int inf_values_size, float& INFLUENCE_THRESHOLD, int& groupSize, int& STEPS_MAX, vector<float>& inf_values, vector<float>& inf_col_ind, vector<float>& inf_row_ptr, int& toFind, int& max_fitness_value, vector<int>& max_fitness_individual, int THREADS_PER_BLOCK) {
    vector<int> fitness;
    F_TIME_START();
    setPopulationFitness(population, nrOfIndividuals, N, inf_values_size, INFLUENCE_THRESHOLD, STEPS_MAX, inf_values, inf_col_ind, inf_row_ptr, toFind, fitness, THREADS_PER_BLOCK);
    F_TIME_END("selection - fitness count");
    F_TIME_START();
    vector<vector<int>> newPopulation;
    while (newPopulation.size() != population.size()) {
        vector<int> newGroup;
        vector<char> alreadyAdded(nrOfIndividuals, 0); // was a VLA
        for (int j=0; j<groupSize; j++) {
            int randIndiv = rand() % nrOfIndividuals;
            while (alreadyAdded[randIndiv]) {          // re-roll duplicates
                randIndiv = rand() % nrOfIndividuals;
            }
            newGroup.push_back(randIndiv);
            alreadyAdded[randIndiv] = 1; // fix: was never marked, so duplicates slipped in
        }
        int curr_best_fitness = -1;
        int curr_best_id = -1; // index within newGroup
        for (int j=0; j<newGroup.size(); j++) {
            int currentFitness = fitness[newGroup[j]];
            if (currentFitness > curr_best_fitness) {
                curr_best_fitness = currentFitness;
                curr_best_id = j;
            }
        }
        newPopulation.push_back(population[newGroup[curr_best_id]]);
        if (curr_best_fitness > max_fitness_value) {
            max_fitness_individual = population[newGroup[curr_best_id]];
            max_fitness_value = curr_best_fitness;
        }
    }
    population = newPopulation;
    F_TIME_END("selection - population swapping");
}
// TODO performCrossover on DEVICE (nrOfIndividuals/2 threads (from 0 to nr/2 - 1), ids: id*2, id*2+1
// Fixed-midpoint crossover: individuals are selected with probability
// crossover_ratio and paired in selection order; each pair exchanges its
// first half (split_ratio = 0.5) of genes. Gene duplication within an
// individual is possible afterwards (fitness = -1 can happen).
// Fix: the swap previously round-tripped int genes through a float temporary
// (lossy for values above 2^24); std::swap keeps them exact.
void performCrossover (vector<vector<int>>& population, int& nrOfIndividuals, float& crossover_ratio, int& toFind) {
    float split_ratio = 0.5;
    float split_point = split_ratio*toFind; // genes [0, split_point) are exchanged
    int id_first = -1;
    int id_second = -1;
    for (int i=0; i<nrOfIndividuals; i++) {
        int cross = rand() % 100;
        if (cross < crossover_ratio * 100) {
            if (id_first == -1) {
                id_first = i;
            } else {
                id_second = i;
            }
        }
        if (id_second != -1) { // a full pair is ready: swap their first halves
            for (int j=0; j<split_point; j++) {
                swap(population[id_first][j], population[id_second][j]);
            }
            id_first = -1;
            id_second = -1;
        }
    } // allows node doubling (fitness = -1 can happen)
}
// TODO performMutation on DEVICE
// With probability mutation_ratio per individual, overwrites
// mutation_potency*toFind randomly chosen genes with random node ids from
// [0, N). Gene duplication is possible afterwards (fitness = -1 can happen).
void performMutation (vector<vector<int>>& population, int& nrOfIndividuals, float& mutation_ratio, float& mutation_potency, int& toFind, int N) {
    for (int indiv = 0; indiv < nrOfIndividuals; indiv++) {
        int roll = rand() % 100;
        if (roll < mutation_ratio * 100) {
            for (int m = 0; m < mutation_potency * toFind; m++) {
                population[indiv][rand() % toFind] = rand() % N;
            }
        }
    }
}
// Returns true when any stopping criterion fires: stagnation (result is not
// at least resultMinDiff better than the result resultBufferSize generations
// ago), generation cap, target result reached, or wall-clock budget used up.
// A criterion is disabled when its limit parameter is <= 0.
// Side effect: from generation 1 on, appends `result` to resultsBuffer and
// trims the front once generation exceeds resultBufferSize.
// NOTE(review): with resultMinDiff > 0 the stagnation test reads
// resultsBuffer[0]; the buffer must already be populated — unchanged
// behaviour from the original.
bool anyLimitReached(int resultBufferSize, float resultMinDiff, vector<int> &resultsBuffer, int generation, int generationsLimit, float timeLimit, int COMPUTATION_START_TIME, int result, int resultLimit) {
    int now = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
    float elapsedSec = (now - COMPUTATION_START_TIME) / 1000.0;
    bool stagnated = resultMinDiff > 0 && generation > resultBufferSize
                     && result < resultsBuffer[0] * (1 + resultMinDiff);
    bool generationsDone = generationsLimit > 0 && generation >= generationsLimit;
    bool targetReached = resultLimit > 0 && result >= resultLimit;
    bool outOfTime = timeLimit > 0 && elapsedSec >= timeLimit;
    if (generation > 0) {
        resultsBuffer.push_back(result);
    }
    if (generation > resultBufferSize) {
        resultsBuffer.erase(resultsBuffer.begin());
        //cout << endl << "Current resultsBuffer[0]: " << resultsBuffer[0] << endl;
    }
    return stagnated || generationsDone || targetReached || outOfTime;
}
// Lists the entries of directory `path` (excluding "." and "..") via POSIX
// dirent. Returns an empty vector when the directory cannot be opened.
// (Uses std::string comparison instead of strcmp, so no <cstring> needed.)
vector<string> getFileNames (string path) {
    vector<string> fileNames;
    DIR *dir = opendir(path.c_str());
    if (dir != NULL) {
        struct dirent *entry;
        while ((entry = readdir(dir)) != NULL) {
            string name = entry->d_name;
            if (name != "." && name != "..") {
                fileNames.push_back(name);
            }
        }
        closedir(dir);
    }
    return fileNames;
}
/* pearson, spearman */
// Arithmetic mean of `values`.
// Fix: an empty vector previously divided 0.0f by 0, producing NaN and
// poisoning every downstream correlation; it now returns 0.
float mean (vector<float> values) {
    int size = values.size();
    if (size == 0) {
        return 0;
    }
    float sum = 0;
    for (int i = 0; i < size; i++) {
        sum += values[i];
    }
    return sum / size;
}
// Pearson numerator: sum over i of (A[i]-meanA)*(B[i]-meanB).
// Assumes A and B have equal length (checked by the caller, pearson()).
float pearson_numerator (vector<float> A, vector<float> B, float meanA, float meanB) {
    float acc = 0;
    for (int i = 0; i < A.size(); i++) {
        acc += (A[i] - meanA) * (B[i] - meanB);
    }
    return acc;
}
// Pearson denominator: sqrt(sum (A[i]-meanA)^2) * sqrt(sum (B[i]-meanB)^2).
// Logs an error when either factor is zero (a constant input vector); the
// caller will then divide by zero — unchanged contract.
float pearson_denominator (vector<float> A, vector<float> B, float meanA, float meanB) {
    float sumSqA = 0;
    float sumSqB = 0;
    for (int i = 0; i < A.size(); i++) {
        sumSqA += pow(A[i] - meanA, 2);
    }
    for (int i = 0; i < B.size(); i++) {
        sumSqB += pow(B[i] - meanB, 2);
    }
    float factorA = pow(sumSqA, 0.5);
    float factorB = pow(sumSqB, 0.5);
    if (factorA == 0 || factorB == 0)
        cout << endl << endl << "##### ERROR: Denominator equal to 0 - probable cause: all result values are equal" << endl << endl;
    return factorA * factorB;
}
float pearson (vector<float> A, vector<float> B) {
if (A.size() != B.size()) {
cout << "ERROR - wrong vector lengths" << endl;
return -1;
}
float meanA = mean(A);
float meanB = mean(B);
float numerator = pearson_numerator(A, B, meanA, meanB);
float denominator = pearson_denominator(A, B, meanA, meanB);
return numerator / denominator;
}
// Converts values to 1-based ranks; tied values receive the average of the
// positions they occupy in the sorted order (fractional ranks). O(n^2).
vector<float> toRank (vector<float> A) {
    vector<float> sorted = A;
    sort(sorted.begin(), sorted.end());
    vector<float> rank;
    for (int i = 0; i < A.size(); i++) {
        float posSum = 0;  // sum of 1-based sorted positions equal to A[i]
        int count = 0;     // how many sorted entries equal A[i]
        for (int j = 0; j < A.size(); j++) {
            if (sorted[j] == A[i]) {
                posSum += j + 1;
                count++;
            }
        }
        rank.push_back(posSum / count); // average rank across the tie group
        //rank.push_back(last 1-based position); //libreoffice calc rank
    }
    return rank;
}
// Spearman rank correlation: the Pearson correlation of the rank-transformed
// vectors.
float spearman (vector<float> A, vector<float> B) {
    return pearson(toRank(A), toRank(B));
}
int main (int argc, char* argv[]) {
srand (time(NULL));
coutGPUStatus();
string _EXPERIMENT_ID = argv[1];
int tests = 100;
float timeLimit = 6; //seconds
int generationsLimit = 0; //5;
int resultLimit = 0; //32;
int resultBufferSize = 10;
float resultMinDiff = 0; //0.01;
bool saveResults = true;
bool saveResultsCorrelation = true;
float INFLUENCE_THRESHOLD = 0.5;
int N_MAX = 1000;
int STEPS_MAX = 10000;
int TO_FIND_PERCENTAGE = 5;
int THREADS_PER_BLOCK = 1024;
/* Parameters */
//int groupSize = 20; // 10, 20, 30 // 2, 5, 10, 20, 50
//int nrOfIndividuals = (int)ceil(N/10.0); // N/20, N/10, N/5 // 100, 500 1k, 2k, 10k
//float crossover_ratio = 0.7; // 0.5, 0.7, 0.9 // 0.1, 0.3, 0.5, 0.7, 0.9
//float mutation_potency = 0.01; // 0.001, 0.01, 0.1 // 0.01, 0.02, 0.05, 0.1, 0.2
//float mutation_ratio = 0.9; // 0.75, 0.9, 0.95, // 0.1, 0.3, 0.5, 0.7, 0.9
vector<int> a_groupSize {10, 20, 30}; // 10, 20, 30
vector<int> a_nrOfIndividuals {12, 10, 8}; // N/12, N/10, N/8
vector<float> a_crossover_ratio {0.6, 0.7, 0.8}; // 0.6, 0.7, 0.8
vector<float> a_mutation_potency {0.001, 0.01, 0.1}; // 0.001, 0.01, 0.1
vector<float> a_mutation_ratio {0.7, 0.8, 0.9}; // 0.7, 0.8, 0.9
int parameters_sets = a_groupSize.size() * a_nrOfIndividuals.size() * a_crossover_ratio.size() * a_mutation_potency.size() * a_mutation_ratio.size();
vector<string> datasets = getFileNames("./experiments_" + _EXPERIMENT_ID);
/* DEBUG */
int debug_nrOfIndividuals;
bool debug = true;
if (debug) {
tests = 10;
N_MAX = 1000;
THREADS_PER_BLOCK = 1024;
debug_nrOfIndividuals = -1; // -1 - the same as if it wasn't a debug mode (so devides N by a_nrOfIndividuals to get indivnr)
// tests: 10, debug_nrOfIndividuals: -1, generationsLimit: 1, THREADS_PER_BLOCK: 1024, default parameters, facebook
/* 100: 7 in 1ms, 500: 46 in 10ms, 1000: 88 in 53ms */
timeLimit = 0;
generationsLimit = 5; // 5 - 80s
resultLimit = 0;
resultMinDiff = 0;
saveResults = true;//false;
saveResultsCorrelation = true;//false;
a_groupSize = {20};
a_nrOfIndividuals = {8};
a_crossover_ratio = {0.7};
a_mutation_potency = {0.01};
a_mutation_ratio = {0.9};
parameters_sets = a_groupSize.size() * a_nrOfIndividuals.size() * a_crossover_ratio.size() * a_mutation_potency.size() * a_mutation_ratio.size();
//datasets = {"facebook-46952"};
//datasets = {"BA-1000-1-3.csv"};
datasets = {"ER-1000-0.05-10.csv"};
//datasets = getFileNames("./experiments_" + _EXPERIMENT_ID);
}
/*
N = 1000
INDIVIDUALS = 1000
THREADS_PER_BLOCK = 192
1 individuals - 0.056s
10 individuals - 0.081s
100 individuals - 0.265s
1000 individuals - 2.483s
THREADS_PER_BLOCK = 512
1000 individuals - 2.423s
THREADS_PER_BLOCK = 1024
1000 individuals - 2.481s
N = max (~47k for facebook)
THREADS_PER_BLOCK = 512
100 individuals - 5.08s
*/
vector<vector<float>> results;
for (int i=0; i<datasets.size(); i++) {
vector<float> row(parameters_sets, -1);
results.push_back(row);
}
for (int file_id=0; file_id<datasets.size(); file_id++) {
int dataset_id = file_id; //TODO to refactor
string dataset_name = datasets[file_id];
stringstream ssname(dataset_name);
string token;
getline(ssname, token, '-');
getline(ssname, token, '-');
int maxSize = stoi(token);
int N = min(N_MAX, maxSize);
int toFind = (int)ceil(float(TO_FIND_PERCENTAGE * N) / 100.0);
// using ofstream constructors.
std::ofstream outfile("results_" + dataset_name + "_" + _EXPERIMENT_ID + "_" + ".xls");
if (saveResults) {
outfile << "<?xml version='1.0'?>" << std::endl;
outfile << "<Workbook xmlns='urn:schemas-microsoft-com:office:spreadsheet'" << std::endl;
outfile << " xmlns:o='urn:schemas-microsoft-com:office:office'" << std::endl;
outfile << " xmlns:x='urn:schemas-microsoft-com:office:excel'" << std::endl;
outfile << " xmlns:ss='urn:schemas-microsoft-com:office:spreadsheet'" << std::endl;
outfile << " xmlns:html='http://www.w3.org/TR/REC-html40'>" << std::endl;
outfile << " <Worksheet ss:Name='Sheet1'>" << std::endl;
outfile << " <Table>" << std::endl;
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>Dataset</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>Test nr</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>groupSize</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>nrOfIndividuals</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>crossover_ratio</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>mutation_potency</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>mutation_ratio</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>Generations</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>Result</Data></Cell>" << std::endl;
outfile << " </Row>" << std::endl;
}
vector <float> inf_col_ind;
vector <float> inf_row_ptr;
vector <float> inf_values;
defineInfluenceArrayAndVectors(dataset_name, N, inf_values, inf_col_ind, inf_row_ptr, _EXPERIMENT_ID);
int inf_values_size = inf_values.size();
int parameters_set = 1;
for_each(a_groupSize.begin(), a_groupSize.end(), [&] (int groupSize) {
for_each(a_nrOfIndividuals.begin(), a_nrOfIndividuals.end(), [&] (int nrOfIndividualsRaw) {
int nrOfIndividuals = (int)ceil(N/nrOfIndividualsRaw);
if (debug && debug_nrOfIndividuals != -1) {
nrOfIndividuals = debug_nrOfIndividuals;
}
for_each(a_crossover_ratio.begin(), a_crossover_ratio.end(), [&] (float crossover_ratio) {
for_each(a_mutation_potency.begin(), a_mutation_potency.end(), [&] (float mutation_potency) {
for_each(a_mutation_ratio.begin(), a_mutation_ratio.end(), [&] (float mutation_ratio) {
float testsResultsSum = 0;
float testsGenerationsSum = 0;
float testsTimeSum = 0;
for (int test = 0; test < tests; test++) {
vector <int> max_fitness_individual;
vector <vector<int>> population;
int max_fitness_value = -1;
int progressBarLength = 10;
int generation = 0;
vector<int> resultsBuffer;
createPopulation(nrOfIndividuals, N, toFind, population);
//createPopulationSample(nrOfIndividuals, N, toFind, population);
//coutPopulation(population);
int COMPUTATION_START_TIME = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
while (!anyLimitReached(resultBufferSize, resultMinDiff, resultsBuffer, generation, generationsLimit, timeLimit, COMPUTATION_START_TIME, max_fitness_value, resultLimit)) {
//coutGPUStatus();
F_TIME_START();
performPopulationSelection(population, nrOfIndividuals, N, inf_values_size, INFLUENCE_THRESHOLD, groupSize, STEPS_MAX, inf_values, inf_col_ind, inf_row_ptr, toFind, max_fitness_value, max_fitness_individual, THREADS_PER_BLOCK);
F_TIME_END("selection");
F_TIME_START();
performCrossover(population, nrOfIndividuals, crossover_ratio, toFind);
F_TIME_END("crossover");
F_TIME_START();
performMutation(population, nrOfIndividuals, mutation_ratio, mutation_potency, toFind, N);
F_TIME_END("mutation");
//coutResult(generation, max_fitness_value);
generation++;
}
int COMPUTATION_END_TIME = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
int COMPUTATION_DURATION = COMPUTATION_END_TIME - COMPUTATION_START_TIME;
cout << endl << "[FINISHED] test: " << test+1 << "/" << tests
<< " for parameters set nr: " << parameters_set << "/" << parameters_sets
<< " for dataset_id: " << dataset_id+1 << "/" << datasets.size()
<< " in: " << COMPUTATION_DURATION / 1000.0 << "s";
cout << endl;
coutGPUStatus();
cout << endl;
if (saveResults) {
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(parameters_set) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(test+1) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(groupSize) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(nrOfIndividuals) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(crossover_ratio) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(mutation_potency) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(mutation_ratio) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(generation) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(max_fitness_value) + "</Data></Cell>" << std::endl;
outfile << " </Row>" << std::endl;
}
//cout << endl << "result " << test+1 << ": " << max_fitness_value << endl;
testsResultsSum += max_fitness_value;
testsGenerationsSum += generation;
testsTimeSum += COMPUTATION_DURATION;
/*cout << "Best individual found: " << endl;
for (int i=0; i<max_fitness_individual.size(); i++) {
cout << max_fitness_individual[i] << ", ";
}*/
//cout << endl << endl << "This group can activate " << max_fitness_value << " others";
//cout << endl << "Time elapsed: " << (time2 - COMPUTATION_START_TIME) / 1000.0 << "s" << endl;
} // TEST
float finalResult = std::round(testsResultsSum / tests);
float finalGenerations = std::round(testsGenerationsSum / tests);
float finalTime = std::round(testsTimeSum / tests);
cout << endl << "Final result avg: " << finalResult << " in avg " << finalTime / 1000.0 << "s" << endl;
results[file_id][parameters_set-1] = finalResult;
if (saveResults) {
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(parameters_set) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>AVG </Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(groupSize) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(nrOfIndividuals) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(crossover_ratio) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(mutation_potency) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(mutation_ratio) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(finalGenerations) + "</Data></Cell>" << std::endl;
outfile << " <Cell><Data ss:Type='Number'>" + to_string(finalResult) + "</Data></Cell>" << std::endl;
outfile << " </Row>" << std::endl;
}
parameters_set++;
});
});
});
});
});
if (saveResults) {
outfile << " </Table>" << std::endl;
outfile << " </Worksheet>" << std::endl;
outfile << "</Workbook>" << std::endl;
}
outfile.close();
}
cout << endl << endl << "*** RESULTS ***" << endl;
for (int i=0; i<datasets.size(); i++) {
for (int j=0; j<parameters_sets; j++) {
cout << results[i][j] << ", ";
}
cout << endl;
}
if (saveResultsCorrelation) {
// using ofstream constructors.
std::ofstream outfile("results_correlation_" + _EXPERIMENT_ID + "_.xls");
outfile << "<?xml version='1.0'?>" << std::endl;
outfile << "<Workbook xmlns='urn:schemas-microsoft-com:office:spreadsheet'" << std::endl;
outfile << " xmlns:o='urn:schemas-microsoft-com:office:office'" << std::endl;
outfile << " xmlns:x='urn:schemas-microsoft-com:office:excel'" << std::endl;
outfile << " xmlns:ss='urn:schemas-microsoft-com:office:spreadsheet'" << std::endl;
outfile << " xmlns:html='http://www.w3.org/TR/REC-html40'>" << std::endl;
outfile << " <Worksheet ss:Name='Sheet1'>" << std::endl;
outfile << " <Table>" << std::endl;
outfile << " <Row>" << std::endl;
outfile << " <Cell></Cell>" << std::endl;
for (int i=0; i<datasets.size(); i++) {
outfile << " <Cell><Data ss:Type='String'>" + datasets[i] + "</Data></Cell>" << std::endl;
}
outfile << " </Row>" << std::endl;
for (int i=0; i<datasets.size(); i++) {
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>" + datasets[i] + "</Data></Cell>" << std::endl;
for (int j=0; j<datasets.size(); j++) {
if (j > i) {
outfile << " <Cell><Data ss:Type='Number'>" + to_string(pearson(results[i], results[j])) + "</Data></Cell>" << std::endl;
} else {
outfile << " <Cell></Cell>" << std::endl;
}
}
outfile << " </Row>" << std::endl;
}
outfile << " <Row></Row>" << std::endl;
outfile << " <Row></Row>" << std::endl;
outfile << " <Row></Row>" << std::endl;
outfile << " <Row>" << std::endl;
outfile << " <Cell></Cell>" << std::endl;
for (int i=0; i<datasets.size(); i++) {
outfile << " <Cell><Data ss:Type='String'>" + datasets[i] + "</Data></Cell>" << std::endl;
}
outfile << " </Row>" << std::endl;
for (int i=0; i<datasets.size(); i++) {
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>" + datasets[i] + "</Data></Cell>" << std::endl;
for (int j=0; j<datasets.size(); j++) {
if (j > i) {
outfile << " <Cell><Data ss:Type='Number'>" + to_string(spearman(results[i], results[j])) + "</Data></Cell>" << std::endl;
} else {
outfile << " <Cell></Cell>" << std::endl;
}
}
outfile << " </Row>" << std::endl;
}
outfile << " </Table>" << std::endl;
outfile << " </Worksheet>" << std::endl;
outfile << "</Workbook>" << std::endl;
outfile.close();
}
return 0;
}
|
5,617 | //
// Simple and fast atof (ascii to float) function.
//
// - Executes about 5x faster than standard MSCRT library atof().
// - An attractive alternative if the number of calls is in the millions.
// - Assumes input is a proper integer, fraction, or scientific format.
// - Matches library atof() to 15 digits (except at extreme exponents).
// - Follows atof() precedent of essentially no error checking.
//
// 09-May-2009 Tom Van Baak (tvb) www.LeapSecond.com
//
#define white_space(c) ((c) == ' ' || (c) == '\t')
#define valid_digit(c) ((c) >= '0' && (c) <= '9')

/*
 * atoff - fast ASCII-to-double conversion.
 *
 * Parses optional leading whitespace, an optional sign, an integer part,
 * an optional fractional part, and an optional e/E exponent. Mirrors
 * library atof() in doing essentially no error checking: malformed input
 * simply stops the scan. Exponents are capped at 308 to stay inside the
 * double range.
 */
double atoff (const char *p)
{
    double result = 0.0;
    double sign = 1.0;
    double scale = 1.0;
    int negExp = 0;

    /* Skip leading blanks/tabs. */
    while (white_space(*p)) {
        ++p;
    }

    /* Optional sign. */
    if (*p == '-') {
        sign = -1.0;
        ++p;
    } else if (*p == '+') {
        ++p;
    }

    /* Integer part. */
    while (valid_digit(*p)) {
        result = result * 10.0 + (double)(*p - '0');
        ++p;
    }

    /* Fractional part: each digit contributes digit / 10^k. */
    if (*p == '.') {
        double divisor = 10.0;
        ++p;
        while (valid_digit(*p)) {
            result += (double)(*p - '0') / divisor;
            divisor *= 10.0;
            ++p;
        }
    }

    /* Exponent part: accumulate magnitude, remember direction. */
    if ((*p == 'e') || (*p == 'E')) {
        unsigned int expon = 0;
        ++p;
        if (*p == '-') {
            negExp = 1;
            ++p;
        } else if (*p == '+') {
            ++p;
        }
        while (valid_digit(*p)) {
            expon = expon * 10 + (unsigned int)(*p - '0');
            ++p;
        }
        if (expon > 308) expon = 308;   /* clamp to double range */
        /* Build 10^expon in big jumps, then smaller ones. */
        while (expon >= 50) { scale *= 1E50; expon -= 50; }
        while (expon >= 8)  { scale *= 1E8;  expon -= 8;  }
        while (expon >  0)  { scale *= 10.0; expon -= 1;  }
    }

    /* Negative exponent divides; positive multiplies. */
    return negExp ? sign * result / scale : sign * result * scale;
}
|
5,618 | #include <assert.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include <fstream>
#include <iostream>
#include <math.h>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// ACO constants
#define ANTS 1024
#define ALPHA 2
#define BETA 10
#define RHO 0.5
#define Q 50
#define MAX_ITERATIONS 10
// Instance constants
#define NODES 105
#define DIST 10000
#define PHERO_INITIAL (1.0 / NODES)
#define TOTAL_DIST (DIST * NODES)
// Base structure for ants information
struct ant {
int curNode, nextNode, pathIndex;
int tabu[NODES];
int solution[NODES];
float solutionLen;
};
struct nodeTSP {
float x, y;
};
using namespace std;
// Host variables
float *heuristic;
double *phero;
struct ant antColony[ANTS];
float bestSol[ANTS];
float globalBest = TOTAL_DIST;
curandState state[ANTS];
const size_t heuristic_size = sizeof(float) * size_t(NODES * NODES);
const size_t phero_size = sizeof(double) * size_t(NODES * NODES);
// Device variables
float *heuristic_d;
double *phero_d;
struct ant *antColony_d;
float *bestSol_d;
curandState *state_d;
int BLOCKS, THREADS;
// Function headers
__global__ void initializeAnts(struct ant *antColony_d, curandState *state_d,
float *bestSol_d);
__global__ void setuCurandStates(curandState *stated_d, unsigned long t,
float *bestSol_d);
__global__ void restartAnts(struct ant *antColony_d, curandState *state_d,
float *bestSol_d);
__global__ void constructSolution(struct ant *antColony_d, curandState *state_d, float *heuristic_d, double *phero_d);
__global__ void atomicUpdate(struct ant *antColony_d, double *phero_d);
__device__ double probFunctionProduct(int from, int to, double *phero_d,float *heuristic_d);
__device__ int NextNode(struct ant *antColony_d, int pos, float *heuristic_d,double *phero_d, curandState *state_d);
// Straight-line (Euclidean) distance between points (x1, y1) and (x2, y2).
// The squared sum is formed in float and the root taken in double, exactly
// as the original expression did, so results are bit-identical.
float euclideanDistance(float x1, float x2, float y1, float y2) {
  const float dx = x1 - x2;
  const float dy = y1 - y2;
  const float sq = dx * dx + dy * dy;
  return (float)sqrt((double)sq);
}
// Parse a TSPLIB instance file ("instances/<graph>.tsp") into `nodes` and
// fill the global `heuristic` (edge weights) and `phero` (initial pheromone)
// matrices. Assumes the file supplies exactly NODES coordinate entries.
// NOTE(review): the matrix diagonals (from == to) are never written here —
// confirm no consumer reads them before assuming they are initialized.
void constructTSP(string graph, nodeTSP *nodes) {
  ifstream infile(("instances/" + graph + ".tsp").c_str());
  string line;
  bool euclidean = true;   // EUC_2D unless the header says otherwise
  int node;
  float x, y;
  bool reading_nodes = false;
  // Scan header lines until NODE_COORD_SECTION, then read "id x y" triples.
  while (getline(infile, line)) {
    istringstream iss(line);
    string word;
    if (!reading_nodes) {
      iss >> word;
      if (word.compare("EDGE_WEIGHT_TYPE") == 0) {
        iss >> word >> word;   // skip the ':' token, read the type name
        cout << "edge type: " << word << endl;
        euclidean = !word.compare("EUC_2D");
      } else if (word.compare("NODE_COORD_SECTION") == 0) {
        reading_nodes = true;
      }
    } else if (iss >> node >> x >> y) {
      nodes[node - 1].x = x;   // TSPLIB node ids are 1-based
      nodes[node - 1].y = y;
    }
  }
  infile.close();
  // Calculate distances between cities (edge weights), symmetrically.
  for (int from = 0; from < NODES; from++) {
    for (int to = from + 1; to < NODES; to++) {
      // BUG FIX: edge_weight was read uninitialized for non-EUC_2D
      // instances; default to 0 so the fallback below assigns 1.0.
      float edge_weight = 0.0f;
      if (euclidean) {
        edge_weight = euclideanDistance(nodes[from].x, nodes[to].x,
                                        nodes[from].y, nodes[to].y);
      }
      if (edge_weight == 0) {
        edge_weight = 1.0;   // avoid zero-length edges (divided by later)
      }
      heuristic[from + to * NODES] = edge_weight;
      heuristic[to + from * NODES] = edge_weight;
      phero[from + to * NODES] = PHERO_INITIAL;
      phero[to + from * NODES] = PHERO_INITIAL;
    }
  }
}
// Initialize one curand state per thread, seeded with the launch time `t`
// and the thread's global id as the subsequence (so streams differ).
__global__ void setupCurandStates(curandState *state_d, unsigned long t) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  curand_init(t, tid, 0, &state_d[tid]);
}
// Reset every ant before the first iteration: clear the tabu and solution
// arrays, reset the per-ant best tour length, and drop the ant on a random
// start node. One thread per ant.
__global__ void initializeAnts(struct ant *antColony_d, curandState *state_d, float *bestSol_d) {
  const int ant_id = blockIdx.x * blockDim.x + threadIdx.x;
  struct ant *a = &antColony_d[ant_id];
  // 0 = node not yet visited; -1 = slot not yet part of the tour.
  for (int n = 0; n < NODES; n++) {
    a->tabu[n] = 0;
    a->solution[n] = -1;
  }
  // Worst-case tour length as the initial per-ant best.
  bestSol_d[ant_id] = (float)TOTAL_DIST;
  // Random starting city; record it as both current node and tour start.
  a->curNode = curand(&state_d[ant_id]) % NODES;
  a->solution[0] = a->curNode;
  a->tabu[a->curNode] = 1;   // start node counts as visited
  a->nextNode = -1;          // no successor chosen yet
  a->solutionLen = 0;
  a->pathIndex = 1;          // slot 0 is taken by the start node
}
// Prepare every ant for the next iteration: record the finished tour's
// length in the per-ant best (if improved), then reset all per-ant state
// and pick a fresh random start node. One thread per ant.
__global__ void restartAnts(struct ant *antColony_d, curandState *state_d,
                            float *bestSol_d ) {
  const int ant_id = blockIdx.x * blockDim.x + threadIdx.x;
  struct ant *a = &antColony_d[ant_id];
  // 0 = node not yet visited; -1 = slot not yet part of the tour.
  for (int n = 0; n < NODES; n++) {
    a->tabu[n] = 0;
    a->solution[n] = -1;
  }
  // Keep the shortest non-empty tour length this ant has produced.
  if (a->solutionLen < bestSol_d[ant_id] && a->solutionLen > 0) {
    bestSol_d[ant_id] = a->solutionLen;
  }
  // Re-seed the ant on a random start node for the next iteration.
  a->curNode = curand(&state_d[ant_id]) % NODES;
  a->solution[0] = a->curNode;
  a->tabu[a->curNode] = 1;   // start node counts as visited
  a->nextNode = -1;          // no successor chosen yet
  a->solutionLen = 0;
  a->pathIndex = 1;          // slot 0 is taken by the start node
}
// Run the ACO main loop: construct tours on the GPU, evaporate pheromone on
// the host, deposit pheromone on the GPU, and track the best tour length.
// Uses the module-level device/host buffers set up in main().
void acoSolve() {
  int iteration = 0;
  while (iteration++ < MAX_ITERATIONS) {
    // Part I: solution construction on the device.
    constructSolution<<<BLOCKS, THREADS>>>(antColony_d, state_d, heuristic_d,
                                           phero_d);
    cudaDeviceSynchronize();
    // Bring the ants' solutions back to the host.
    cudaMemcpy(antColony, antColony_d, sizeof(antColony),
               cudaMemcpyDeviceToHost);
    // Part II.a: pheromone evaporation (host side).
    for (int from = 0; from < NODES; from++) {
      for (int to = 0; to < NODES; to++) {
        // Skip the diagonal: self-edges have length 0 and are never used.
        if (from != to) {
          phero[from + to * NODES] *= (1.0 - RHO);
          // Guard against drift below zero: reset to the initial level.
          if (phero[from + to * NODES] < 0.0) {
            phero[from + to * NODES] = PHERO_INITIAL;
          }
        }
      }
    }
    cudaMemcpy(phero_d, phero, phero_size, cudaMemcpyHostToDevice);
    // Part II.b: pheromone deposit along each ant's tour (device side).
    atomicUpdate<<<BLOCKS, THREADS>>>(antColony_d, phero_d);
    // restartAnts is what folds each ant's tour length into bestSol_d, so
    // it must run BEFORE bestSol is copied back. (BUG FIX: the original
    // copied bestSol_d first, so the final iteration's results — and the
    // first iteration's — were never reflected in globalBest.)
    restartAnts<<<BLOCKS, THREADS>>>(antColony_d, state_d, bestSol_d);
    cudaDeviceSynchronize();
    cudaMemcpy(bestSol, bestSol_d, sizeof(bestSol), cudaMemcpyDeviceToHost);
    // Fold the per-ant bests into the global best.
    for (int i = 0; i < ANTS; i++) {
      if (bestSol[i] < globalBest) {
        globalBest = bestSol[i];
      }
    }
  } // end while iterations
  printf("Best Solution %f ", globalBest);
}
// Deposit pheromone along each ant's tour: every edge (from, to) of the
// tour receives Q / tourLength * RHO, applied to both (from,to) and
// (to,from) to keep the matrix symmetric. atomicAdd is required because
// many ants may share an edge.
// NOTE: atomicAdd on double requires compute capability 6.0+.
__global__ void atomicUpdate(struct ant *antColony_d, double *phero_d) {
  int ant_id = blockDim.x * blockIdx.x + threadIdx.x;
  for (int i = 0; i < NODES; i++) {
    int from = antColony_d[ant_id].solution[i];
    int to;
    // BUG FIX: the original tested (i > NODES - 1), which is never true
    // inside this loop, so every edge ended at solution[0]. Walk
    // consecutive tour nodes and close the tour on the last step instead.
    if (i < NODES - 1) {
      to = antColony_d[ant_id].solution[i + 1];
    } else {
      to = antColony_d[ant_id].solution[0];
    }
    double deposit = Q / antColony_d[ant_id].solutionLen * RHO;
    atomicAdd(&phero_d[from + to * NODES], deposit);
    // BUG FIX: the second add previously hit the same cell twice; deposit
    // on the mirrored entry to keep the pheromone matrix symmetric.
    atomicAdd(&phero_d[to + from * NODES], deposit);
  }
}
// One thread = one ant: probabilistically extend the ant's tour one city at
// a time until all NODES cities are visited, accumulating the tour length
// from the heuristic (distance) matrix. Assumes initializeAnts/restartAnts
// has already placed the ant on its start node (pathIndex == 1).
__global__ void constructSolution(struct ant *antColony_d, curandState *state_d,
                                  float *heuristic_d, double *phero_d) {
  int ant_id = blockDim.x * blockIdx.x + threadIdx.x;
  int node = 0;
  // Iterate NODES times; each pass adds at most one city to the tour.
  while (node++ < NODES) {
    // Here we check if the solution is not complete (when the path Index is
    // equal to the number of nodes we are done)
    if (antColony_d[ant_id].pathIndex < NODES) {
      // Select the next node via roulette-wheel selection.
      antColony_d[ant_id].nextNode =
          NextNode(antColony_d, ant_id, heuristic_d, phero_d, state_d);
      // Put the node in the tabu list and in the solution list.
      antColony_d[ant_id].tabu[antColony_d[ant_id].nextNode] = 1;
      antColony_d[ant_id].solution[antColony_d[ant_id].pathIndex++] =
          antColony_d[ant_id].nextNode;
      // Add the edge's distance to the running tour length.
      antColony_d[ant_id].solutionLen +=
          heuristic_d[antColony_d[ant_id].curNode +
                      (antColony_d[ant_id].nextNode * NODES)];
      // When the last node is placed, also add the distance from the last
      // node back to the first node to close the tour.
      if (antColony_d[ant_id].pathIndex == NODES) {
        antColony_d[ant_id].solutionLen +=
            heuristic_d[antColony_d[ant_id].solution[NODES - 1] +
                        (antColony_d[ant_id].solution[0] * NODES)];
      }
      // The newly selected node becomes the current node.
      antColony_d[ant_id].curNode = antColony_d[ant_id].nextNode;
    }
  }
  // printf("ant len %f", antColony_d[2].solutionLen);
}
// Transition-probability numerator for edge (from, to):
//   tau(from,to)^ALPHA * (1/eta(from,to))^BETA
// where tau is the pheromone level and eta the edge weight. NaN results
// are mapped to 0 so such an edge simply gets no selection weight.
// (The reciprocal is formed in float, as in the original expression.)
__device__ double probFunctionProduct(int from, int to, double *phero_d,
                                      float *heuristic_d) {
  const int idx = from + to * NODES;
  const double result =
      pow(phero_d[idx], ALPHA) * pow((double)(1.0f / heuristic_d[idx]), BETA);
  return isnan(result) ? 0.0 : result;
}
// Roulette-wheel selection of ant `pos`'s next city. First pass sums the
// probability numerators of all unvisited cities into `denom`; the do-loop
// then cycles through candidate cities drawing a uniform random number per
// candidate until one wins, or until the remaining-candidate counter runs
// out (which guarantees termination and returns the last candidate seen).
__device__ int NextNode(struct ant *antColony_d, int pos, float *heuristic_d,
                        double *phero_d, curandState *state_d) {
  int to, from;
  double denom = 0.00000001;  // epsilon guards the division below
  from = antColony_d[pos].curNode;
  for (to = 0; to < NODES; to++) {
    if (antColony_d[pos].tabu[to] == 0) {
      denom += probFunctionProduct(from, to, phero_d, heuristic_d);
    }
  }
  assert(denom != 0.0);
  // NOTE(review): after the loop `to == NODES`; this increment plus the one
  // at the top of the do-loop makes the first candidate examined be city 0.
  to++;
  int count = NODES - antColony_d[pos].pathIndex;  // unvisited cities left
  do {
    double p;
    to++;
    if (to >= NODES)
      to = 0;  // wrap around the city list
    if (antColony_d[pos].tabu[to] ==
        0) { // 0 means not in tabu list (i.e., node enabled to participate in
             // selection)
      p = probFunctionProduct(from, to, phero_d, heuristic_d) / denom;
      // Uniform draw in [0, 1) built from curand's unsigned int output.
      double x = (double)(curand(&state_d[pos]) % 1000000000) / 1000000000.0;
      // When we get the roulette wheel selected element - break
      if (x < p) {
        break;
      }
      count--;
      // Fallback: accept the current candidate once all have been tried.
      if (count == 0) {
        break;
      }
    }
  } while (1);
  return to;
}
// Entry point: sets up host/device state, runs the ACO solver on the
// "lin105" TSPLIB instance, and prints the best tour length plus the
// elapsed construction time in seconds.
int main() {
  // CUDA events used to time the construction phase.
  float exec_time;
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  // Set blocks and threads based on number of ants (one thread per ant).
  if (ANTS <= 1024) {
    BLOCKS = 1;
    THREADS = ANTS;
  } else {
    THREADS = 1024;
    BLOCKS = ceil(ANTS / (float)THREADS);
  }
  // Allocate host memory for the NODES x NODES weight/pheromone matrices.
  heuristic = (float *)malloc(NODES*NODES*sizeof(float));
  phero = (double*)malloc(NODES*NODES*sizeof(double));
  nodeTSP nodes[NODES];
  constructTSP("lin105", nodes);
  // Allocate device memory (sizes taken from the global host arrays).
  cudaMalloc((void **)&antColony_d, sizeof(antColony));
  cudaMalloc((void **)&state_d, sizeof(state));
  cudaMalloc((void **)&bestSol_d, sizeof(bestSol));
  cudaMalloc((void **)&heuristic_d, heuristic_size);
  cudaMalloc((void **)&phero_d, phero_size);
  cudaMemcpy(heuristic_d, heuristic, heuristic_size, cudaMemcpyHostToDevice);
  cudaMemcpy(phero_d, phero, phero_size, cudaMemcpyHostToDevice);
  // Seed curand with the wall-clock time so independent runs differ.
  time_t t;
  time(&t);
  setupCurandStates<<<BLOCKS, THREADS>>>(state_d, (unsigned long)t);
  cudaDeviceSynchronize();
  // Initialization phase: reset all ants and per-ant bests.
  initializeAnts<<<BLOCKS, THREADS>>>(antColony_d, state_d, bestSol_d);
  cudaDeviceSynchronize();
  cudaEventRecord(start, 0);
  // Construction phase (the timed region).
  acoSolve();
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&exec_time, start, stop);
  printf("%5.5f \n", exec_time / 1000); // time in ms is converted to seconds
  // Free memory.
  // NOTE(review): cudaEventDestroy(start/stop) is never called, and no CUDA
  // API return codes are checked — consider adding both.
  free(phero);
  free(heuristic);
  cudaFree(antColony_d);
  cudaFree(heuristic_d);
  cudaFree(phero_d);
  cudaFree(state_d);
  cudaFree(bestSol_d);
  return 0;
}
|
5,619 | #include "includes.h"
// Naive dense matrix multiply: C = A * B for square n x n matrices stored
// row-major, one output element per thread (2-D grid/block layout).
__global__ void matrixMulCUDA3(float *C, float *B, float *A, int n)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: threads outside the matrix do nothing.
    if (row < n && col < n) {
        float acc = 0.0f;
        for (int k = 0; k < n; ++k) {
            acc += A[row * n + k] * B[k * n + col];
        }
        C[row * n + col] = acc;
    }
}
5,620 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <string>
#include <algorithm>
#include <bits/stdc++.h>
#include <string.h>
#include <iomanip>
using namespace std;
/*
module purge
module load gcc/4.9.0
module load cmake/3.9.1
module load cuda
qsub -I -q coc-ice -l nodes=1:ppn=2:gpus=1,walltime=2:00:00,pmem=2gb
*/
/*
k = float
timesteps = integer
width/height = integer
default starting temp = float
location_x, location_y, location_z, width, height = integer
fixed_temperatures = float
*/
// CPU = Host
// GPU = Device
// global vars
#define T_P_B 512
string dimension;
float k;
int timesteps;
int width_i, height_i;
int depth_i = 1;
int location_x, location_y, location_z, width, height, depth;
float init_temp;
float fixed_temp;
float *h_grid;
float *h_fixed;
// struct block {
// int x;
// int y;
// int z;
// int width;
// int height;
// int depth;
// int size;
// float temp;
// };
// struct conf {
// string dimension;
// float k;
// int timesteps;
// int width_i;
// int height_i;
// int depth_i;
// float init_temp;
// vector<block> blocks;
// }
// global = call by HOST, run on DEVICE
// device = call by DEVICE, run on DEVICE
// HOST = no qualifier
// Index of the cell one row up (y-1); clamps at the top edge by
// returning idx itself so boundary cells use their own value.
__device__ int up(int idx, int width_i, int height_i) {
    const int above = idx - width_i;
    return (above < 0) ? idx : above;
}
// Index of the cell one row down (y+1); clamps at the bottom edge by
// returning idx itself so boundary cells use their own value.
__device__ int down(int idx, int width_i, int height_i) {
    const int below = idx + width_i;
    return (below > width_i * height_i - 1) ? idx : below;
}
// Index of the cell to the left; clamps at column 0 (returns idx itself).
__device__ int left(int idx, int width_i) {
    return (idx % width_i == 0) ? idx : idx - 1;
}
// Index of the cell to the right; clamps at the last column (returns idx).
__device__ int right(int idx, int width_i) {
    return (idx % width_i == width_i - 1) ? idx : idx + 1;
}
// Index one z-slice toward the front. NOTE(review): unlike up/down/left/
// right there is no bounds clamp here — confirm callers guard the z edges.
__device__ int front(int idx, int width_i, int height_i) {
    return idx - width_i * height_i;
}
// Index one z-slice toward the back. NOTE(review): unlike up/down/left/
// right there is no bounds clamp here — confirm callers guard the z edges.
__device__ int back(int idx, int width_i, int height_i) {
    return idx + width_i * height_i;
}
// __device__ void swapPtr(float **oldPtr, float **newPtr) {
// int temp = *oldPtr;
// *oldPtr = *newPtr;
// *newPtr = temp;
// }
// One explicit 2-D heat-diffusion step, one thread per cell:
//   temp[idx] = arr[idx] + k * (sum of 4 edge-clamped neighbours - 4*arr[idx])
// Neighbour lookups clamp at the grid edges (see up/down/left/right), so
// boundary cells effectively reuse their own value.
__global__ void heat2d(float *arr, float *temp, int width, int height, int size, float k) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // BUG FIX: the grid is launched with ceil(size / blockDim.x) blocks, so
    // without this guard the last block reads and writes past the end of
    // the allocations when size is not a multiple of the block size.
    if (idx >= size) {
        return;
    }
    temp[idx] = arr[idx] + k * (arr[left(idx, width)] +
                                arr[right(idx, width)] +
                                arr[up(idx, width, height)] +
                                arr[down(idx, width, height)] -
                                arr[idx] * 4);
}
// Return a copy of `str` with every space character (' ') removed.
// Tabs and other whitespace are intentionally left untouched.
std::string deleteSpace(std::string str)
{
    std::string out;
    out.reserve(str.size());
    for (char c : str) {
        if (c != ' ') {
            out += c;
        }
    }
    return out;
}
// Read the simulation configuration file named `filename` and populate the
// module-level state: `dimension` ("2D"/"3D"), conduction constant `k`,
// `timesteps`, grid extents (width_i/height_i/depth_i), the default
// temperature for every cell, and any fixed-temperature heater blocks
// (written into h_fixed, where -1 marks "not fixed"). Also allocates
// h_grid and h_fixed. Lines starting with '#' are comments; `count`
// tracks which configuration field the next non-comment line supplies.
void parseConf(string filename) {
    ifstream inFile;
    inFile.open(filename.c_str());
    if(!inFile) {
        cerr << "Unable to open file datafile.txt";
        exit(1); // call system to stop
    }
    // process conf file
    string line;
    int count = 0;
    while(getline(inFile, line)) {
        // NOTE(review): line[0] is read before the empty() check; on C++11
        // std::string this yields '\0' for an empty line, so it works, but
        // the two conditions would read more safely in the other order.
        if(line[0] != '#' && (!line.empty())) {
            if (count == 0) {
                // Field 0: dimension string ("2D" or "3D").
                dimension = deleteSpace(line);
                cout << dimension << endl;
            } else if (count == 1) {
                // Field 1: conduction constant k.
                k = atof(deleteSpace(line).c_str());
                cout << k << endl;
            } else if (count == 2) {
                // Field 2: number of simulation timesteps.
                timesteps = atoi(deleteSpace(line).c_str());
                cout << timesteps << endl;
            } else if (count == 3) {
                // Field 3: grid extents; also allocates the host arrays.
                // replace ',' with ' ' for easier parsing
                replace(line.begin(), line.end(), ',', ' ');
                if (dimension == "2D") {
                    vector<string> vec;
                    istringstream iss(line);
                    for(string line; iss >> line;) {
                        vec.push_back(line);
                    }
                    width_i = atoi(vec.at(0).c_str());
                    height_i = atoi(vec.at(1).c_str());
                    h_grid = new float[width_i * height_i];
                    h_fixed = new float[width_i * height_i];
                    cout << width_i << " " << height_i << endl;
                } else {
                    // 3D: width, height, depth.
                    vector<string> vec;
                    istringstream iss(line);
                    for(string line; iss >> line;) {
                        vec.push_back(line);
                    }
                    width_i = atoi(vec.at(0).c_str());
                    height_i = atoi(vec.at(1).c_str());
                    depth_i = atoi(vec.at(2).c_str());
                    h_grid = new float[width_i * height_i * depth_i];
                    h_fixed = new float[width_i * height_i * depth_i];
                    cout << width_i << " " << height_i << " " << depth_i << endl;
                }
            } else if (count == 4) {
                // Field 4: default starting temperature; fill the grid and
                // mark every cell as "not fixed" (-1) in h_fixed.
                init_temp = atof(deleteSpace(line).c_str());
                if (dimension == "2D") {
                    for (int i = 0; i < width_i * height_i; i++) {
                        h_grid[i] = init_temp;
                        h_fixed[i] = -1;
                    }
                } else {
                    for (int i = 0; i < width_i * height_i * depth_i; i++) {
                        h_grid[i] = init_temp;
                        h_fixed[i] = -1;
                    }
                }
                cout << init_temp << endl;
            } else if (count >= 5 && !inFile.eof()) {
                // Fields 5+: one fixed-temperature block per line.
                // replace ',' with ' ' for easier parsing
                replace(line.begin(), line.end(), ',', ' ');
                if (dimension == "2D") {
                    // 2D block: x, y, width, height, temperature.
                    vector<string> vec;
                    istringstream iss(line);
                    for(string line; iss >> line;) {
                        vec.push_back(line);
                    }
                    location_x = atoi(vec.at(0).c_str());
                    location_y = atoi(vec.at(1).c_str());
                    width = atoi(vec.at(2).c_str());
                    height = atoi(vec.at(3).c_str());
                    fixed_temp = atof(vec.at(4).c_str());
                    int start_point = location_y * width_i + location_x;
                    cout << location_x << " " << location_y << " " << width << " " << height << " " << fixed_temp << endl;
                    // Walk the rectangle row by row, writing fixed_temp into
                    // h_fixed for every covered cell.
                    int row = start_point / width_i;
                    for(int i = start_point; i < start_point + width + (height - 1) * width_i; i++) {
                        // cout << "debug: " << width + row * width_i + start_point % width_i << endl;
                        if (i < width + row * width_i + start_point % width_i && i >= row * width_i + start_point % width_i) {
                            h_fixed[i] = fixed_temp;
                            cout << i << endl;
                        }
                        if (i == -1 + (row + 1) * width_i) {
                            row++;
                        }
                    }
                    // (location_y * width_i + location_x)
                } else {
                    // 3D block: parsed and echoed only — h_fixed is NOT
                    // updated for 3D blocks in this version.
                    vector<string> vec;
                    istringstream iss(line);
                    for(string line; iss >> line;) {
                        vec.push_back(line);
                    }
                    location_x = atoi(vec.at(0).c_str());
                    location_y = atoi(vec.at(1).c_str());
                    location_z = atoi(vec.at(2).c_str());
                    width = atoi(vec.at(3).c_str());
                    height = atoi(vec.at(4).c_str());
                    depth = atoi(vec.at(5).c_str());
                    fixed_temp = atof(vec.at(6).c_str());
                    cout << location_x << " " << location_y << " " << location_z << " " << width << " " << height << " " << depth << " " << fixed_temp << endl;
                }
            }
            count++;
        }
    }
    inFile.close();
}
// Dump the final 2-D grid to "heatOutput.csv" (mirroring it to stdout),
// one row of the simulation grid per CSV line. Uses the global
// width_i/height_i extents.
void printToCSV(float *h_grid) {
    std::ofstream myFile("heatOutput.csv");
    for (int i = 0; i < width_i * height_i; i++) {
        // cout.setprecision(1);
        // myFile.setprecision(1);
        cout << h_grid[i] << ", ";
        myFile << h_grid[i];
        // End the CSV row at the last column; otherwise emit a separator.
        // NOTE(review): the `i != 0` clause suppresses the row break when
        // width_i == 1 at i == 0 — confirm that degenerate width is unused.
        if (((i+1) % width_i) == 0 && i != 0) {
            cout << endl;
            myFile << endl;
        } else {
            myFile << ", ";
        }
    }
}
// Re-apply fixed boundary temperatures on the host: every entry of
// h_fixed other than the -1 sentinel overwrites the corresponding cell
// of d_new. (Despite the name, d_new is a host-accessible array here.)
void set_fixed(float *d_new, float *h_fixed, int size) {
    for (int i = 0; i < size; ++i) {
        const float fixedVal = h_fixed[i];
        if (fixedVal != -1) {
            d_new[i] = fixedVal;
        }
    }
}
// __global__ void vectorAdd(int *a, int *b, int *c, int n) {
// int id = threadIdx.x + blockIdx.x * blockDim.x;
// int i = threadIdx.x;
// if (i < n) {
// c[i] = a[i] + b[i];
// }
// }
// Driver: parse the configuration named by argv[1], run `timesteps`
// iterations of the 2-D heat kernel (re-applying fixed-temperature cells
// on the host after each step), and write the final grid to heatOutput.csv.
int main(int argc, char** argv) {
    // NOTE(review): argv[1] is used without checking argc — running with no
    // argument dereferences a null pointer.
    string filename = argv[1];
    parseConf(filename);
    int area = width_i * height_i;   // NOTE(review): unused
    int size = width_i * height_i * depth_i;
    int N = (size + T_P_B - 1) / T_P_B;   // ceil-div: number of blocks
    float *d_old, *d_new;
    cout << endl << endl << endl;
    // Debug dump of the fixed-temperature mask.
    for (int i = 0; i < width_i * height_i; i++) {
        cout << h_fixed[i] << " ";
        if (((i+1) % width_i) == 0 && i != 0) {
            cout << endl;
        }
    }
    cout << endl << endl;
    // Apply fixed temperatures to the initial grid and dump it.
    for (int i = 0; i < width_i * height_i; i++) {
        if (h_fixed[i] != -1) {
            h_grid[i] = h_fixed[i];
        }
        cout << h_grid[i] << " ";
        if (((i+1) % width_i) == 0 && i != 0) {
            cout << endl;
        }
    }
    cout << endl << endl;
    // cudaMalloc(void **devPtr, size_t sizeInBytes);
    cudaMalloc((void **) &d_old, size * sizeof(float));
    cudaMalloc((void **) &d_new, size * sizeof(float));
    cout << "T_P_B: " << (width_i*height_i + 32 - 1)/32 << endl;
    // cudaMemcpy(void *dest, void *src, size_t, sizeinBytes, enum direction);
    // cudaMemcpy(d_old, h_grid, size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_new, h_grid, size * sizeof(float), cudaMemcpyHostToDevice);
    cout << timesteps << endl;
    // Time-stepping loop: upload the current grid, run one kernel step,
    // download, then re-pin the fixed cells on the host and echo the grid.
    // NOTE(review): a per-step host round-trip like this defeats async
    // overlap; a device-side fixed-cell pass would avoid the copies.
    for (int i = 0; i < timesteps; i++) {
        cudaMemcpy(d_old, h_grid, size * sizeof(float), cudaMemcpyHostToDevice);
        cout << i << endl;
        // if (strcmp(dimension.c_str(), "2D")) {
        // copy array elements
        heat2d<<<N, T_P_B>>>(d_old, d_new, width_i, height_i, size, k);
        cudaDeviceSynchronize();
        // set_fixed(d_new, h_fixed, size);
        // }
        cudaMemcpy(h_grid, d_new, size * sizeof(float), cudaMemcpyDeviceToHost);
        for (int i = 0; i < width_i * height_i; i++) {
            if (h_fixed[i] != -1) {
                h_grid[i] = h_fixed[i];
            }
            printf("%.2f ", h_grid[i]);
            if (((i+1) % width_i) == 0 && i != 0) {
                cout << endl;
            }
        }
        cout << endl << endl;
    }
    // cudaMemcpy(h_grid, d_old, N * sizeof(float), cudaMemcpyDeviceToHost);
    printToCSV(h_grid);
    // print to debug
    // NOTE(review): h_grid/h_fixed (allocated with new[]) are never
    // delete[]d — harmless at exit, but worth freeing for cleanliness.
    // cudaFree(void **devPtr);
    cudaFree(d_old);
    cudaFree(d_new);
    return 0;
}
5,621 | /* Single-Precision AX+Y in Cuda
*******************************************************************
* Description:
* Populate two vectors each of size N. In the first vector,
* multiply each element by some constant scalar and then sum
* this product with with the elment at same index in other
* vector. This result gets stored in the second vector.
*******************************************************************
* Source:
* https://devblogs.nvidia.com/even-easier-introduction-cuda/
*******************************************************************
*/
#include <ctime>
#include <iostream>
#include <math.h>
#include <string>
using namespace std;
/* saxpy function for kernel */
/* Kernel: y[i] = a * x[i] + y[i] for every i in [0, n).
 * Uses a grid-stride loop, so any <<<blocks, threads>>> configuration
 * covers all n elements. */
__global__
void saxpy(int n, float a, float *x, float *y) {
  // total number of threads launched across the whole grid
  const int gridSize = blockDim.x * gridDim.x;
  // each thread starts at its global index and hops by the grid size
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridSize) {
    y[i] = a * x[i] + y[i];
  }
}
/* program's main() */
/* Program entry point: parses -n/-r/-t flags, runs saxpy R times on the
 * GPU with unified memory, then verifies and times the result. */
int main(int argc, char* argv[]) {
  // defaults: vector size, repeat count, threads per block
  int N = 1000000;
  int R = 1000;
  int T = 256;
  // parse optional "-n <int>", "-r <int>", "-t <int>" flag pairs.
  // Fixed: the original iterated i up to argc and read argv[i + 1],
  // which is out of bounds when a flag is the last argument.
  if (argc > 1) {
    for (int i = 1; i + 1 < argc; i++) {
      string arg = argv[i];
      if (arg.compare("-n") == 0) {
        N = stoi(argv[i + 1]);
      }
      else if (arg.compare("-r") == 0) {
        R = stoi(argv[i + 1]);
      }
      else if (arg.compare("-t") == 0) {
        T = stoi(argv[i + 1]);
      }
    }
  }
  // print run configuration
  cout << "========================================" << endl;
  cout << "|\tSingle-Precision AX+Y" << endl;
  cout << "========================================" << endl;
  cout << "|\tUsing CUDA 9.2" << endl;
  cout << "|\tN = " << N << endl;
  cout << "|\tRuns = " << R << endl;
  cout << "|\tThreads/Block = " << T << endl;
  cout << "|" << endl;
  cout << "|\trunning..." << endl;
  // unified-memory vectors, visible to both host and device
  float *x, *y;
  cudaMallocManaged(&x, N * sizeof(float));
  cudaMallocManaged(&y, N * sizeof(float));
  // initial values: after R runs of y += 2*x, y[i] == 1 + 4R
  for (int i = 0; i < N; i++) {
    y[i] = 1.0f;
    x[i] = 2.0f;
  }
  clock_t start = clock();
  // launch saxpy R times; launches are asynchronous and queue up
  for (int i = 0; i < R; i++) {
    saxpy<<<(N + T - 1) / T, T>>>(N, 2.0f, x, y);
  }
  // wait for all queued kernels before timing / reading y on the host
  cudaDeviceSynchronize();
  clock_t stop = clock();
  // verify: every element should be exactly 4R + 1 (all values here are
  // integers well below 2^24, so float arithmetic is exact)
  int errors = 0;
  for (int i = 0; i < N; i++) {
    if (fabs(y[i] - (4 * R + 1.0f)) > 0.0f)
      errors++;
  }
  cout << "|\t done!" << endl;
  cout << "|" << endl;
  cout << "|\tCalculation Errors = " << errors << endl;
  cout << "|\tTime = " << (stop - start) / (double) CLOCKS_PER_SEC << " seconds" << endl;
  cout << "========================================" << endl;
  cudaFree(x);
  cudaFree(y);
  return 0;
}
|
5,622 | #include <iostream>
#include <vector>
using namespace std;
// Create a kernel to estimate pi
// Monte-Carlo pi kernel: each thread counts how many of its assigned
// (x, y) sample points fall inside the unit circle, the counts are
// staged in shared memory, and thread 0 of each block writes the block
// total to d_countInBlocks[blockIdx.x].
// Precondition: blockDim.x <= 500 (size of the shared staging array).
__global__
void count_samples_in_circles(float* d_randNumsX, float* d_randNumsY, int* d_countInBlocks, int num_blocks, int nsamples)
{
    // one slot per thread in the block; indexed by threadIdx.x below
    __shared__ int shared_blocks[500];
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // grid-wide stride (num_blocks is the launch's grid size)
    int stride = blockDim.x * num_blocks;
    // Iterates through this thread's strided subset of the samples
    int inCircle = 0;
    for (int i = index; i < nsamples; i+= stride) {
        float xValue = d_randNumsX[i];
        float yValue = d_randNumsY[i];
        if (xValue*xValue + yValue*yValue <= 1.0f) {
            inCircle++;
        }
    }
    shared_blocks[threadIdx.x] = inCircle;
    // all per-thread counts must be staged before thread 0 reads them
    __syncthreads();
    // Pick thread 0 for each block to collect all points from each Thread.
    if (threadIdx.x == 0)
    {
        int totalInCircleForABlock = 0;
        for (int j = 0; j < blockDim.x; j++)
        {
            totalInCircleForABlock += shared_blocks[j];
        }
        d_countInBlocks[blockIdx.x] = totalInCircleForABlock;
    }
}
// Total Monte-Carlo sample count (the double literal 1e8 converts to the
// int 100000000).
int nsamples = 1e8;
/* Host driver: generates random points, counts per-block hits on the GPU,
 * reduces the block totals on the host, and prints the pi estimate. */
int main(void) {
    // host-side random samples in [0, 1]
    vector<float> h_randNumsX(nsamples);
    vector<float> h_randNumsY(nsamples);
    srand(time(NULL)); // seed with system clock
    for (int i = 0; i < h_randNumsX.size(); ++i)
    {
        h_randNumsX[i] = float(rand()) / RAND_MAX;
        h_randNumsY[i] = float(rand()) / RAND_MAX;
    }
    // copy the samples to the GPU
    size_t size = nsamples * sizeof(float);
    float* d_randNumsX;
    float* d_randNumsY;
    cudaMalloc(&d_randNumsX, size);
    cudaMalloc(&d_randNumsY, size);
    cudaMemcpy(d_randNumsX, &h_randNumsX.front(), size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_randNumsY, &h_randNumsY.front(), size, cudaMemcpyHostToDevice);
    // launch config: each thread handles ~1000 samples via the stride loop;
    // threadsPerBlock must not exceed the kernel's 500-slot shared array
    int threadsPerBlock = 500;
    int num_blocks = nsamples / (1000 * threadsPerBlock);
    size_t countBlocks = num_blocks * sizeof(int);
    int* d_countInBlocks;
    cudaMalloc(&d_countInBlocks, countBlocks);
    // count hits per block on the device
    count_samples_in_circles<<<num_blocks, threadsPerBlock>>>(d_randNumsX, d_randNumsY, d_countInBlocks, num_blocks, nsamples);
    if ( cudaSuccess != cudaGetLastError() )
        cout << "Error!\n";
    // reduce the per-block totals on the host
    int* h_countInBlocks = new int[num_blocks];
    cudaMemcpy(h_countInBlocks, d_countInBlocks, countBlocks, cudaMemcpyDeviceToHost);
    int nsamples_in_circle = 0;
    for (int i = 0 ; i < num_blocks; i++) {
        nsamples_in_circle = nsamples_in_circle + h_countInBlocks[i];
    }
    // fixed: the block-count buffer was never released
    delete[] h_countInBlocks;
    cudaFree(d_randNumsX);
    cudaFree(d_randNumsY);
    cudaFree(d_countInBlocks);
    // fraction that fell within (quarter) of unit circle, times 4 ~= pi
    float estimatedValue = 4.0 * float(nsamples_in_circle) / nsamples;
    cout << "Estimated Value: " << estimatedValue << endl;
}
|
5,623 | #include <complex>
#include <cstdio>
#include <cufft.h>
#include <math.h>
#include <string.h>
using namespace std;
// Poisson solve in k-space: divides every Fourier coefficient of the
// charge density by k^2, turning rho_k into phi_k in place.
// d_phi_k holds interleaved (re, im) doubles for an R2C layout of
// Nx x Ny x Nz_half complex coefficients; the index==0 (k=0) mode is
// skipped, since dividing by k^2 = 0 is undefined.
__global__ void getPhi(double *d_phi_k, int Nx, int Ny, int Nz_half, double L){
    // Change made inside d_phi_k
    int N = Nx * Ny * Nz_half;
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int nx, ny;
    double kx2, ky2, kz2;
    // grid-stride loop over all complex coefficients
    while(index < N) {
        // decompose the flat index into (i, j, k) grid coordinates
        int i = index / (Ny * Nz_half);
        int j = (index / Nz_half) % Ny;
        int k = index % Nz_half;
        // fold indices above the Nyquist frequency back to their
        // (absolute) signed-frequency magnitude
        if(2 * i < Nx){
            nx = i;
        }
        else{
            nx = Nx - i;
        }
        if(2 * j < Ny){
            ny = j;
        }
        else{
            ny = Ny - j;
        }
        // squared wave-vector components, 2*pi*n/L each
        kx2 = pow(2.0 * M_PI * (double)nx / L, 2);
        ky2 = pow(2.0 * M_PI * (double)ny / L, 2);
        kz2 = pow(2.0 * M_PI * (double)k / L, 2);
        if(index != 0){
            // d_phi_k[2*index] = 4.0 * M_PI * d_phi_k[2*index] / (kx2 + ky2 + kz2);
            // d_phi_k[2*index+1] = 4.0 * M_PI * d_phi_k[2*index+1] / (kx2 + ky2 + kz2);
            // real and imaginary parts scaled by 1 / k^2
            d_phi_k[2*index] = d_phi_k[2*index] / (kx2 + ky2 + kz2);
            d_phi_k[2*index+1] = d_phi_k[2*index+1] / (kx2 + ky2 + kz2);
        }
        index = index + blockDim.x * gridDim.x;
    }
}
/* Host driver: solves the Poisson equation for a unit point charge at the
 * origin of an Nx^3 periodic box using cuFFT — forward D2Z transform,
 * division by k^2 in getPhi, inverse Z2D transform — then prints the
 * (unnormalized-by-N) potential along an axis and along the diagonal. */
int main ()
{
    // Set GPU Device
    int gid;
    printf("Enter the GPU ID (0/1): ");
    scanf("%d",&gid);
    printf("%d\n", gid);
    cudaSetDevice(gid);
    int Nx, Ny, Nz, N;
    printf("Enter the sample points of the cube in each side: ");
    scanf("%d", &Nx);
    printf("Each side sample points = %d\n", Nx);
    Ny = Nx;
    Nz = Nx;
    N = pow(Nx, 3);
    double dx = 1.0; // First fixed dx. TODO
    double L = dx * (double)Nx;
    // Do not fix dx
    // printf("Enter the length of the cube: ");
    // scanf("%lf", &L);
    // printf("Length = %.2lf\n", L);
    // dx = L / (double) Nx;
    // printf("dx = %.2lf\n", dx);
    int io;
    printf("Print the data (0/1) ? ");
    scanf("%d",&io);
    printf("%d\n", io);
    /*
      Initialize
    */
    // lo: real-space charge density; lo_k: its R2C transform
    // (Nx * Ny * (Nz/2+1) complex values — the non-redundant half)
    double *lo;
    complex<double> *lo_k;
    lo = (double*) malloc(sizeof(double) * N);
    lo_k = (complex<double> *) malloc(sizeof(complex<double>) * Nx * Ny * (Nz/2+1));
    memset(lo, 0.0, sizeof(double) * N);
    // point charge at the origin
    lo[0] = 1.0;
    /*
      Poisson Eq with FFT method
    */
    // FFT lo -> lo_k
    cufftHandle plan;
    cufftDoubleReal *dataIn;
    cufftDoubleComplex *dataOut;
    cudaMalloc((void**)&dataIn, sizeof(cufftDoubleReal) * N);
    cudaMalloc((void**)&dataOut, sizeof(cufftDoubleComplex) * N);
    cudaMemcpy(dataIn, lo, sizeof(cufftDoubleReal) * N, cudaMemcpyHostToDevice);
    if(cufftPlan3d(&plan, Nx, Ny, Nz, CUFFT_D2Z) != CUFFT_SUCCESS){
        printf("CUFFT error: cufftPlan3d creation failed.\n");
        exit(1);
    }
    if(cufftExecD2Z(plan, dataIn, dataOut) != CUFFT_SUCCESS){
        printf("CUFFT error: cufftExecD2Z forward failed.\n");
        exit(1);
    }
    // Copy only the non redundant data
    cudaMemcpy(lo_k, dataOut, sizeof(cufftDoubleComplex) * Nx * Ny * (Nz/2+1), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cufftDestroy(plan);
    cudaFree(dataIn);
    cudaFree(dataOut);
    free(lo);
    // Print the data of lo_k
    // for(int i = 0; i < Nx * Ny * (Nz/2+1); i = i+1){
    //   printf("%.3lf + i * %.3lf\n", real(lo_k[i]), imag(lo_k[i]));
    // }
    // Calculate lo_k / k**2 = phi_k
    // d_phi_k views the complex array as interleaved doubles (factor 2)
    complex<double> *phi_k;
    double *d_phi_k;
    phi_k = (complex<double> *)malloc(sizeof(complex<double>) * Nx * Ny * (Nz/2+1));
    cudaMalloc((void**)&d_phi_k, sizeof(double) * 2 * Nx * Ny * (Nz/2+1));
    cudaMemcpy(d_phi_k, lo_k, sizeof(double) * 2 * Nx * Ny * (Nz/2+ 1), cudaMemcpyHostToDevice);
    getPhi <<<64, 64>>> (d_phi_k, Nx, Ny, Nz/2+1, L);
    cudaMemcpy(phi_k, d_phi_k, sizeof(double) * 2 * Nx * Ny * (Nz/2+1), cudaMemcpyDeviceToHost);
    cudaFree(d_phi_k);
    free(lo_k);
    // IFFT phi_k -> phi (cuFFT inverse is unnormalized; the printout
    // below divides by N to compensate)
    double *phi;
    phi = (double*) malloc(sizeof(double) * N);
    cudaMalloc((void**)&dataIn, sizeof(cufftDoubleReal) * N);
    cudaMalloc((void**)&dataOut, sizeof(cufftDoubleComplex) * Nx * Ny * (Nz/2+1));
    cudaMemcpy(dataOut, phi_k, sizeof(cufftDoubleComplex) * Nx * Ny * (Nz/2+1), cudaMemcpyHostToDevice);
    if(cufftPlan3d(&plan, Nx, Ny, Nz, CUFFT_Z2D) != CUFFT_SUCCESS){
        printf("CUFFT error: cufftPlan3d creation failed.\n");
        exit(1);
    }
    if(cufftExecZ2D(plan, dataOut, dataIn) != CUFFT_SUCCESS){
        printf("CUFFT error: cufftExecZ2D forward failed.\n");
        exit(1);
    }
    cudaMemcpy(phi, dataIn, sizeof(cufftDoubleReal) * N, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaFree(dataIn);
    cudaFree(dataOut);
    free(phi_k);
    // Print out on screen: potential along the x-axis and the diagonal,
    // referenced to phi[1] and normalized by N
    if(io == 1){
        printf("phi-X r phi-D r\n");
        for(int i = 0; i < Nx; i = i+1){
            printf("%.5lf %.5lf ", (phi[i] - phi[1]) / (double)N, (double)i * dx);
            printf("%.5lf %.5lf\n", (phi[i*Ny*Nz + i*Ny + i] - phi[1]) / (double)N, sqrt(3.0 * pow((double)i * dx,2)));
        }
    }
    // Print to file
    cufftDestroy(plan);
    cudaDeviceReset();
    return 0;
}
// eof
|
5,624 | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_profiler_api.h>
#include <iostream>
#include <vector>
#include <future>
// Prints basic properties of CUDA device 0, or a notice when no GPU exists.
// Fixed: the original labels were attached to unrelated properties
// (regsPerMultiprocessor, sharedMemPerMultiprocessor,
// maxThreadsPerMultiProcessor) and used %d for size_t fields.
void getDeviceInformation() {
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, 0);
    // the runtime reports 9999.9999 as the compute capability when no
    // CUDA-capable device is present
    if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
        printf("No CUDA GPU has been detected\n");
    }
    else {
        printf("Device name: %s\n", deviceProp.name);
        printf("Total Global Memory: %zu\n", (size_t)deviceProp.totalGlobalMem);
        printf("Total shared mem per block: %zu\n", (size_t)deviceProp.sharedMemPerBlock);
        printf("Total const mem size: %zu\n", (size_t)deviceProp.totalConstMem);
    }
}
//cudaStream_t streams[NUM_STREAMS];
// Runs one shell command line synchronously via the C runtime.
void exec(const char* s){
    (void) system(s);
}
// Queues shell-command lines and executes them one after another.
class Scheduler {
    std::vector<std::string> programs;  // queued command lines, in arrival order
    std::vector<int> map;               // NOTE(review): written (always -1) but never read here
    int i=0;                            // NOTE(review): unused in the visible code
public:
    Scheduler(){
    }
    // Appends one command line to the queue.
    void programCall(std::string str) {
        //std::cout << str << "\n";
        programs.push_back(str);
        map.push_back(-1);
    }
    // Placeholder: no scheduling policy is implemented yet.
    void schedule(){
    }
    // Runs every queued command. A thread is spawned per command but
    // joined immediately, so execution is effectively sequential.
    void execute(){
        //cudaProfilerStart();
        for(auto f : programs){
            //std::async(std::launch::async, exec, f.data());
            std::thread t1(exec, f.data());
            t1.join();
        }
        /*for(auto f : programs){
        //std::async(std::launch::async, exec, f.data());
        t1(exec, f.data());
        }*/
        //cudaProfilerStop();
    }
};
/* Entry point: prints device info, reads one command line per stdin line
 * until EOF or a lone-space sentinel line, then runs the queued commands.
 * Fixed: the original queued the sentinel line itself as a program, and
 * looped forever if stdin hit EOF before the sentinel appeared. */
int main(int argc, char **argv) {
    getDeviceInformation();
    Scheduler s;
    std::string line;
    // stop cleanly on EOF / stream error as well as on the " " sentinel
    while (std::getline(std::cin, line)) {
        if (line == " ") {
            break;
        }
        s.programCall(line);
    }
    s.execute();
    return 0;
}
|
5,625 |
#include <cuda.h>
// Element-wise vector addition: res[x] = a[x] + b[x] for x < size.
// One thread per element; threads past `size` exit without touching memory.
// Fixed: removed the invalid `inline` qualifier on a __global__ function
// and added the missing bounds guard for partial final blocks.
__global__ void KernelTest(int * a, int * b, int * res, int size)
{
    // global element index for this thread
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    if (x < (unsigned int)size) {
        res[x] = a[x] + b[x];
    }
}
// Host wrapper: launches KernelTest with 64 threads per block.
// NOTE(review): size / dimBlock.x truncates, so whenever `size` is not a
// multiple of 64 the tail elements are never processed. Fixing this needs
// a ceil-divide grid here *and* a bounds check inside the kernel; verify
// callers always pass multiples of 64 in the meantime.
extern "C"
void runKernel(int * a, int * b, int * res, int size)
{
    dim3 dimBlock(64, 1, 1);
    dim3 dimGrid(size / dimBlock.x, 1, 1);
    KernelTest<<< dimGrid, dimBlock, 0 >>>( a, b, res, size);
}
|
5,626 | #include <math.h>
// Element-wise sum: m3 = m1 + m2 over n elements.
// Intended for a single-block launch; threads stride by blockDim.x.
__global__ void add(float* m1, float* m2, float* m3, int n){
    for(int i = threadIdx.x; i < n; i += blockDim.x){
        m3[i] = m1[i] + m2[i];
    }
}
// Element-wise difference: m3 = m1 - m2 over n elements.
// Intended for a single-block launch; threads stride by blockDim.x.
__global__ void minus(float* m1, float* m2, float* m3, int n){
    for(int i = threadIdx.x; i < n; i += blockDim.x){
        m3[i] = m1[i] - m2[i];
    }
}
// Scalar multiply: m2 = m1 * factor over n elements.
// Intended for a single-block launch; threads stride by blockDim.x.
__global__ void mul(float* m1, float factor, float* m2, int n){
    for(int i = threadIdx.x; i < n; i += blockDim.x){
        m2[i] = m1[i] * factor;
    }
}
// Scalar divide: m2 = m1 / divsor over n elements.
// Intended for a single-block launch; threads stride by blockDim.x.
__global__ void div(float* m1, float divsor, float* m2, int n){
    for(int i = threadIdx.x; i < n; i += blockDim.x){
        m2[i] = m1[i] / divsor;
    }
}
// function pow
// Element-wise power: m2[i] = m1[i] ** power over n elements.
// 1 block, 1 dimensional block size
// NOTE(review): this kernel shadows the math-library name `pow`; the call
// inside resolves to the math overload because the argument types
// (float, float) differ from the kernel's signature — confirm with nvcc.
__global__ void pow(float* m1, float power, float* m2, int n){
    for(int index=threadIdx.x;index<n;index+=blockDim.x)
        m2[index]=pow(m1[index],power);
}
// function sqrt
// Element-wise square root: m2[i] = sqrt(m1[i]) over n elements.
// 1 block, 1 dimensional block size
// NOTE(review): this kernel shadows the math-library name `sqrt`; the
// inner call resolves to the math overload because its argument is a
// single float — confirm with nvcc.
__global__ void sqrt(float* m1, float* m2, int n){
    for(int index=threadIdx.x;index<n;index+=blockDim.x)
        m2[index]=sqrt(m1[index]);
}
// Matrix product: m3 (H x M) = m1 (H x L) * m2 (L x M), row-major.
// Intended for a single block with a 2-D thread layout; the thread grid
// strides over the output rows (x) and columns (y).
__global__ void dot(float* m1, float* m2, float* m3,
                    int size_h, int size_l, int size_m){
    for(int r = threadIdx.x; r < size_h; r += blockDim.x){
        for(int c = threadIdx.y; c < size_m; c += blockDim.y){
            // accumulate the inner product of row r of m1 and column c of m2
            float acc = 0.0;
            for(int k = 0; k < size_l; k++){
                acc += m1[r * size_l + k] * m2[k * size_m + c];
            }
            m3[r * size_m + c] = acc;
        }
    }
}
|
5,627 | // This is a generated file, do not edit it!
#pragma once
#include <stdint.h>
// Plain-data record describing one candidate split — presumably for a
// decision-tree builder; field semantics inferred from names only, verify
// against the generator that emits this file.
typedef struct Split {
    float Entropy;            // NOTE(review): likely the split's impurity score — confirm
    uint8_t SplitType;
    uint8_t Axis;
    int32_t Column;
    float SplitAttribute;     // NOTE(review): likely the threshold value — confirm
    uint32_t SplitCategories; // NOTE(review): likely a category bitmask — confirm
} Split;
|
5,628 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <exception>
#include <iostream>
#include <sstream>
#include <string>
// CE(err): checks a cudaError_t and throws std::runtime_error carrying the
// function, file, line, and CUDA error string when the call failed.
// (Comments cannot go inside the macro body — the backslash continuations
// must end each line.)
#define CE(err)                                                            \
    {                                                                      \
        if (err != cudaSuccess)                                            \
        {                                                                  \
            std::stringstream err_ss;                                      \
            err_ss << "CUDA error in " << __FUNCTION__ << " (" << __FILE__ \
                   << ":" << __LINE__ << ") - " << cudaGetErrorString(err); \
            throw std::runtime_error(err_ss.str());                        \
        }                                                                  \
    }
// Thin checked wrappers around the CUDA allocation/copy API; every call
// goes through CE and throws std::runtime_error on failure.
namespace pcw {
    // Allocates device storage for `count` objects of T.
    template <typename T>
    T* alloc(size_t count = 1)
    {
        T* device_ptr;
        CE(cudaMalloc(&device_ptr, sizeof(T) * count));
        return device_ptr;
    }
    // Host -> device copy of `count` objects.
    template <typename T>
    void copy_to(T* const dest, T const* const src, size_t count = 1)
    {
        CE(cudaMemcpy(dest, src, sizeof(T) * count, cudaMemcpyHostToDevice));
    }
    // Device -> host copy of `count` objects.
    template <typename T>
    void copy_from(T* const dest, T const* const src, size_t count = 1)
    {
        CE(cudaMemcpy(dest, src, sizeof(T) * count, cudaMemcpyDeviceToHost));
    }
    // Releases device storage obtained from alloc().
    template <typename T>
    void free(T* const device_ptr)
    {
        CE(cudaFree(device_ptr));
    }
}
constexpr size_t BLOCK_SIZE = 16;
// Returns the length of the longest inner list — the logical column count
// of a braced 2-D initializer.
template <typename T>
size_t determine_width(std::initializer_list<std::initializer_list<T>> lst)
{
    size_t widest = 0;
    for (auto const& row : lst)
    {
        if (row.size() > widest)
        {
            widest = row.size();
        }
    }
    return widest;
}
// Row-major float matrix with externally managed storage. `elements` is
// not owned by the struct; it may point to host or device memory, so the
// accessors are __host__ __device__.
struct matrix_t
{
    size_t width = 0;           // number of columns
    size_t height = 0;          // number of rows
    float* elements = nullptr;  // width*height floats, row-major, not owned
    // Fills the matrix from nested braces; rows shorter than `width` are
    // zero-padded. Dimensions must match the matrix exactly (asserted).
    void operator=(std::initializer_list<std::initializer_list<float>> vals)
    {
        assert(width == determine_width(vals));
        assert(height == vals.size());
        size_t i = 0;
        for (auto const& rw : vals)
        {
            std::fill(std::copy(rw.begin(), rw.end(), elements + width * i),
                      elements + width * (i + 1),
                      0.0f);
            ++i;
        }
    }
    size_t element_count() const
    {
        return width * height;
    }
    // Element access. NOTE(review): the parameter names read as
    // (column, row), but the computed index is wd * width + ht and every
    // call site in this file passes (row, col) — i.e. this is row-major
    // m(row, col) with misleadingly named parameters.
    __host__ __device__ float operator()(size_t wd, size_t ht) const
    {
        return elements[wd * width + ht];
    }
    __host__ __device__ float& operator()(size_t wd, size_t ht)
    {
        return elements[wd * width + ht];
    }
};
// Streams a matrix in nested-brace form, one row per output line.
std::ostream& operator<<(std::ostream& os, matrix_t const& m)
{
    os << "{\n";
    for (size_t r = 0; r < m.height; ++r)
    {
        os << " {";
        for (size_t c = 0; c < m.width; ++c)
        {
            os << m(r, c) << ", ";
        }
        os << "}\n";
    }
    return os << "}\n";
}
__global__ void multiply_matrices_kernel(matrix_t const,
matrix_t const,
matrix_t);
// Host wrapper: copies a and b to the device, launches the matmul kernel
// over a BLOCK_SIZE x BLOCK_SIZE grid covering c, and copies c back.
// Added: the kernel launch is now checked (launch-config errors and
// asynchronous execution faults were silently ignored before).
void multiply_matrices(matrix_t const a, matrix_t const b, matrix_t c)
{
    matrix_t d_a;
    {
        d_a.width = a.width;
        d_a.height = a.height;
        d_a.elements = pcw::alloc<float>(d_a.element_count());
        pcw::copy_to(d_a.elements, a.elements, a.element_count());
    }
    matrix_t d_b;
    {
        d_b.width = b.width;
        d_b.height = b.height;
        d_b.elements = pcw::alloc<float>(d_b.element_count());
        pcw::copy_to(d_b.elements, b.elements, b.element_count());
    }
    matrix_t d_c;
    {
        d_c.width = c.width;
        d_c.height = c.height;
        d_c.elements = pcw::alloc<float>(d_c.element_count());
    }
    // ceil-divide so partial edge tiles are still covered
    dim3 block_dim(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid_dim((unsigned int) (b.width + block_dim.x - 1) / block_dim.x,
                  (unsigned int) (a.height + block_dim.y - 1) / block_dim.y);
    multiply_matrices_kernel<<<grid_dim, block_dim>>>(d_a, d_b, d_c);
    CE(cudaGetLastError());        // catch bad launch configuration
    CE(cudaDeviceSynchronize());   // surface asynchronous kernel faults
    {
        pcw::copy_from(c.elements, d_c.elements, c.element_count());
    }
    pcw::free(d_a.elements);
    pcw::free(d_b.elements);
    pcw::free(d_c.elements);
}
// Computes c = a * b, one thread per output element. The launch grid may
// overhang the matrix (host-side ceil-divide), so out-of-range threads
// must exit early — the original had no guard and read/wrote out of
// bounds whenever the dimensions were not multiples of BLOCK_SIZE.
__global__ void multiply_matrices_kernel(matrix_t const a,
                                         matrix_t const b,
                                         matrix_t c)
{
    size_t const row = blockIdx.y * blockDim.y + threadIdx.y;
    size_t const col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= c.height || col >= c.width)
    {
        return;
    }
    float c_value = 0.0f;
    for (size_t e = 0; e < a.width; ++e)
    {
        c_value += a(row, e) * b(e, col);
    }
    // single store after the accumulation (the original stored on every
    // loop iteration)
    c(row, col) = c_value;
}
/* Demo driver: builds a 2x3 and a 3x4 host matrix, multiplies them on the
 * GPU, and prints inputs and result. Any CUDA failure surfaces as a
 * std::runtime_error thrown by the CE macro and is reported here. */
int main()
{
    try
    {
        // a: 2x3 input matrix (host-allocated)
        matrix_t a;
        a.height = 2;
        a.width = 3;
        a.elements = new float[a.element_count()];
        a = {{2, 1, 4}, {0, 1, 1}};
        std::cout << a;
        // b: 3x4 input matrix
        matrix_t b;
        b.height = 3;
        b.width = 4;
        b.elements = new float[b.element_count()];
        b = {{6, 3, -1, 0}, {1, 1, 0, 4}, {-2, 5, 0, 2}};
        std::cout << b;
        // c: 2x4 result, zero-initialized before the GPU fills it
        matrix_t c;
        c.height = 2;
        c.width = 4;
        c.elements = new float[c.element_count()];
        std::fill_n(c.elements, c.element_count(), 0.0f);
        multiply_matrices(a, b, c);
        std::cout << c;
        //assert(5 == c.elements[0]);
        //assert(27 == c.elements[1]);
        //assert(-2 == c.elements[2]);
        //assert(12 == c.elements[3]);
        //assert(-1 == c.elements[4]);
        //assert(6 == c.elements[5]);
        //assert(0 == c.elements[6]);
        //assert(6 == c.elements[7]);
        delete[] a.elements;
        delete[] b.elements;
        delete[] c.elements;
    }
    catch (std::exception const& ex)
    {
        std::cout << "exception: " << ex.what() << "\n";
        return 1;
    }
    return 0;
}
|
5,629 | #include <iostream>
#include <cuda.h>
#include <time.h>
#include <math.h>
#define row 100
#define col 100
// Aborts with the CUDA error text when `error` indicates a failure.
// Fixed: the original printed no error detail and called exit(0), which
// scripts and callers would read as success.
void handle_error(cudaError_t error) {
    if (error != cudaSuccess) {
        std::cout << "CUDA error: " << cudaGetErrorString(error) << std::endl;
        exit(1);
    }
}
// Uniform pseudo-random channel value in [0, 255].
int get_rand_in_range() {
    return rand() % 256;
}
// Converts an interleaved RGB image (3 floats per pixel, row*col pixels)
// to grayscale with luminance weights 0.21/0.71/0.07, one output float
// per pixel, using a stride loop over all pixels.
// Fixed: the result was stored at the thread's *first* index on every
// iteration (output_image[index] instead of [i]), and the loop bound used
// <= which processed one pixel past the end of the image.
__global__ void convert(float *input_image, float *output_image, int no_of_threads) {
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    // no_of_threads is the launch's grid size, so this strides the grid
    for(int i = index; i < (row*col); i = i + (blockDim.x*no_of_threads)) {
        float r = input_image[3*i];
        float g = input_image[3*i + 1];
        float b = input_image[3*i + 2];
        output_image[i] = (0.21*r + 0.71*g + 0.07*b);
    }
}
// Fills A — an m x n image with 3 interleaved channels per pixel — with
// random channel values in [0, 255].
void initialise_matrix(float A[], int m, int n) {
    for(int pixel = 0; pixel < m * n; pixel++) {
        for(int channel = 0; channel < 3; channel++) {
            A[pixel * 3 + channel] = get_rand_in_range();
        }
    }
}
/* Host driver: builds a random row x col RGB image, converts it to
 * grayscale on the GPU, and copies the result back. The blocking
 * cudaMemcpy after the launch doubles as synchronization.
 * NOTE(review): image[] lives on the stack (~120 KB for 100x100) —
 * confirm the platform's stack limit before growing row/col. */
int main() {
    srand(time(NULL));
    float image[row * col * 3], gray_image[row * col];
    float *device_image, *output_image;
    handle_error(cudaMalloc((void **)&device_image, row * col * 3 * sizeof(float)));
    handle_error(cudaMalloc((void **)&output_image, row * col * sizeof(float)));
    initialise_matrix(image, row, col);
    cudaMemcpy(device_image, image, row * col * 3 * sizeof(float), cudaMemcpyHostToDevice);
    // 256 blocks x 256 threads; the kernel's stride loop covers all pixels
    dim3 grid_dim(256,1,1);
    dim3 block_dim(256,1,1);
    convert<<<grid_dim, block_dim>>>(device_image, output_image, 256);
    cudaMemcpy(gray_image, output_image, row * col * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(device_image);
    cudaFree(output_image);
    return 0;
}
|
5,630 | #include <stdio.h>
// One Jacobi-style heat-diffusion step: each interior cell of the nx x ny
// grid reads its four neighbours from `ad` and writes the relaxed value
// to `bd`. Launch layout: blockIdx.x is the row (x), threadIdx.x is the
// column (y), one thread per cell. Boundary cells are never written.
__global__
void update(float *ad, float *bd, int ny,int nx)
{
    int x, y;
    x = blockIdx.x;
    y = threadIdx.x;
    // interior cells only; boundaries act as fixed conditions
    if(x > 0 && y > 0 && x < nx-1 && y < ny-1)
        bd[x*ny+y] = ad[x*ny+y] + (ad[(x+1)*ny+y] + ad[(x-1)*ny+y] - 2 * ad[x*ny+y])/10 + (ad[x*ny+(y+1)] + ad[x*ny+(y-1)] - 2 * ad[x*ny+y])/10;
}
/* Runs `steps` ping-pong Jacobi iterations of the heat stencil on the GPU,
 * timing them with CUDA events, and writes the final grid back into arr1.
 * Fixed: bd was left uninitialized — the kernel never writes boundary
 * cells, so from the second step onward the stencil read garbage from
 * bd's boundary; bd must also start from the initial grid. */
extern "C" float updateGPU(float **arr1, float **arr2, int nx, int ny, int steps)
{
    float *ad,*bd,s[nx*ny], milli = 0.0;
    int i, j;
    size_t size = nx*ny*sizeof(float);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    printf("= mpi_heat2D - CUDA Version =\nGrid size: X = %d, Y = %d, Time steps = %d\n",nx,ny,steps);
    // flatten the 2-D host grid into the staging buffer
    for (i = 0;i<nx;i++)
        for (j = 0;j<ny;j++){
            s[i*ny+j] = arr1[i][j];
        }
    cudaMalloc( (void**)&ad, size);
    cudaMemcpy( ad, s, size, cudaMemcpyHostToDevice );
    cudaMalloc( (void**)&bd, size);
    // bd must also carry the initial grid so its (never-written) boundary
    // cells hold the correct fixed boundary values
    cudaMemcpy( bd, s, size, cudaMemcpyHostToDevice );
    dim3 threads_per_block(ny);
    dim3 num_blocks(nx,1);
    cudaEventRecord(start);
    // ping-pong between ad and bd: even steps read ad, odd steps read bd
    for(i = 0; i < steps; i++){
        if(i%2 == 0)
            update<<<num_blocks, threads_per_block>>>(ad, bd, ny,nx);
        else
            update<<<num_blocks, threads_per_block>>>(bd, ad, ny,nx);
    }
    cudaEventRecord(stop);
    // after the loop, the most recently *written* buffer is bd when the
    // step count is odd, ad when it is even (i == steps here)
    if (i%2 == 0)
        cudaMemcpy( s, ad, size, cudaMemcpyDeviceToHost );
    else
        cudaMemcpy( s, bd, size, cudaMemcpyDeviceToHost );
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milli, start, stop);
    printf("Time Elapsed is %2.6f seconds\n",milli/1000);
    cudaFree( ad );
    cudaFree( bd );
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // copy the flattened result back into the caller's 2-D array
    for (i = 0;i<nx;i++)
        for (j = 0;j<ny;j++)
            arr1[i][j] = s[i*ny+j];
    return 1;
}
5,631 | #include<iostream>
#include<stdio.h>
#include<time.h>
#include<stdlib.h>
using namespace std;
//max no. of blocks 65535
// Squares each input element: d_out[i] = d_in[i]^2 for i < d_n, assuming
// 1024-thread blocks. Also reports blockDim.x back to the host through
// d_get_blockDim (benign race: every thread writes the same value).
// Fixed: the squared result was immediately overwritten with `idx`
// (leftover debug code), and the temporary was an int, which truncated
// the long long input and overflowed on multiplication.
__global__ void square(long long *d_in,long long *d_out,long long d_n,long long *d_get_blockDim)
{
    long long idx=1024*blockIdx.x+threadIdx.x;
    if(idx<d_n)
    {
        long long temp=d_in[idx];
        d_out[idx]=temp*temp;
    }
    *d_get_blockDim=blockDim.x;
}
/* Host driver: squares n long longs on the GPU, launching in chunks of
 * 67107840 elements (= 65535 blocks x 1024 threads, the per-launch grid
 * limit), then prints the last result, the block width, and the timing.
 * Fixed: d_get_blockDim was never freed; sizeof(n) replaced with the
 * explicit sizeof(long long) it coincidentally equalled. */
int main()
{
    long long n;
    cin>>n;
    long long *h_a;
    h_a=(long long *)malloc(n*sizeof(long long));
    long long *h_b;
    h_b=(long long *)malloc(n*sizeof(long long));
    for(int i=0;i<n;i++)
        h_a[i]=i;
    long long *d_get_blockDim;
    long long *d_in;
    long long *d_out;
    long long d_n=n;
    cudaMalloc((void **)&d_in,n*sizeof(long long));
    cudaMalloc((void **)&d_out,n*sizeof(long long));
    cudaMalloc((void **)&d_get_blockDim,sizeof(long long));
    cudaMemcpy(d_in,h_a,n*sizeof(long long),cudaMemcpyHostToDevice);
    clock_t start,end;
    start=clock();
    // 67107840 = 65535 * 1024: the largest element count one launch of
    // 1024-thread blocks can cover with gridDim.x <= 65535
    for(long long i=0;i<n;i+=67107840)
    {
        // ceil-divide the remaining elements into 1024-thread blocks
        long long noBlocks=(n-i)/1024;
        if(noBlocks<(n-i)/1024.0)
            noBlocks++;
        square <<<noBlocks,1024>>> (d_in+i,d_out+i,d_n-i,d_get_blockDim);
    }
    end=clock();
    cudaMemcpy(h_b,d_out,n*sizeof(long long),cudaMemcpyDeviceToHost);
    long long h_blockDim;
    cudaMemcpy(&h_blockDim,d_get_blockDim,sizeof(long long),cudaMemcpyDeviceToHost);
    cout<<h_b[n-1]<<"\n";
    cout<<"Block Dimension is:"<<h_blockDim<<endl;
    free(h_a);
    free(h_b);
    cudaFree(d_in);
    cudaFree(d_out);
    cudaFree(d_get_blockDim);
    printf("It took %0.9f seconds\n",(end-start)*1.0/CLOCKS_PER_SEC);
}
|
5,632 | #include "c-product.cuh"
#include <stdio.h>
#include <stdlib.h>
// Iterates this thread's share of the Cartesian product of `count` sets
// (sizes in size[], `all` total combinations). Each thread starts at its
// global combination number and advances by the grid stride using a
// mixed-radix counter; `delta` caches the per-digit increment for one
// stride so each step is a cheap carry-propagating add.
// Fixed: `delta` was allocated with device-heap malloc but never freed,
// leaking heap on every thread.
__global__
void product_iterator(unsigned char* set[], int size[], int count, long int all)
{
    int *index;
    index = (int *)malloc(count * sizeof(int));
    int *delta;
    delta = (int *)malloc(count * sizeof(int));
    // Block and thread params
    long int init = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    // delta = digit representation of one stride; index = starting digits
    updateIndex(delta, size, count, stride, all);
    updateIndex(index, size, count, init, all);
    for (long int i = init; i < all; i+= stride) {
        // process the current combination's sub-keys
        for (int j = 0;j < count; j++){
            set[j][index[j]] += index[j];
        }
        // advance to this thread's next combination, except past the end
        if (i + stride < all){
            increment(index, size, count, delta);
        }
    }
    free(index);
    free(delta);
    return;
}
// Decomposes the linear combination number `num` into a mixed-radix index
// whose digit i has radix size[i], most-significant digit first; `all` is
// the product of all radices.
__device__
void updateIndex(int index[], int size[], int count, long int num, long int all){
    long int place = all;
    for (int d = 0; d < count; d++){
        place /= size[d];
        index[d] = num / place;
        num -= index[d] * place;
    }
}
// Advances the mixed-radix counter `index` by the per-digit step `delta`,
// propagating carries from the least-significant digit upward.
__device__
void increment(int index[], int size[], int count, int delta[]){
    for (int i = count-1; i >= 0; i--){
        index[i] += delta[i];
        while(index[i] >= size[i]){
            index[i] -= size[i];
            // NOTE(review): when i == 0 this writes index[-1] — out of
            // bounds unless callers guarantee the most-significant digit
            // never overflows; verify against product_iterator's usage.
            index[i-1]++;
        }
    }
}
5,633 | #include <stdlib.h>
#include <stdio.h>
// One thread per element: d_out[i] = d_in[i]^2 (single-block launch).
__global__ void square(float *d_out, float *d_in) {
    const int i = threadIdx.x;
    const float v = d_in[i];
    d_out[i] = v * v;
}
/* Host driver: squares 64 floats on the GPU and prints them 4 per line. */
int main(int argc, char *argv[]) {
    const int ARRAY_COUNT = 64;
    float h_in[ARRAY_COUNT], h_out[ARRAY_COUNT];
    // input: 0, 1, 2, ... 63
    for (int k = 0; k < ARRAY_COUNT; ++k) {
        h_in[k] = k;
    }
    const int ARRAY_SIZE = ARRAY_COUNT * sizeof *h_in;
    float *d_in = NULL, *d_out = NULL;
    cudaMalloc((void**) &d_in, ARRAY_SIZE);
    cudaMalloc((void**) &d_out, ARRAY_SIZE);
    cudaMemcpy(d_in, h_in, ARRAY_SIZE, cudaMemcpyHostToDevice);
    // one block, one thread per element
    square<<<1, ARRAY_COUNT>>>(d_out, d_in);
    cudaMemcpy(h_out, d_out, ARRAY_SIZE, cudaMemcpyDeviceToHost);
    // print results, tab-separated, newline every 4th value
    for (int k = 0; k < ARRAY_COUNT; ++k) {
        printf("%f%s", h_out[k], (k+1)%4 ? "\t" : "\n");
    }
    cudaFree(d_in);
    cudaFree(d_out);
    return EXIT_SUCCESS;
}
|
5,634 | #include <stdlib.h>
#include "mesh.cuh"
#include "constants.cuh"
//------------------------------------------PROTOTIPES----------------------------------------------
void define_freedofs(int *fixeddofs, int *alldofs, int *freedofs, struct mesh *mesh);
//-----------------------------------------BODIES-------------------------------------------
// Allocates and populates the DOF bookkeeping arrays for the topology-
// optimization mesh (mirrors the MATLAB 99-line top code):
//   fixeddofs = union(1:2:2*(nely+1), 2*(nelx+1)*(nely+1))
//   alldofs   = 1:2*(nely+1)*(nelx+1)
//   freedofs  = setdiff(alldofs, fixeddofs)
// All three arrays are malloc'd here and returned through the out-params;
// mesh->fixed_count is set to the fixed-DOF total. DOF numbering is
// 1-based, matching the MATLAB original.
void define_loads_support(int **fixeddofs,int **alldofs, int **freedofs, struct mesh *mesh) {
    int index;
    // count the odd DOFs along the left edge, plus one corner DOF
    mesh->fixed_count = 0;
    for (int i = 1; i <= 2*(mesh->nely + 1); i += 2) {
        mesh->fixed_count++;
    }
    mesh->fixed_count++;
    (*fixeddofs) = (int*)malloc((mesh->fixed_count) * sizeof(int));
    (*alldofs) = (int*)malloc(2 * (mesh->nelx + 1)*(mesh->nely + 1) * sizeof(int));
    (*freedofs) = (int*)malloc(((2 * (mesh->nelx + 1)*(mesh->nely + 1)) - mesh->fixed_count) * sizeof(int));
    //define fixeddofs
    //fixeddofs = union([1:2:2*(nely+1)],[2*(nelx+1)*(nely+1)]);
    index = 0;
    for (int i = 1; i <= 2 * (mesh->nely + 1); i += 2) {
        (*fixeddofs)[index] = i;
        index++;
    }
    // last fixed DOF: the bottom-right support
    (*fixeddofs)[mesh->fixed_count-1] = 2 * (mesh->nelx + 1)*(mesh->nely + 1);
    //define alldofs
    //alldofs = [1:2*(nely+1)*(nelx+1)];
    for (int i = 0; i < (mesh->nely + 1)*(mesh->nelx + 1) * 2; i++) {
        (*alldofs)[i] = i + 1;
    }
    define_freedofs((*fixeddofs), (*alldofs), (*freedofs), mesh);
}
// freedofs = setdiff(alldofs, fixeddofs): copies every DOF that does not
// appear in the fixed list, preserving order.
void define_freedofs(int *fixeddofs, int *alldofs, int *freedofs, struct mesh *mesh) {
    int out = 0;
    int total = (mesh->nely + 1) * (mesh->nelx + 1) * 2;
    for (int a = 0; a < total; a++) {
        int is_free = 1;
        for (int f = 0; f < mesh->fixed_count; f++) {
            if (alldofs[a] == fixeddofs[f]) {
                is_free = 0;
            }
        }
        if (is_free) {
            freedofs[out] = alldofs[a];
            out++;
        }
    }
}
5,635 | #include "includes.h"
// For each fine node (one thread per node, CSR adjacency in
// adjIndexes/adjacency), relabels every neighbour edge with the coarse
// aggregate IDs: edges internal to the node's own aggregate are marked -1
// in both outputs, cross-aggregate edges record this node's aggregate in
// adjacencyBlockLabel and the neighbour's aggregate in blockMappedAdjacency.
__global__ void mapAdjacencyToBlockKernel(int size, int *adjIndexes, int *adjacency, int *adjacencyBlockLabel, int *blockMappedAdjacency, int *fineAggregate) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size)
    {
        // CSR edge range for node idx
        int begin = adjIndexes[idx];
        int end = adjIndexes[idx + 1];
        // aggregate (coarse block) this node belongs to
        int thisBlock = fineAggregate[idx];
        // Fill block labeled adjacency and block mapped adjacency vectors
        for (int i = begin; i < end; i++)
        {
            int neighbor = fineAggregate[adjacency[i]];
            if (thisBlock == neighbor)
            {
                // intra-aggregate edge: flagged for removal
                adjacencyBlockLabel[i] = -1;
                blockMappedAdjacency[i] = -1;
            } else
            {
                adjacencyBlockLabel[i] = thisBlock;
                blockMappedAdjacency[i] = neighbor;
            }
        }
    }
}
5,636 | #include<stdlib.h>
#include<stdio.h>
#include <cuda_runtime.h>
#define TILE_WIDTH 16
#define seed 13
// Tiled matrix multiply: dev_C = dev_A * dev_B for square matrices of
// width matrixWitdh, staged through TILE_WIDTH x TILE_WIDTH shared tiles.
// Precondition: matrixWitdh is a multiple of TILE_WIDTH (the tile loop
// truncates otherwise).
// Fixed: the store to dev_C sat *inside* the tile loop, between the two
// __syncthreads() phases — besides redundant global writes per tile, it
// published partial sums that another pass could observe; the single
// store now happens once after the accumulation completes.
__global__ void matrixMul(float *dev_A, float *dev_B, float *dev_C, int matrixWitdh)
{
    __shared__ float A_tile[TILE_WIDTH][TILE_WIDTH];
    __shared__ float B_tile[TILE_WIDTH][TILE_WIDTH];
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by * TILE_WIDTH + ty;
    int col = bx * TILE_WIDTH + tx;
    float partial = 0.0;
    int m;
    for( m=0 ; m < matrixWitdh/TILE_WIDTH; m++){
        // cooperative load of one tile of A and one tile of B
        A_tile[ty][tx] = dev_A[row * matrixWitdh + (m * TILE_WIDTH + tx)];
        B_tile[ty][tx] = dev_B[col + (m * TILE_WIDTH + ty) * matrixWitdh];
        __syncthreads();
        int k;
        for(k=0; k< TILE_WIDTH; k++)
            partial += A_tile[ty][k] * B_tile[k][tx];
        // all reads must finish before the next iteration overwrites the tiles
        __syncthreads();
    }
    dev_C[row * matrixWitdh + col] = partial;
}
/* Host driver: fills two random matrixWitdh x matrixWitdh matrices,
 * multiplies them with the tiled kernel, and reports elapsed time
 * (events bracket the copies as well as the kernel).
 * NOTE(review): both dimGrid and the kernel's tile loop truncate, so
 * matrixWitdh must be a multiple of TILE_WIDTH (16) — verify or validate
 * the command-line argument. */
int main(int argc, char **argv){
    srand(seed);
    if(argc != 2){
        printf("Usage /lab4_4 <matrixWitdh>");
        return 1;
    }
    int matrixWitdh = atoi(argv[1]);
    // host matrices, random values in [0, 10)
    float *h_A = (float*) malloc(matrixWitdh * matrixWitdh * sizeof(float));
    float *h_B = (float*) malloc(matrixWitdh * matrixWitdh * sizeof(float));
    float *h_C = (float*) malloc(matrixWitdh * matrixWitdh * sizeof(float));
    int i,j;
    for(i=0;i<matrixWitdh;i++){
        for(j=0;j<matrixWitdh;j++){
            h_A[i * matrixWitdh + j] = (float)rand()/((float)RAND_MAX/10.0);
            h_B[i * matrixWitdh + j] = (float)rand()/((float)RAND_MAX/10.0);
        }
    }
    float *d_A, *d_B, *d_C;
    cudaMalloc((void**) &d_A, matrixWitdh * matrixWitdh * sizeof(float));
    cudaMalloc((void**) &d_B, matrixWitdh * matrixWitdh * sizeof(float));
    cudaMalloc((void**) &d_C, matrixWitdh * matrixWitdh * sizeof(float));
    // one TILE_WIDTH x TILE_WIDTH block per output tile
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
    dim3 dimGrid(matrixWitdh/TILE_WIDTH, matrixWitdh/TILE_WIDTH, 1);
    float elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMemcpy(d_A, h_A, matrixWitdh * matrixWitdh * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, matrixWitdh * matrixWitdh * sizeof(float), cudaMemcpyHostToDevice);
    matrixMul<<< dimGrid, dimBlock >>>(d_A, d_B, d_C, matrixWitdh);
    // blocking copy-back also synchronizes with the kernel
    cudaMemcpy(h_C, d_C, matrixWitdh* matrixWitdh * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("For tiled version, the elapsed time is %.4f(ms).\n", elapsedTime);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
} |
5,637 | //#include <cuda_runtime.h>
#include <stdio.h>
// Prints each thread's full coordinate set (thread, block, block dim,
// grid dim) from the device.
// Fixed: the format string had no trailing newline, so the output of all
// launched threads ran together on one unbroken line.
__global__ void checkIndex(void)
{
    printf("threadIdx: (%d, %d, %d); blockIdx: (%d, %d, %d); blockDim: (%d, %d, %d); gridDim: (%d, %d, %d)\n",
           threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y, blockDim.z,
           gridDim.x, gridDim.y, gridDim.z);
}
/* Demo entry point: reports launch geometry from both host and device. */
int main(int argc, char* argv[])
{
    // total number of data elements to cover
    int nElem = 6;
    // 3 threads per block; enough blocks to cover nElem (ceil-divide)
    dim3 block (3);
    dim3 grid ((nElem+block.x-1)/block.x);
    // host-side view of the launch geometry
    printf("grid.x %d, grid.y %d, grid.z %d\n", grid.x, grid.y, grid.z);
    printf("block.x %d, block.y %d, block.z %d\n", block.x, block.y, block.z);
    // device-side view: each thread reports its own coordinates
    checkIndex<<<grid, block>>>();
    // tear down the device context (also flushes device printf) on exit
    cudaDeviceReset();
    return 0;
}
|
5,638 | #include "includes.h"
// Element-wise vector addition c = a + b over N elements (N comes from
// includes.h); one thread per element with a bounds guard for the tail.
__global__ void sum(float *a, float *b, float *c) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) {
        c[i] = a[i] + b[i];
    }
}
5,639 | // Exhibits a bank conflict.
// Gklee and Gkleep both detect this.
#include <cstdio>
#define N 32
// Deliberately exhibits 2-, 4-, and 8-way shared-memory bank conflicts
// (strided indices tid*2 / tid*4 / tid*8 within a warp) so that analysis
// tools such as Gklee can detect them. Do not "fix" the strides — the
// conflicts are the point of this kernel.
__global__ void bc(char* in, char* out)
{
    __shared__ int smem[512];
    int tid = threadIdx.x;
    smem[tid*2]=in[tid];
    __syncthreads();
    smem[tid*4]=in[tid];
    __syncthreads();
    smem[tid*8]=in[tid];
    __syncthreads();
    int x = smem[tid * 2]; // 2-way bank conflicts
    int y = smem[tid * 4]; // 4-way bank conflicts
    int z = smem[tid * 8]; // 8-way bank conflicts
    int m = max(max(x,y),z);
    out[tid] = m;
}
/* Host driver for the bank-conflict demo: sends N bytes 0..N-1 through
 * the bc kernel with one block of N threads and prints the result
 * (the kernel's int results are truncated back into chars on store). */
int main()
{
    char* in = (char*) malloc(N*sizeof(char));
    for(int i = 0; i < N; i++)
        in[i] = i;
    char* din, * dout;
    cudaMalloc((void**) &din, N*sizeof(char));
    cudaMalloc((void**) &dout, N*sizeof(char));
    cudaMemcpy(din, in, N*sizeof(char), cudaMemcpyHostToDevice);
    bc<<<1,N>>>(din,dout);
    // blocking copy-back also synchronizes with the kernel; `in` is reused
    // as the output buffer
    cudaMemcpy(in, dout, N*sizeof(char), cudaMemcpyDeviceToHost);
    for(int i = 0; i < N; i++)
        printf("%d ", in[i]);
    printf("\n");
    free(in); cudaFree(din); cudaFree(dout);
}
5,640 | /*
HPC ASSIGNMENT 1 : QUESTION 3
Name : Arvind Sai K , Derik Clive
RollNo: 15CO207 , 15CO213
*/
#include <stdio.h>
#include<math.h>
#include <time.h>
#include <stdlib.h>
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )
// Abort with a readable diagnostic when a CUDA runtime call fails.
// Invoked through the HANDLE_ERROR macro, which supplies file and line.
static void HandleError( cudaError_t err, const char *file, int line )
{
    if (err == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
            file, line );
    exit( EXIT_FAILURE );
}
// CUDA Kernel for Vector Addition
// CUDA kernel: element-wise vector addition, dev_c = dev_a + dev_b.
// Expects a 1-D launch; *dev_N is the element count (held in device memory).
__global__ void Vector_Addition ( const float *dev_a , const float *dev_b , float *dev_c, int *dev_N)
{
    // Global thread id. BUGFIX/idiom: use blockDim.x instead of the
    // hard-coded 1024 so the kernel stays correct if the host ever
    // launches a different block size (the host currently uses 1024,
    // so behavior is unchanged).
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid < *dev_N ) // boundary check: the last block may be partially full
    {
        dev_c [tid] = dev_a[tid] + dev_b[tid] ;
    }
}
// Driver: add two random vectors on GPU and CPU, time both, and verify.
int main (void)
{
    int N;
    printf("Enter number of array elements : ");
    scanf("%d",&N);

    // BUGFIX: allocate host arrays on the heap. The original used stack
    // VLAs (float Host_a[N]), which overflow the stack for large N.
    float *Host_a = (float *) malloc(N * sizeof(float));
    float *Host_b = (float *) malloc(N * sizeof(float));
    float *Host_c = (float *) malloc(N * sizeof(float));

    // Device arrays.
    float *dev_a , *dev_b, *dev_c ;
    int *dev_N;

    // Fill the host inputs with random positive ratios.
    srand(time(NULL));
    for ( int i = 0; i <N ; i++ )
    {
        int a = rand();
        int b = rand();
        int c = rand();
        int d = rand();
        Host_a[i] = ((float)a)/(b+1);
        Host_b[i] = ((float)c)/(d+1) ;
    }

    // GPU timing window (includes allocation and transfers, as before).
    float timer;
    cudaEvent_t start, stop;
    cudaEventCreate(&start) ;
    cudaEventCreate(&stop) ;
    cudaEventRecord(start, 0) ;

    // Allocate the memory on the GPU.
    HANDLE_ERROR ( cudaMalloc((void **)&dev_a , N*sizeof(float) ) );
    HANDLE_ERROR ( cudaMalloc((void **)&dev_b , N*sizeof(float) ) );
    HANDLE_ERROR ( cudaMalloc((void **)&dev_c , N*sizeof(float) ) );
    HANDLE_ERROR ( cudaMalloc((void **)&dev_N , sizeof(int) ) );

    // Copy host arrays to the device.
    HANDLE_ERROR (cudaMemcpy (dev_a , Host_a , N*sizeof(float) , cudaMemcpyHostToDevice));
    HANDLE_ERROR (cudaMemcpy (dev_b , Host_b , N*sizeof(float) , cudaMemcpyHostToDevice));
    HANDLE_ERROR (cudaMemcpy (dev_N , &N , sizeof(int), cudaMemcpyHostToDevice));

    // ceil(N / 1024) blocks of 1024 threads each.
    int blockNos = ceil((float)(N)/1024);
    Vector_Addition <<< blockNos, 1024 >>> (dev_a , dev_b , dev_c,dev_N) ;

    // Blocking copy back (synchronizes with the kernel).
    HANDLE_ERROR (cudaMemcpy(Host_c , dev_c ,N * sizeof(float) , cudaMemcpyDeviceToHost));
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timer, start, stop);
    printf("Time on GPU: %f ms \n", timer);

    // Free the device memory.
    cudaFree (dev_a) ;
    cudaFree (dev_b) ;
    cudaFree (dev_c) ;
    cudaFree (dev_N);

    // CPU timing. NOTE(review): CUDA events time the GPU stream; wrapping a
    // CPU loop (as the original did) is only a rough wall-clock proxy.
    // BUGFIX: reuse the existing events instead of re-creating (and leaking)
    // them, and store the reference in a float array — the original used
    // int result[N], silently truncating every element.
    float timerc;
    cudaEventRecord(start, 0) ;
    float *result = (float *) malloc(N * sizeof(float));
    for(int i = 0;i<N;++i){
        result[i] = Host_a[i] + Host_b[i];
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timerc, start, stop);
    printf("Time on CPU: %f ms \n", timerc);

    // Verify the GPU result element-wise against the direct sum.
    int flag = 0;
    for(int i=0;i<N;++i){
        if(Host_a[i] + Host_b[i] != Host_c[i]){
            flag = 1;
            break;
        }
    }
    if(flag){
        printf("Wrong result \n");
    }
    else printf("Verified to be correct\n");

    // BUGFIX: release the events and all host allocations.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(result);
    free(Host_a);
    free(Host_b);
    free(Host_c);
    return 0 ;
}
|
5,641 | //#include "kernel.cuh"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define N 5000
// Placeholder driver: the kernel header include above is commented out and
// no GPU work is performed; just waits for a keypress (Windows "pause")
// before exiting.
int main()
{
system("pause");
return 0;
}
|
5,642 | #define IDX3(X, n1, n2, n3, i1, i2, i3) (X[(i1)*((n2)*(n3)) + (i2)*(n3) + (i3)])
// Extract every "valid" p_nj x p_ni patch from a stack of nimgs images
// (each im_nj x im_ni; outermost dimension = image). One patch per
// grid-stride iteration; patches are laid out [npatches][p_nj][p_ni].
template<class T>
__device__ void im2col_ker(const T *im, T *patches,
                           int im_ni, int im_nj, int nimgs,
                           int p_ni, int p_nj, int npatches)
{
    int total_threads = gridDim.x * blockDim.x;
    int patch = blockIdx.x * blockDim.x + threadIdx.x;
    int patches_per_img = npatches / nimgs;
    int valid_ni = im_ni - p_ni + 1;          /* valid topleft i positions per row */
    for (; patch < npatches; patch += total_threads) {
        int im_k = patch / patches_per_img;   /* image index */
        /* BUGFIX: decompose the patch index WITHIN its image. The original
           divided the global patch index, which drives im_j0 out of range
           for every image after the first (nimgs > 1). This matches the
           inverse mapping used by scol2im_ker:
           local = im_j0 * valid_ni + im_i0. */
        int local = patch % patches_per_img;
        int im_j0 = local / valid_ni;         /* patch topleft j in image */
        int im_i0 = local % valid_ni;         /* patch topleft i in image */
        for (int pj = 0; pj < p_nj; ++pj) {
            for (int pi = 0; pi < p_ni; ++pi) {
                IDX3(patches, npatches, p_nj, p_ni,
                     patch, pj, pi)
                    = IDX3(im, nimgs, im_nj, im_ni,
                           im_k, im_j0 + pj, im_i0 + pi);
            }
        }
    }
}
// Adjoint of im2col: accumulate overlapping patch values back into the
// image stack. Each grid-stride iteration owns one whole output pixel, so
// the summation needs no atomics.
template<class T>
__device__ void scol2im_ker(T *im, const T *patches,
                            int im_ni, int im_nj, int nimgs,
                            int p_ni, int p_nj, int npatches)
{
    int total_threads = gridDim.x * blockDim.x;
    int pixel = blockIdx.x * blockDim.x + threadIdx.x;
    int valid_nj = im_nj - p_nj + 1;
    int valid_ni = im_ni - p_ni + 1;
    int npixels = nimgs * im_nj * im_ni;
    int patches_per_img = npatches / nimgs;
    for (; pixel < npixels; pixel += total_threads) {
        T x = 0;
        int im_k = pixel / (im_ni * im_nj);    /* image index */
        /* BUGFIX: pixel coordinates must be taken WITHIN the image. The
           original computed im_j = pixel / im_ni, which exceeds im_nj for
           every image after the first (nimgs > 1). */
        int local = pixel % (im_ni * im_nj);
        int im_j = local / im_ni;              /* pixel j in image */
        int im_i = local % im_ni;              /* pixel i in image */
        for (int pj = 0; pj < p_nj; ++pj) {
            for (int pi = 0; pi < p_ni; ++pi) {
                int im_pj = im_j - pj; /* topleft of patch in image */
                int im_pi = im_i - pi; /* topleft of patch in image */
                /* Skip patch positions that fall outside the valid range. */
                if (im_pi < 0 || im_pj < 0 ||
                    im_pj >= valid_nj || im_pi >= valid_ni)
                    continue;
                int patch = im_k * patches_per_img + im_pj * valid_ni + im_pi;
                x += IDX3(patches, npatches, p_nj, p_ni,
                          patch, pj, pi);
            }
        }
        IDX3(im, nimgs, im_nj, im_ni,
             im_k, im_j, im_i) = x;
    }
}
// Kernel entry points: thin __global__ wrappers instantiating the templated
// device routines above for double and float element types.
__global__ void im2col_d(const double *im, double *patches,
int im_ni, int im_nj, int nimgs,
int p_ni, int p_nj, int npatches)
{
im2col_ker<double>(im, patches,
im_ni, im_nj, nimgs,
p_ni, p_nj, npatches);
}
__global__ void scol2im_d(double *im, const double *patches,
int im_ni, int im_nj, int nimgs,
int p_ni, int p_nj, int npatches)
{
scol2im_ker<double>(im, patches,
im_ni, im_nj, nimgs,
p_ni, p_nj, npatches);
}
__global__ void im2col_f(const float *im, float *patches,
int im_ni, int im_nj, int nimgs,
int p_ni, int p_nj, int npatches)
{
im2col_ker<float>(im, patches,
im_ni, im_nj, nimgs,
p_ni, p_nj, npatches);
}
__global__ void scol2im_f(float *im, const float *patches,
int im_ni, int im_nj, int nimgs,
int p_ni, int p_nj, int npatches)
{
scol2im_ker<float>(im, patches,
im_ni, im_nj, nimgs,
p_ni, p_nj, npatches);
}
|
5,643 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <vector>
#include <time.h>
#define MAX_SHIFT 2
#define BLOCK_SIZE 128
#define SHIFT 3
using namespace std;
// Build a row-major m x n matrix (m rows of length n) whose rows are
// periodic with a period dividing MAX_SHIFT, filled with random digits.
// NOTE: reseeds rand() on every call, so results differ run to run.
float* generateSimpleCyclicMatrix(int n, int m) {
srand(time(0));
float* a = new float[n * m];
vector<int> vec;
// Candidate periods: i with i*i < MAX_SHIFT^2 and MAX_SHIFT % i == 0.
// NOTE(review): with MAX_SHIFT == 2 this admits only i == 1, so every row
// becomes constant — confirm that is the intended candidate set.
for (int i = 1; i * i < MAX_SHIFT * MAX_SHIFT; ++i) {
if (MAX_SHIFT % i == 0) {
vec.push_back(i);
}
}
vector<int> vec2;
for (int i = 0; i < m; ++i) {
// Pick a period and generate a random pattern of that length.
int k = rand() % vec.size();
vec2.push_back(vec[k]);
vector<float> current_vector;
for (int j = 0; j < vec[k]; ++j) {
current_vector.push_back(rand() % 10);
}
// Tile the pattern across the row: n / vec[k] full repetitions. If
// vec[k] does not divide n, the trailing elements are left unwritten.
for (int j = 0; j < n / vec[k]; ++j) {
for (int t = 0; t < vec[k]; ++t) {
a[i * n + j * vec[k] + t] = current_vector[t];
}
}
}
// Deliberately perturb one element of row 0.
a[1] = 1;
return a;
}
// Return a new m x n matrix b where each row of a is cyclically shifted
// left by `shift` positions: b[i][j] == a[i][(j + shift) % n].
// Requires 0 <= shift <= n. Caller owns the returned buffer (delete[]).
float* generateCyclicShiftMatrix(float* a, int n, int m, int shift) {
    float* b = new float[n * m];
    for (int row = 0; row < m; ++row) {
        const float* src = a + row * n;
        float* dst = b + row * n;
        for (int col = 0; col < n; ++col) {
            dst[col] = src[(col + shift) % n];
        }
    }
    return b;
}
// CPU reference check: AND into host_res whether every row of b equals the
// corresponding row of a cyclically shifted left by SHIFT positions.
void findShifts(float* a, float* b, int& host_res, int n, int m) {
    for (int row = 0; row < m; ++row) {
        const float* arow = a + row * n;
        const float* brow = b + row * n;
        for (int col = 0; col < n; ++col) {
            host_res &= (brow[col] == arow[(col + SHIFT) % n]);
        }
    }
}
// One thread per matrix row: check that row `row` of dev_b equals row `row`
// of dev_a cyclically shifted left by SHIFT, then AND all per-thread flags
// into *dev_res through a per-block shared flag.
__global__ void kernel(float* dev_a, float* dev_b, int* dev_res, int n) {
    __shared__ int block_ok;
    const int row = blockIdx.x * blockDim.x + threadIdx.x;

    // Per-thread verdict for this row.
    int thread_ok = 1;
    for (int col = 0; col < n; ++col) {
        thread_ok &= dev_b[row * n + col] == dev_a[row * n + (col + SHIFT) % n];
    }

    // Reduce within the block, then once into global memory.
    if (threadIdx.x == 0) block_ok = 1;
    __syncthreads();
    atomicAnd(&block_ok, thread_ok);
    __syncthreads();
    if (threadIdx.x == 0) {
        atomicAnd(dev_res, block_ok);
    }
}
// BLOCK_SIZE 128
// Tiled variant of `kernel`: stages 128x32 slabs of both matrices in shared
// memory before comparing. Assumes blockDim.x == BLOCK_SIZE == 128 (the
// tx/32 + p*4 indexing spreads the block's 4 warps over the 128 cache rows)
// and that n is a multiple of 32 — TODO confirm both at the call site.
// BLOCK_SIZE 128
__global__ void kernel_shared(float* dev_a, float* dev_b, int* dev_res, int n, int m) {
    int tx = threadIdx.x, bx = blockIdx.x;
    int dev_res_private = 1;
    __shared__ int dev_res_shared;
    // 33-wide rows: the extra column is presumably padding to avoid
    // shared-memory bank conflicts — verify.
    __shared__ float cache_a[128][33], cache_b[128][33];
    for (int k = 0; k < n/32; k++){
        // Fill the caches: b as-is, a pre-shifted by SHIFT (cyclic in the row).
        for (int p = 0; p < 32; p++ ){
            cache_a[tx/32 + p*4][tx%32] = dev_a[( tx/32 + p*4 + bx*128 )*n + (tx%32 + k*32 + SHIFT) % n];
            cache_b[tx/32 + p*4][tx%32] = dev_b[( tx/32 + p*4 + bx*128 )*n + tx%32 + k*32];
        }
        // BUGFIX: each thread reads cache rows written by OTHER threads, so a
        // barrier is required between the fill and the compare ...
        __syncthreads();
        for (int j = 0; j < 32; ++j) {
            dev_res_private &= cache_b[tx][j] == cache_a[tx][j];
        }
        // BUGFIX: ... and before the next iteration overwrites the tiles.
        __syncthreads();
    }
    if (threadIdx.x == 0) dev_res_shared = 1;
    __syncthreads();
    atomicAnd(&dev_res_shared, dev_res_private);
    __syncthreads();
    if (threadIdx.x == 0) {
        atomicAnd(dev_res, dev_res_shared);
    }
}
// Driver: build a cyclic matrix and its shifted copy, verify the shift
// relation on CPU and GPU, and print timings plus both boolean results.
int main(int argc, char** argv)
{
    int n = 1024, m = 26880, shift = 1;
    float* a = generateSimpleCyclicMatrix(n, m);
    float* b = generateCyclicShiftMatrix(a, n, m, shift);
    int res_CPU = true, *res_GPU = new int[1];
    res_GPU[0] = true;

    cudaEvent_t startCUDA, stopCUDA;
    cudaEventCreate(&startCUDA);
    cudaEventCreate(&stopCUDA);
    clock_t startCPU;
    float elapsedTimeCUDA, elapsedTimeCPU;

    // CPU reference check and timing.
    startCPU = clock();
    findShifts(a, b, res_CPU, n, m);
    elapsedTimeCPU = (double)(clock() - startCPU) / CLOCKS_PER_SEC;
    cout << "CPU time = " << elapsedTimeCPU*1000 << " ms\n";

    // One thread per row; m is a multiple of BLOCK_SIZE here.
    dim3 gridSize = dim3(m / BLOCK_SIZE, 1, 1);
    dim3 blockSize = dim3(BLOCK_SIZE, 1, 1);

    int* dev_res;
    float* dev_a, * dev_b;
    int sz = n * m * sizeof(float), sz_shifts = sizeof(int);
    cudaMalloc(&dev_a, sz);
    cudaMalloc(&dev_b, sz);
    cudaMalloc(&dev_res, sz_shifts);
    cudaMemcpy(dev_a, a, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_res, res_GPU, sz_shifts, cudaMemcpyHostToDevice);

    // GPU check and timing.
    cudaEventRecord(startCUDA, 0);
    kernel_shared <<<gridSize, blockSize>>> (dev_a, dev_b, dev_res, n, m);
    cudaEventRecord(stopCUDA, 0);
    cudaEventSynchronize(stopCUDA);
    cudaEventElapsedTime(&elapsedTimeCUDA, startCUDA, stopCUDA);
    cout << "CUDA time = " << elapsedTimeCUDA << " ms\n";

    cudaMemcpy(res_GPU, dev_res, sz_shifts, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_res);

    cout << endl << "CPU result : " << res_CPU << endl;
    cout << endl << "GPU result : " << res_GPU[0] << endl;

    // BUGFIX: release host allocations and CUDA events (the original leaked
    // a, b, res_GPU and both events).
    cudaEventDestroy(startCUDA);
    cudaEventDestroy(stopCUDA);
    delete[] a;
    delete[] b;
    delete[] res_GPU;
    return 0;
}
|
5,644 | // To compile - gcc -o 3dFDTD FDTD3D.c -lm
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
// This was taken from stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define CHECK_ERROR(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a readable diagnostic and terminate the process when a CUDA runtime
// call fails. Used via the CHECK_ERROR macro, which supplies file and line.
inline void gpuAssert(cudaError_t code, const char *file, int line) {
    if (code == cudaSuccess)
        return;
    printf("GPU error: %s %s %d\n", cudaGetErrorString(code), file, line);
    exit(code);
}
// Update Hx: one thread per k-slice, looping over i and j on the device.
// Expects a 1-D launch covering kmax threads.
extern __global__ void loop4_GPU(double*** Hx, double*** Ez, double Da, double Db, int kmax, int jmax, int imax) {
    int i, j;
    // BUGFIX/idiom: derive the global index from blockDim.x instead of the
    // hard-coded 32 so the kernel survives a different block size (the host
    // currently launches 32-thread blocks, so behavior is unchanged).
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k < kmax) {
        for (j = 0; j < jmax-1; j++) {
            for (i = 1; i < imax-1; i++) {
                Hx[i][j][k] = Da*Hx[i][j][k] + Db*((Ez[i][j][k] - Ez[i][j+1][k]) + (Ez[i][j][k+1]-Ez[i][j][k]));
            }
        }
    }
}
// Update Hy: one thread per k-slice, looping over i and j on the device.
// Expects a 1-D launch covering kmax threads.
extern __global__ void loop5_GPU(double*** Hy, double*** Ez, double Da, double Db, int kmax, int jmax, int imax) {
    int i, j;
    // BUGFIX/idiom: use blockDim.x instead of the hard-coded 32 (see loop4_GPU).
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k < kmax) {
        for (j = 1; j < jmax-1; j++) {
            for (i = 0; i < imax-1; i++) {
                Hy[i][j][k] = Da*Hy[i][j][k] + Db*((Ez[i+1][j][k] - Ez[i][j][k]) + (Ez[i][j][k]-Ez[i][j][k+1]));
            }
        }
    }
}
// Update Hz: one thread per k-slice, looping over i and j on the device.
// This loop starts at k = 1, hence the +1 on the thread index.
extern __global__ void loop6_GPU(double*** Hz, double*** Ez, double Da, double Db, int kmax, int jmax, int imax) {
    int i, j;
    // BUGFIX/idiom: use blockDim.x instead of the hard-coded 32 (see loop4_GPU).
    int k = (blockIdx.x * blockDim.x + threadIdx.x) + 1; // this loop starts at k=1 so we add 1
    if (k < kmax) {
        for (j = 0; j < jmax-1; j++) {
            for (i = 0; i < imax-1; i++) {
                Hz[i][j][k] = Da*Hz[i][j][k] + Db*((Ez[i][j][k] - Ez[i+1][j][k]) + (Ez[i][j+1][k]-Ez[i][j][k]));
            }
        }
    }
}
// Driver: 3-D FDTD time stepping. The three E-field updates run on the CPU
// each timestep; the three H-field updates run on the GPU.
// NOTE(review): the triple-pointer GPU arrays below are set up incorrectly —
// see the comments at the device allocation and copy loops.
int main() {
printf("Running main\n");
// Grid dimensions, timestep count, and Gaussian source parameters.
int imax = 100, jmax = 100, nmax = 1000, nhalf = 20, no = nhalf*3, kmax = 100;
int i, j, n,k;
// Physical constants: speed of light, pi, conductivity, permeability, permittivity.
double c = 2.99792458e8, pi = 3.141592654, sigma = 0, mu = 4.0 * pi * 1.0e-7, eps = 8.85418782e-12;
double delta = 1e-3;
// Timestep: delta / (c * sqrt(2)).
double dt = delta/(c*1.41421356237);
double ***Ex, ***Ey, ***Ez, ***Hy, ***Hx, ***Hz;
//struct timeval tstart,tend;
//int sec,usec;
cudaEvent_t start_event, stop_event;
float elapsed_time;
// Host field arrays as (imax+1) x (jmax+1) x (kmax+1) pointer tables.
Ex = (double ***)malloc((imax+1)*sizeof(double **));
Ey = (double ***)malloc((imax+1)*sizeof(double **));
Ez = (double ***)malloc((imax+1)*sizeof(double **));
Hx = (double ***)malloc((imax+1)*sizeof(double **));
Hy = (double ***)malloc((imax+1)*sizeof(double **));
Hz = (double ***)malloc((imax+1)*sizeof(double **));
for(i=0;i<(imax+1);i++) {
Ex[i] = (double **)malloc((jmax+1)*sizeof(double *));
Ey[i] = (double **)malloc((jmax+1)*sizeof(double *));
Ez[i] = (double **)malloc((jmax+1)*sizeof(double *));
Hx[i] = (double **)malloc((jmax+1)*sizeof(double *));
Hy[i] = (double **)malloc((jmax+1)*sizeof(double *));
Hz[i] = (double **)malloc((jmax+1)*sizeof(double *));
for(j=0;j<(jmax+1);j++) {
Ex[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Ey[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Ez[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Hx[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Hy[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Hz[i][j] = (double *)malloc((kmax+1)*sizeof(double));
}
}
// Zero-initialize all six fields.
for(k=0;k<(kmax+1);k++){
for(j=0;j<(jmax+1);j++){
for(i=0;i<(imax+1);i++){
Ex[i][j][k] = 0.0;
Ey[i][j][k] = 0.0;
Ez[i][j][k] = 0.0;
Hx[i][j][k] = 0.0;
Hy[i][j][k] = 0.0;
Hz[i][j][k] = 0.0;
}
}
}
double*** g_Hx;
double*** g_Hy;
double*** g_Hz;
double*** g_Ez;
//fprintf(fPointer, "allocating memory on GPU\n");
CHECK_ERROR(cudaMalloc((void**)&g_Hx, (imax+1)*sizeof(double**)));
CHECK_ERROR(cudaMalloc((void**)&g_Hy, (imax+1)*sizeof(double**)));
CHECK_ERROR(cudaMalloc((void**)&g_Hz, (imax+1)*sizeof(double**)));
CHECK_ERROR(cudaMalloc((void**)&g_Ez, (imax+1)*sizeof(double**)));
// NOTE(review): g_Hx/g_Hy/g_Hz/g_Ez point to DEVICE memory, so the
// expressions g_Hx[i] and g_Hx[i][j] below dereference device pointers on
// the HOST — this is invalid (typically a segfault or
// cudaErrorIllegalAddress). The pointer tables need to be built in host
// memory and copied to the device, or the arrays flattened into a single
// allocation indexed arithmetically.
for(i=0;i<(imax+1);i++) {
CHECK_ERROR(cudaMalloc((void**)&g_Hx[i], (jmax+1)*sizeof(double*)));
CHECK_ERROR(cudaMalloc((void**)&g_Hy[i], (jmax+1)*sizeof(double*)));
CHECK_ERROR(cudaMalloc((void**)&g_Hz[i], (jmax+1)*sizeof(double*)));
CHECK_ERROR(cudaMalloc((void**)&g_Ez[i], (jmax+1)*sizeof(double*)));
for(j=0;j<(jmax+1);j++) {
CHECK_ERROR(cudaMalloc((void**)&g_Hx[i][j], (kmax+1)*sizeof(double)));
CHECK_ERROR(cudaMalloc((void**)&g_Hy[i][j], (kmax+1)*sizeof(double)));
CHECK_ERROR(cudaMalloc((void**)&g_Hz[i][j], (kmax+1)*sizeof(double)));
CHECK_ERROR(cudaMalloc((void**)&g_Ez[i][j], (kmax+1)*sizeof(double)));
}
}
// Update coefficients for the E (Ca/Cb) and H (Da/Db) fields.
double Ca,Cb,Da,Db;
Ca = (1-((sigma*dt)/(2*eps)))/(1+((sigma*dt)/(2*eps)));
Cb = (dt/(eps*delta))/(1+((sigma*dt)/(2*eps)));
Da = (1-((sigma*dt)/(2*mu)))/(1+((sigma*dt)/(2*mu)));
Db = (dt/(mu*delta))/(1+((sigma*dt)/(2*mu)));
FILE * fPointer;
fPointer = fopen("myoutput3d.dat","w");
// Time the whole stepping loop with CUDA events.
CHECK_ERROR(cudaEventCreate(&start_event));
CHECK_ERROR(cudaEventCreate(&stop_event));
CHECK_ERROR(cudaEventRecord(start_event, 0));
for (n = 0; n < nmax; n++) {
char buf[18];
memset(buf, 0, 18);
sprintf(buf, "inside n loop\n");
fputs(buf, fPointer);
// E-field updates on the CPU.
for (k = 1; k < kmax; k++) {
for (j = 1; j < jmax; j++) {
for (i = 0; i < imax; i++) {
Ex[i][j][k] = Ca*Ex[i][j][k] + Cb*((Hz[i][j][k] - Hy[i][j-1][k]) + (Hy[i][j][k-1] - Hy[i][j][k]));
}
}
}
for (k = 1; k < kmax; k++) {
for (j = 0; j < jmax; j++) {
for (i = 1; i < imax; i++) {
Ey[i][j][k] = Ca*Ey[i][j][k] + Cb*((Hz[i-1][j][k] - Hy[i][j][k]) + (Hy[i][j][k] - Hy[i][j][k-1]));
}
}
}
for (k = 0; k < kmax; k++) {
for (j = 1; j < jmax; j++) {
for (i = 1; i < imax; i++) {
Ez[i][j][k] = Ca*Ez[i][j][k] + Cb*((Hz[i][j][k] - Hy[i-1][j][k]) + (Hy[i][j-1][k] - Hy[i][j][k]));
}
}
}
// Gaussian pulse source injected at the grid center.
Ez[imax/2][jmax/2][kmax/2] = exp(-(pow(((n-no)/(double)nhalf),2.0)));
fprintf(fPointer, "Copying memory to GPU\n");
// NOTE(review): same invalid host-side dereference of g_*[i][j] as above;
// also, copying every row each timestep would dominate the runtime even
// if the pointers were valid.
for(i=0;i<(imax+1);i++) {
for(j=0;j<(jmax+1);j++) {
CHECK_ERROR(cudaMemcpy(g_Hx[i][j], Hx[i][j], (kmax+1)*sizeof(double), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMemcpy(g_Hy[i][j], Hy[i][j], (kmax+1)*sizeof(double), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMemcpy(g_Hz[i][j], Hz[i][j], (kmax+1)*sizeof(double), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMemcpy(g_Ez[i][j], Ez[i][j], (kmax+1)*sizeof(double), cudaMemcpyHostToDevice));
}
}
fprintf(fPointer, "Running loops on GPU\n");
// H-field updates on the GPU: one thread per k index.
dim3 threadsPerBlock(32);
dim3 numBlocks((kmax + threadsPerBlock.x-1) / threadsPerBlock.x);
loop4_GPU<<<numBlocks, threadsPerBlock>>>(g_Hx, g_Ez, Da, Db, kmax, jmax, imax);
loop5_GPU<<<numBlocks, threadsPerBlock>>>(g_Hy, g_Ez, Da, Db, kmax, jmax, imax);
loop6_GPU<<<numBlocks, threadsPerBlock>>>(g_Hz, g_Ez, Da, Db, kmax, jmax, imax);
fprintf(fPointer, "Copying results back to host\n");
for(i=0;i<(imax+1);i++) {
for(j=0;j<(jmax+1);j++) {
CHECK_ERROR(cudaMemcpy(Hx[i][j], g_Hx[i][j], (kmax+1)*sizeof(double), cudaMemcpyDeviceToHost));
CHECK_ERROR(cudaMemcpy(Hy[i][j], g_Hy[i][j], (kmax+1)*sizeof(double), cudaMemcpyDeviceToHost));
CHECK_ERROR(cudaMemcpy(Hz[i][j], g_Hz[i][j], (kmax+1)*sizeof(double), cudaMemcpyDeviceToHost));
CHECK_ERROR(cudaMemcpy(Ez[i][j], g_Ez[i][j], (kmax+1)*sizeof(double), cudaMemcpyDeviceToHost));
}
}
}
fprintf(fPointer, "Freeing memory on GPU\n");
for(i=0;i<(imax+1);i++) {
for(j=0;j<(jmax+1);j++) {
CHECK_ERROR(cudaFree(g_Hx[i][j]));
CHECK_ERROR(cudaFree(g_Hy[i][j]));
CHECK_ERROR(cudaFree(g_Hz[i][j]));
CHECK_ERROR(cudaFree(g_Ez[i][j]));
}
CHECK_ERROR(cudaFree(g_Hx[i]));
CHECK_ERROR(cudaFree(g_Hy[i]));
CHECK_ERROR(cudaFree(g_Hz[i]));
CHECK_ERROR(cudaFree(g_Ez[i]));
}
CHECK_ERROR(cudaFree(g_Hx));
CHECK_ERROR(cudaFree(g_Hy));
CHECK_ERROR(cudaFree(g_Hz));
CHECK_ERROR(cudaFree(g_Ez));
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&elapsed_time, start_event, stop_event);
fclose(fPointer);
printf("GPU Time: %.2f\n", elapsed_time);
return 0;
}
|
5,645 | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
// This code assumes that your device support block size of 1024
#define MAX_RANGE 9999
#define funcCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
printf( "Failed to run stmt %d ", __LINE__); \
printf( "Got CUDA error ... %s ", cudaGetErrorString(err)); \
return -1; \
} \
} while(0)
// Compute C = A * B
// Tiled matrix multiply: C = A * B using 32x32 shared-memory tiles.
// Launch with 32x32 thread blocks covering the numCRows x numCColumns output.
__global__ void matrixMultiplyShared(float * A, float * B, float * C,
                                     int numARows, int numAColumns,
                                     int numBRows, int numBColumns,
                                     int numCRows, int numCColumns)
{
    __shared__ float sA[32][32];   // Tile size of 32x32
    __shared__ float sB[32][32];

    int Row = blockDim.y*blockIdx.y + threadIdx.y;
    int Col = blockDim.x*blockIdx.x + threadIdx.x;
    float Cvalue = 0.0;
    sA[threadIdx.y][threadIdx.x] = 0.0;
    sB[threadIdx.y][threadIdx.x] = 0.0;

    // Walk the shared dimension tile by tile.
    for (int k = 0; k < (((numAColumns - 1)/ 32) + 1); k++)
    {
        // Stage one tile of A and one of B, zero-padding out-of-range cells.
        if ( (Row < numARows) && (threadIdx.x + (k*32)) < numAColumns)
        {
            sA[threadIdx.y][threadIdx.x] = A[(Row*numAColumns) + threadIdx.x + (k*32)];
        }
        else
        {
            sA[threadIdx.y][threadIdx.x] = 0.0;
        }
        if ( Col < numBColumns && (threadIdx.y + k*32) < numBRows)
        {
            sB[threadIdx.y][threadIdx.x] = B[(threadIdx.y + k*32)*numBColumns + Col];
        }
        else
        {
            sB[threadIdx.y][threadIdx.x] = 0.0;
        }
        __syncthreads();

        for (int j = 0; j < 32; ++j)
        {
            Cvalue += sA[threadIdx.y][j] * sB[j][threadIdx.x];
        }
        // BUGFIX: barrier before the next iteration reloads the tiles.
        // Without it, fast threads overwrite sA/sB while slower threads are
        // still reading them — a shared-memory data race.
        __syncthreads();
    }

    if (Row < numCRows && Col < numCColumns)
    {
        C[Row*numCColumns + Col] = Cvalue;
    }
}
// Reference matrix multiply on the host: C = A * B.
// A is numARows x numAColumns, B is numBRows x numBColumns, and C is
// numCRows x numCColumns (requires numCRows == numARows,
// numCColumns == numBColumns, numAColumns == numBRows).
void matMultiplyOnHost(float * A, float * B, float * C, int numARows,
                       int numAColumns, int numBRows, int numBColumns,
                       int numCRows, int numCColumns)
{
    for (int i=0; i < numCRows; i ++)
    {
        // BUGFIX: iterate the columns of C (numCColumns) and contract over
        // the shared dimension (numAColumns). The original looped j over
        // numAColumns and k over numCColumns, which is only correct when
        // all matrices are square.
        for (int j = 0; j < numCColumns; j++)
        {
            C[i*numCColumns + j ] = 0.0;
            for (int k = 0; k < numAColumns; k++)
            {
                C[i*numCColumns + j ] += A[i*numAColumns + k] * B [k*numBColumns + j];
            }
        }
    }
    return;
}
// Driver: random 512x512 matrix multiply on the GPU, verified against the
// host reference implementation.
int main(int argc, char ** argv) {
    float * hostA; // The A matrix
    float * hostB; // The B matrix
    float * hostC; // The output C matrix (GPU result)
    float * hostComputedC; // CPU reference result
    float * deviceA;
    float * deviceB;
    float * deviceC;

    // Please adjust rows and columns according to you need.
    int numARows = 512; // number of rows in the matrix A
    int numAColumns = 512; // number of columns in the matrix A
    int numBRows = 512; // number of rows in the matrix B
    int numBColumns = 512; // number of columns in the matrix B
    int numCRows; // number of rows in the matrix C
    int numCColumns; // number of columns in the matrix C

    hostA = (float *) malloc(sizeof(float)*numARows*numAColumns);
    hostB = (float *) malloc(sizeof(float)*numBRows*numBColumns);
    for (int i = 0; i < numARows*numAColumns; i++)
    {
        hostA[i] = (rand() % MAX_RANGE) / 2.0;
    }
    for (int i = 0; i < numBRows*numBColumns; i++)
    {
        hostB[i] = (rand() % MAX_RANGE) / 2.0;
    }

    // C inherits A's rows and B's columns.
    numCRows = numARows;
    numCColumns = numBColumns;
    hostC = (float *) malloc(sizeof(float)*numCRows*numCColumns);
    hostComputedC = (float *) malloc(sizeof(float)*numCRows*numCColumns);

    // Allocating GPU memory
    funcCheck(cudaMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
    funcCheck(cudaMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns));
    funcCheck(cudaMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));

    // Copy memory to the GPU
    funcCheck(cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice));
    funcCheck(cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice));

    // BUGFIX: ceil-divide so no extra (fully idle) block row/column is
    // launched when the size is already a multiple of the 32x32 tile.
    dim3 dimBlock(32, 32, 1);
    dim3 dimGrid((numCColumns + 31)/32, (numCRows + 31)/32, 1);

    //@@ Launch the GPU Kernel here
    matrixMultiplyShared<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
    cudaError_t err1 = cudaPeekAtLastError();
    cudaDeviceSynchronize();
    printf( "Got CUDA error ... %s \n", cudaGetErrorString(err1));

    // Copy the results in GPU memory back to the CPU
    funcCheck(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));

    matMultiplyOnHost(hostA, hostB, hostComputedC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);

    // BUGFIX: compare with a relative tolerance. The tiled GPU kernel sums
    // the products in a different order than the CPU loop, so exact float
    // equality reports spurious mismatches.
    for (int i=0; i < numCColumns*numCRows; i++)
    {
        float diff = hostComputedC[i] - hostC[i];
        if (diff < 0) diff = -diff;
        float ref = hostComputedC[i] < 0 ? -hostComputedC[i] : hostComputedC[i];
        if (diff > 1e-4f * ref + 1e-3f)
        {
            printf("Mismatch at Row = %d Col = %d hostComputed[] = %f --device[] %f\n", i / numCColumns, i % numCColumns, hostComputedC[i], hostC[i]);
            break;
        }
    }

    // Free the GPU memory
    funcCheck(cudaFree(deviceA));
    funcCheck(cudaFree(deviceB));
    funcCheck(cudaFree(deviceC));
    free(hostA);
    free(hostB);
    free(hostC);
    free(hostComputedC);
    return 0;
} |
5,646 | /***************************************************************************
**************************************************************************
Spherical Harmonic Transform Kit 2.7
Copyright 1997-2003 Sean Moore, Dennis Healy,
Dan Rockmore, Peter Kostelec
Copyright 2004 Peter Kostelec, Dan Rockmore
This file is part of SpharmonicKit.
SpharmonicKit is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
SpharmonicKit is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
See the accompanying LICENSE file for details.
************************************************************************
************************************************************************/
/* quadrature weights file */
/* contains precomputed arrays of Legendre quadrature weight values */
/***
The interface function to these arrays is defined at the end
of this file. It is
const double *get_weights( int bw )
***/
__device__ __constant__ double w4[8] =
{0.06698294569888951, 0.222987933015572, 0.3241525190659486,
0.3858766022239052, 0.3858766022242057, 0.3241525190660357,
0.222987933015605, 0.06698294569889861};
__device__ __constant__ double w8[16] =
{0.0168027552304648, 0.058336464069326, 0.0917183132052142,
0.1251296181582697, 0.1513924616369861, 0.1734194117717632,
0.1877497281148386, 0.1954512478118618, 0.1954512478124817,
0.187749728115036, 0.1734194117718737, 0.1513924616370552,
0.1251296181583115, 0.0917183132052395, 0.0583364640693394,
0.0168027552304693};
__device__ __constant__ double w16[32] =
{0.004204256199901001, 0.014748328130096, 0.023629778419644,
0.033237125953057, 0.041849747749195, 0.050571719994984, 0.05840156832621,
0.065996706406676, 0.07268790949655301, 0.078899622945272,
0.084170977199815, 0.088777632866959, 0.092414154989675, 0.095247804861176,
0.097103167890767, 0.09805949856685, 0.098059498568105, 0.09710316789118,
0.095247804861419, 0.092414154989843, 0.088777632867086, 0.084170977199914,
0.07889962294535, 0.07268790949661499, 0.06599670640672201,
0.05840156832624701, 0.050571719995013, 0.04184974774921601,
0.033237125953072, 0.023629778419653, 0.014748328130102, 0.004204256199904};
__device__ __constant__ double w32[64] =
{0.001051287070134, 0.00369738253841, 0.005951729000078, 0.00843412760248,
0.010722073632584, 0.013119273230032, 0.015375045276171, 0.017685873258142,
0.019875170205663, 0.022085297687726, 0.02418169220008, 0.026273624541033,
0.028254130235429, 0.03020985118730501, 0.032053730303588,
0.033855733201296, 0.035544150136538, 0.037175968389769, 0.038691923240227,
0.040138463486499, 0.04146682983648501, 0.042714610025193,
0.04384221112648501, 0.044879543239542, 0.045795238152625,
0.04661237251476801, 0.047307138466904, 0.04789637733659801,
0.048363381016468, 0.048719165059896, 0.048953818726571, 0.049072788071371,
0.04907278807389301, 0.048953818727408, 0.04871916506039601,
0.048363381016822, 0.04789637733687, 0.047307138467123, 0.046612372514951,
0.04579523815278, 0.044879543239676, 0.043842211126604,
0.04271461002529901, 0.04146682983657701, 0.04013846348658201,
0.0386919232403, 0.037175968389834, 0.035544150136597, 0.033855733201345,
0.032053730303631, 0.030209851187344, 0.028254130235463, 0.026273624541062,
0.024181692200106, 0.022085297687748, 0.019875170205683, 0.017685873258158,
0.015375045276184, 0.013119273230042, 0.010722073632593, 0.008434127602487,
0.005951729000084, 0.003697382538416, 0.001051287070135};
__device__ __constant__ double w64[128] =
{0.000262835707197, 0.000924989999111, 0.001490708223128, 0.002116377084453,
0.002696897333212, 0.003309996179437, 0.003893120261077, 0.004497543115599,
0.005078810319213, 0.005675008203167, 0.00625174592528, 0.006839175137552,
0.007409342588573999, 0.007987078618147001, 0.008548922859887,
0.009115873663663, 0.009667799842016, 0.010222796978386, 0.010763311740366,
0.011305155615914, 0.01183284018059, 0.012360325369511, 0.012873822157835,
0.013385753079517, 0.013883759122364, 0.014378960637318, 0.01486022460612,
0.015337549735279, 0.015800871005997, 0.016259206915535, 0.016703435812042,
0.017141708689126, 0.017565747421518, 0.017982926599289, 0.018385730608967,
0.018780832153766, 0.019161411686675, 0.019533501577973, 0.019890923371016,
0.020239120355893, 0.020572509359939, 0.020895987534359, 0.021204528621207,
0.021502519771847, 0.021785459387897, 0.022057255116519, 0.022313902856023,
0.022558856500676, 0.022788586578443, 0.023006114940762, 0.023208367548925,
0.023397952433377, 0.023572234970434, 0.023733424539198, 0.023879312701991,
0.02401172264761, 0.024128861378958, 0.024232175915971, 0.02432028020224,
0.024394252878361, 0.02445310839246, 0.024497562719501, 0.024527026305793,
0.0245418562091, 0.024541856214155, 0.024527026307477, 0.02449756272051,
0.024453108393178, 0.024394252878919, 0.024320280202694, 0.024232175916354,
0.024128861379288, 0.0240117226479, 0.023879312702248, 0.023733424539429,
0.023572234970644, 0.023397952433567, 0.0232083675491, 0.023006114940923,
0.022788586578593, 0.022558856500815, 0.022313902856155, 0.022057255116642,
0.021785459388013, 0.021502519771957, 0.021204528621309, 0.020895987534455,
0.020572509360028, 0.020239120355979, 0.019890923371095, 0.019533501578048,
0.019161411686746, 0.018780832153833, 0.018385730609031, 0.01798292659935,
0.017565747421574, 0.017141708689177, 0.01670343581209, 0.016259206915579,
0.015800871006039, 0.015337549735318, 0.014860224606157, 0.014378960637354,
0.013883759122397, 0.013385753079548, 0.012873822157864, 0.012360325369538,
0.011832840180615, 0.011305155615937, 0.010763311740387, 0.010222796978406,
0.009667799842034, 0.00911587366368, 0.008548922859902,
0.007987078618162001, 0.007409342588588, 0.006839175137563,
0.00625174592529, 0.005675008203175001, 0.005078810319221,
0.004497543115606001, 0.003893120261083, 0.003309996179442,
0.002696897333215, 0.002116377084455, 0.00149070822313, 0.000924989999113,
0.000262835707198};
/************************************************************************/
/* returns a pointer to the proper quadrature weights for a given
bandwidth bw. Weights array is of size 2*bw. A closed form for
the weights can be found in the original Driscoll and Healy paper,
but need to be normalized */
/* Map a bandwidth bw to its precomputed quadrature weight table declared
   above (each table has 2*bw entries), or a null pointer when no table
   exists for that bandwidth. */
__device__ double *get_weights(int bw)
{
  if (bw == 4)
    return w4;
  if (bw == 8)
    return w8;
  if (bw == 16)
    return w16;
  if (bw == 32)
    return w32;
  if (bw == 64)
    return w64;
  return 0;
}
|
5,647 | #include "FloatVector.cuh"
// Wrap an existing buffer of `height` elements; ownership is not taken.
FloatVector::FloatVector(long height, FLOAT_VEC_TYPE* data)
    : height(height), data(data) {
}
// Number of elements in the vector.
long FloatVector::getHeight() {
    return this->height;
}
// Raw pointer to the underlying element buffer (not owned by this class).
FLOAT_VEC_TYPE * FloatVector::getData() {
return data;
} |
5,648 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <ctime>
typedef unsigned char uchar;
#define EROSION_SIZE 5
const int EROSION_MATRIX[EROSION_SIZE * EROSION_SIZE] = {
0, 0, 1, 0, 0,
0, 0, 1, 0, 0,
1, 1, 1, 1, 1,
0, 0, 1, 0, 0,
0, 0, 1, 0, 0
};
#define EMPTY 0
#define FULL 255
#define THREAD_CNT 1024
// Binary erosion with the EROSION_SIZE x EROSION_SIZE structuring element in
// `matrix` (non-zero entries are "active"). Launched as <<<1, THREAD_CNT>>>;
// each thread erodes a contiguous band of image rows. Border pixels, where
// the element does not fit, are set to FULL.
__global__ void erosion_kernel(uchar *image, uchar *res, int *matrix, int rows, int cols) {
    // CLEANUP: the original reused `i` both for the thread index and for the
    // inner loops below (shadowing); the loops now use distinct names.
    int tid = threadIdx.x;
    // Static row partition across THREAD_CNT threads.
    int begin = tid * rows / THREAD_CNT;
    int end = min((tid + 1) * rows / THREAD_CNT, rows);
    for (int pos = begin; pos < end; ++pos) {
        uchar* aim = res + (pos * cols);
        // Rows too close to the top/bottom edge: emit FULL everywhere.
        if (pos < EROSION_SIZE / 2 || pos >= rows - EROSION_SIZE / 2) {
            for (int l = 0; l < cols; ++l)
                aim[l] = FULL;
            continue;
        }
        // Pointers to the EROSION_SIZE source rows centered on `pos`.
        uchar* cache[EROSION_SIZE];
        for (int r = 0; r < EROSION_SIZE; ++r) {
            cache[r] = image + (r + pos - EROSION_SIZE / 2) * cols;
        }
        int l;
        // Left border columns.
        for (l = 0; l < EROSION_SIZE / 2 && l < cols; ++l) {
            aim[l] = FULL;
        }
        // Interior: FULL only if every active element position is non-zero.
        for (; l < cols - EROSION_SIZE / 2; ++l) {
            bool result = true;
            for (int r = 0; r < EROSION_SIZE && result; ++r) {
                for (int c = 0; c < EROSION_SIZE; ++c) {
                    if (matrix[r * EROSION_SIZE + c] && !cache[r][c + l - EROSION_SIZE / 2]) {
                        result = false;
                        break;
                    }
                }
            }
            if (result) aim[l] = FULL;
            else aim[l] = EMPTY;
        }
        // Right border columns.
        for (; l < cols; ++l)
            aim[l] = FULL;
    }
}
// Host wrapper: upload the image and structuring element, run the erosion
// kernel on a single block of THREAD_CNT threads, time it, and download the
// result. On any CUDA failure, reports the failing call and falls through
// to the shared cleanup path.
void erosion_image(uchar *image_data, uchar *res_data, int rows, int cols) {
    int size = rows * cols;
    int *dev_matrix = 0;
    uchar *dev_image_data = 0;
    uchar *dev_res_data = 0;
    clock_t begin;
    clock_t end;
    double timeCost;
    cudaError_t cudaStatus;

    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_image_data, size * sizeof(uchar));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_res_data, size * sizeof(uchar));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_matrix, EROSION_SIZE * EROSION_SIZE * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // BUGFIX: the copy/launch/sync error messages below all said
    // "cudaMalloc failed!" (copy-paste); each now names the failing call.
    cudaStatus = cudaMemcpy(dev_image_data, image_data, size * sizeof(uchar), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_matrix, EROSION_MATRIX, EROSION_SIZE * EROSION_SIZE * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    begin = clock();
    erosion_kernel<<<1, THREAD_CNT>>>(dev_image_data, dev_res_data, dev_matrix, rows, cols);
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "erosion_kernel launch failed!");
        goto Error;
    }
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize failed!");
        goto Error;
    }
    end = clock();
    timeCost = (double)(end - begin)/CLOCKS_PER_SEC;
    fprintf(stderr, "time: %lf\n", timeCost);

    cudaStatus = cudaMemcpy(res_data, dev_res_data, size * sizeof(uchar), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    // Shared cleanup path (also reached on success).
    cudaFree(dev_image_data);
    cudaFree(dev_res_data);
    cudaFree(dev_matrix);
    cudaDeviceReset();
} |
5,649 | #include <stdio.h>
#define WIDTH_ARRAY 10
#define HEIGHT_ARRAY 10
// Kernel: computes sum[0] = -(sum of all WIDTH_ARRAY*HEIGHT_ARRAY elements of
// modArray). Expects a single block of (WIDTH_ARRAY, HEIGHT_ARRAY) threads,
// one thread per element.
__global__ void shareArray (int *modArray, int *sum) {
int i, j, n, index, countVal;
// Shared scratch: a negated copy of the input plus one partial sum per column.
__shared__ int sumColumn[WIDTH_ARRAY],
tmpModArray[WIDTH_ARRAY][HEIGHT_ARRAY];
i = threadIdx.x;
j = threadIdx.y;
index = i + j * WIDTH_ARRAY;
// Stage the negated element into shared memory.
tmpModArray[i][j] = -1 * modArray[index];
__syncthreads();
// One thread per column (the y == 0 row) reduces that column.
if (threadIdx.y == 0) {
countVal = 0;
for (n = 0; n < HEIGHT_ARRAY; n++)
countVal = countVal + tmpModArray[i][n];
sumColumn[i] = countVal;
}
__syncthreads();
// Thread (0,0) folds the per-column sums into the final result.
if (threadIdx.x == 0 && threadIdx.y == 0) {
countVal = 0;
for (n = 0; n < WIDTH_ARRAY; n++)
countVal = countVal + sumColumn[n];
sum[0] = countVal;
}
}
// Host driver: fills a WIDTH_ARRAY x HEIGHT_ARRAY matrix with 1..N, prints it,
// runs the shareArray kernel to compute the negated total, and prints that.
// Fix vs. original: `sum` and `gpu_sum` were leaked; both are now released.
__host__ int main (void) {
    int index, i, j;
    int *modArray, *gpu_modArray, *sum, *gpu_sum;
    size_t sizeArray;
    sizeArray = WIDTH_ARRAY * HEIGHT_ARRAY * sizeof(int);
    modArray = (int*) malloc( sizeArray );
    cudaMalloc( &gpu_modArray, sizeArray );
    sum = (int*) malloc( sizeof(int) );
    cudaMalloc( &gpu_sum, sizeof(int) );
    printf("original values\n");
    for ( i = 0; i < WIDTH_ARRAY; i++ ) {
        for ( j = 0; j < HEIGHT_ARRAY; j++ ) {
            index = i + j * WIDTH_ARRAY;
            modArray[index] = index + 1;
            printf("%d ", modArray[index]);
        }
        printf("\n");
    }
    cudaMemcpy( gpu_modArray, modArray,
                sizeArray, cudaMemcpyHostToDevice );
    // One block shaped like the matrix: thread (x, y) handles element (x, y).
    dim3 threads1(WIDTH_ARRAY, HEIGHT_ARRAY);
    shareArray <<< 1, threads1 >>> (gpu_modArray, gpu_sum);
    // The blocking copy also synchronizes with the kernel.
    cudaMemcpy( sum, gpu_sum,
                sizeof(int), cudaMemcpyDeviceToHost );
    printf("\nfinal sum\n %d\n\n", sum[0]);
    free( modArray );
    free( sum );               // was leaked in the original
    cudaFree( gpu_modArray );
    cudaFree( gpu_sum );       // was leaked in the original
    return 0;
}
|
5,650 | // Note that in this model we do not check
// the error codes and status of kernel call.
/*
In this exercise we will write a simple cuda program that sets the value of an array: A[i] = i.
Take a look at the file set.cu, that includes a skeleton of the code.
Here we will complete the code by completing these steps (a TODO exists for each step):
Allocate memory for the device array d_A
Free memory for the device array d_A
Complete the kernel code. The kernel assigns the global thread index to each element in the vector
Call the kernel with two arguments, pointer to the allocated device memory and the length of the array.
Copy the result vector from device memory to host memory buffer
Pay close attention to the kernel call parameters, block and grid sizes!
Can you write the kernel so that it functions even if you launch too many threads?
*/
#include <cstdio>
#include <cmath>
// Kernel: A[i] = i for each element, one thread per element.
__global__ void set(int* A, int N){
    // Global 1-D thread index over a 1D grid of 1D blocks.
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    // Bounds guard so the kernel is safe even when more threads are launched
    // than there are elements (the exercise text explicitly asks for this;
    // the original wrote out of bounds whenever grid*block > N).
    if (idx < N) {
        A[idx] = idx;
    }
}
// Host driver for the `set` kernel: allocates N ints on the device, fills them
// with their indices on the GPU, copies the result back, and prints it.
// NOTE(review): no CUDA error checking anywhere; failures would be silent.
int main(void){
const int N = 128;
int* d_A;
int* h_A;
h_A = (int*)malloc(N * sizeof(int));
// TODO 1 - Allocate memory for device pointer d_A
//cudaMalloc(&d_A, N * sizeof(int)));
cudaMalloc((void**)&d_A, N * sizeof(int));
// TODO 4 - Call kernel set()
// 2 blocks x 64 threads = exactly N threads.
set<<<2,64>>>(d_A, N);
// TODO 5 - Copy the results from device memory
// cudaMemcpy is blocking, so it also synchronizes with the kernel.
cudaMemcpy(h_A, d_A, N * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%i ", h_A[i]);
}
printf("\n");
free(h_A);
// TODO 2 - Free memory for device pointer d_A
//cudaFree(d_A)
cudaFree((void*)d_A);
return 0;
}
5,651 | #include <stdio.h>
#include <stdint.h>
const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;
#define BLOCK_SIZE 8
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
// Dense matrix stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
int width;           // number of columns
int height;          // number of rows
uint64_t* elements;  // row-major storage, width * height entries
} Matrix;
// Forward declaration of the matrix multiplication kernel
__global__ void MatVecMulKernel(Matrix, uint64_t* , uint64_t*);
__global__ void transposeNaive(Matrix, Matrix, int, int);
// Matrix-Vector multiplication - Host code
// Host wrapper: copies A and the vecsize-element vector B to the device,
// launches MatVecMulKernel over A.height rows, and copies the vecsize-element
// result back into C.
// NOTE(review): no CUDA error checking; see MatVecMulKernel for indexing
// concerns that also affect the sizes chosen here (vecsize is A.height at the
// call site, while the kernel indexes B/C with e + col / row + col).
void MatVecMul(Matrix A, uint64_t* B, uint64_t* C, int vecsize)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(uint64_t);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size,
cudaMemcpyHostToDevice);
uint64_t *d_B;
size = vecsize * sizeof(uint64_t);
cudaMalloc(&d_B, size);
cudaMemcpy(d_B, B, size,
cudaMemcpyHostToDevice);
// Allocate C in device memory
uint64_t *d_C;
size = vecsize * sizeof(uint64_t);
cudaMalloc(&d_C, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// Grid: one block wide in x; y dimension covers A.height rows.
dim3 dimGrid((1 + dimBlock.x - 1) / dimBlock.x, (A.height + dimBlock.y - 1) / dimBlock.y);
MatVecMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device memory
// (blocking copy; also synchronizes with the kernel)
cudaMemcpy(C , d_C, size,
cudaMemcpyDeviceToHost);
/*
for(int i = 0; i < A.height; i++){
printf("%llu ", C[i]);
printf("\n");
}
printf("\n");
*/
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B);
cudaFree(d_C);
}
// MV multiplication kernel called by MatMul()
// Per-row XOR-fold of A masked by B over GF(2):
// Cvalue = XOR over e of (A[row][e] & B[e + col]).
// NOTE(review): several suspicious spots are flagged below — verify against
// the intended math before relying on this kernel.
__global__ void MatVecMulKernel(Matrix A, uint64_t* B, uint64_t* C)
{
uint64_t Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// NOTE(review): every thread rewrites B in place here. Threads with different
// `col` touch overlapping B entries (a data race), and the writes happen
// before the bounds check below, so out-of-range threads also write.
for (int e = 0; e < A.width; ++e){
B[e+col] = (B[e+col] == 0) ? 0x00 : 0xffffffffffffffff;
}
// NOTE(review): guard uses `>` rather than `>=`, so row == A.height and
// col == A.width fall through and index one past the end of A/B/C.
if(row > A.height || col > A.width) return;
for (int e = 0; e < A.width; ++e)
Cvalue = Cvalue ^ (A.elements[row * A.width + e] & B[e + col]);
// NOTE(review): C is indexed by row + col, not row; for col > 0 this overlaps
// other rows' outputs. Confirm the intended output layout.
C[row + col] = Cvalue;
}
// Host wrapper intended to transpose A into C on the GPU.
// NOTE(review): the kernel launch below is commented out, so C.elements
// receives whatever uninitialized bytes cudaMalloc returned — as written this
// function performs no useful work (the caller's comment says the transpose
// is no longer needed).
void Trans(Matrix A, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(uint64_t);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size,
cudaMemcpyHostToDevice);
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
//size = C.width * C.height * sizeof(uint64_t);
size = C.width * C.height * sizeof(uint64_t);
cudaMalloc(&d_C.elements, size);
// Invoke kernel
// NOTE(review): this grid truncates when A's dimensions are not multiples of
// TILE_DIM (integer division).
dim3 dimGrid(A.width/TILE_DIM, A.height/TILE_DIM, 1);
dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
//transposeNaive<<<dimGrid, dimBlock>>>(d_C, d_A, A.width, A.height );
// Read C from device memory
cudaMemcpy(C.elements , d_C.elements, size,
cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_C.elements);
}
// Naive out-of-place transpose: one thread per element, no shared-memory
// tiling. Reads idata at (row, col) in row-major order and writes the same
// value to odata at the transposed location.
__global__ void transposeNaive(Matrix odata, Matrix idata, int width, int height)
{
const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
if (col >= width || row >= height)
return;
const unsigned int src = col + width * row;   // row-major input index
const unsigned int dst = row + height * col;  // transposed output index
odata.elements[dst] = idata.elements[src];
}
// Driver: loads a 1040x384 matrix of uint64 words from ./src/mat.txt, builds a
// constant test vector B, runs the GPU matrix-vector multiply, and prints C.
// NOTE(review): fopen result is not checked (NULL deref if the file is
// missing); "%llu" with a uint64_t* is only portable where uint64_t is
// unsigned long long (use SCNu64 from <cinttypes> otherwise); `mat` is ~3.2 MB
// on the stack; B and C are never freed (leak at exit).
int main() {
Matrix A;
uint64_t *B;
uint64_t *C;
int w, h;
w = 384;
h = 1040;
uint64_t mat[1040][384];
FILE *myfile;
uint64_t myvariable;
int i;
int j;
//Read in G.m example matrix from repo
myfile=fopen("./src/mat.txt", "r");
for(i = 0; i < 1040; i++){
for (j = 0 ; j < 384; j++)
{
fscanf(myfile,"%llu",&myvariable);
mat[i][j] = myvariable;
//printf ("%llu", mat[i][j]);
}
//printf ("\n");
}
fclose(myfile);
A.height = h;
A.width = w;
// A borrows the stack array; nothing to free for A.elements.
A.elements = &mat[0][0]; //(uint64_t*)malloc(A.width * A.height * sizeof(uint64_t));
B = (uint64_t*) malloc(h * sizeof(uint64_t));
for(int i = 0; i < h; i++){
B[i] = 2;//rand() % 2;
}
C = (uint64_t*) malloc(h * sizeof(uint64_t));
/*
D.height = w;
D.width = h;
D.elements = (uint64_t*)malloc(D.width * D.height * sizeof(uint64_t));
We do not need to transpose because we have index access to columns Trans(A,D);
*/
MatVecMul(A,B,C, h);
/*
for(int i = 0; i < h; i++){
for(int j = 0; j < w; j++)
printf("%llu ", A.elements[i*A.width + j]);
printf("\n");
}
printf("\n");
for(int i = 0; i < h; i++){
printf("%llu ", B[i]);
printf("\n");
}
printf("\n");
*/
for(int i = 0; i < A.height; i++){
printf("%llu ", C[i]);
printf("\n");
}
printf("\n");
return 0;
}
|
5,652 | // Liao 11/30/2011
// based on liao6@tux322:/usr/local/cuda/include/cuda_runtime_api.h
#include <stdio.h>
// Enumerates all CUDA devices and prints every field of cudaDeviceProp.
// Fix vs. original: the size_t members (totalGlobalMem, sharedMemPerBlock,
// memPitch, totalConstMem, textureAlignment, surfaceAlignment) were printed
// with %u/%d, which is undefined on LP64 platforms; they now use %zu.
int main()
{
    cudaDeviceProp prop;
    int count;
    cudaGetDeviceCount (&count);
    for (int i =0; i< count; i++)
    {
        cudaGetDeviceProperties (&prop, i);
        printf ("Name: %s\n", prop.name);
        printf ("Global Mem: %zu\n", prop.totalGlobalMem);
        printf ("Shared Mem per Block: %zu\n", prop.sharedMemPerBlock);
        printf ("regs per block: %d\n", prop.regsPerBlock);
        printf ("warpSize: %d\n", prop.warpSize);
        printf ("memPitch: %zu\n", prop.memPitch);
        printf ("maxThreadsPerBlock: %d\n", prop.maxThreadsPerBlock);
        printf ("maxThreadsDim[0]: %d\n", prop.maxThreadsDim[0]);
        printf ("maxThreadsDim[1]: %d\n", prop.maxThreadsDim[1]);
        printf ("maxThreadsDim[2]: %d\n", prop.maxThreadsDim[2]);
        printf ("maxGridSize[0]: %d\n", prop.maxGridSize[0]);
        printf ("maxGridSize[1]: %d\n", prop.maxGridSize[1]);
        printf ("maxGridSize[2]: %d\n", prop.maxGridSize[2]);
        printf ("clockRate: %d\n", prop.clockRate);
        printf ("totalConstMem: %zu\n", prop.totalConstMem);
        printf ("major: %d\n", prop.major);
        printf ("minor: %d\n", prop.minor);
        printf ("textureAlignment: %zu\n", prop.textureAlignment);
        printf ("deviceOverlap: %d\n", prop.deviceOverlap);
        printf ("multiProcessorCount: %d\n", prop.multiProcessorCount);
        printf ("kernelExecTimeoutEnabled: %d\n", prop.kernelExecTimeoutEnabled);
        printf ("integrated: %d\n", prop.integrated);
        printf ("canMapHostMemory: %d\n", prop.canMapHostMemory);
        printf ("computeMode: %d\n", prop.computeMode);
        printf ("maxTexture1D: %d\n", prop.maxTexture1D);
        printf ("maxTexture2D[0]: %d\n", prop.maxTexture2D[0]);
        printf ("maxTexture2D[1]: %d\n", prop.maxTexture2D[1]);
        printf ("maxTexture3D[0]: %d\n", prop.maxTexture3D[0]);
        printf ("maxTexture3D[1]: %d\n", prop.maxTexture3D[1]);
        printf ("maxTexture3D[2]: %d\n", prop.maxTexture3D[2]);
        printf ("maxTexture1DLayered[0]: %d\n", prop.maxTexture1DLayered[0]);
        printf ("maxTexture1DLayered[1]: %d\n", prop.maxTexture1DLayered[1]);
        printf ("maxTexture2DLayered[0]: %d\n", prop.maxTexture2DLayered[0]);
        printf ("maxTexture2DLayered[1]: %d\n", prop.maxTexture2DLayered[1]);
        printf ("maxTexture2DLayered[2]: %d\n", prop.maxTexture2DLayered[2]);
        printf ("surfaceAlignment: %zu\n", prop.surfaceAlignment);
        printf ("concurrentKernels: %d\n", prop.concurrentKernels);
        printf ("ECCEnabled: %d\n", prop.ECCEnabled);
        printf ("pciBusID: %d\n", prop.pciBusID);
        printf ("pciDeviceID: %d\n", prop.pciDeviceID);
        printf ("pciDomainID: %d\n", prop.pciDomainID);
        printf ("tccDriver: %d\n", prop.tccDriver);
        printf ("asyncEngineCount: %d\n", prop.asyncEngineCount);
        printf ("unifiedAddressing: %d\n", prop.unifiedAddressing);
        printf ("memoryClockRate: %d\n", prop.memoryClockRate);
        printf ("memoryBusWidth: %d\n", prop.memoryBusWidth);
        printf ("l2CacheSize: %d\n", prop.l2CacheSize);
        printf ("maxThreadsPerMultiProcessor: %d\n", prop.maxThreadsPerMultiProcessor);
    }
    return 0;
}
|
5,653 | #include "includes.h"
// No-op kernel (empty body); presumably launched purely for its stream-side
// effects (ordering/timing) — the name suggests it, the body does nothing.
__global__ void sync_streams(){}
5,654 | #include "includes.h"
// One level of a max-propagation sweep: working[idx] = max(working[idx],
// working[idx + span]) for every idx with idx + span < imageSize.
// Fix vs. original: both loads were issued BEFORE the bounds check, so the
// last `span` threads read past the end of `working`; the loads are now
// guarded along with the store (in-bounds behavior unchanged).
__global__ void kern_PropogateUp(float* working, int span, int imageSize)
{
    int idx = CUDASTDOFFSET;  // project macro: flattened global thread index
    if(idx+span < imageSize)
    {
        float inputValue1 = working[idx];
        float inputValue2 = working[idx+span];
        working[idx] = (inputValue1 > inputValue2) ? inputValue1 : inputValue2;
    }
}
5,655 | #include <cuda_runtime.h>
// Device helper: atomically increments `counter` when x is strictly positive.
// The referenced counter must live in memory atomicAdd can target
// (shared or global); the caller in this file passes a block-shared int.
__device__
void positive(float x, int &counter) {
if (x > 0.) {
atomicAdd(&counter, 1);
}
}
// Counts elements of `data` that are > 0 into *counter.
// Each block accumulates into a shared partial count (one atomic per positive
// element, but block-local), then thread 0 folds the block's partial into the
// global counter with a single global atomic.
__global__
void kernel(float *data, int *counter, unsigned int size) {
__shared__ int shared;
if (threadIdx.x == 0) {
shared = 0;
}
__syncthreads();
auto first_thread = threadIdx.x + blockIdx.x * blockDim.x;
auto grid_size = gridDim.x * blockDim.x;
// Grid-stride loop: each thread visits first_thread, +grid_size, ... < size.
for (auto i = first_thread; i < size; i += grid_size) {
positive(data[i], shared);
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(counter, shared);
}
}
#include <cstdlib>
#include <iostream>
#include <random>
#include <vector>
const unsigned int size = 9999;
// Host driver: fills `size` floats with uniform(-1, 1) samples, counts the
// positive ones on the GPU, and prints the count.
// Note: default_random_engine is unseeded, so the sequence is fixed per
// implementation (deterministic runs).
int main() {
int counter = 0;
std::vector<float> data(size, 0.);
// generate the input data
std::default_random_engine generator;
std::uniform_real_distribution<double> distribution(-1.0, 1.0);
for (auto &x : data)
x = distribution(generator);
float *data_d;
cudaMalloc(&data_d, size * sizeof(float));
cudaMemcpy(data_d, data.data(), size * sizeof(float), cudaMemcpyHostToDevice);
int *counter_d;
cudaMalloc(&counter_d, sizeof(int));
cudaMemset(counter_d, 0, sizeof(int));
kernel<<<32, 32>>>(data_d, counter_d, size);
// Blocking copy: also synchronizes with the kernel before reading the count.
cudaMemcpy(&counter, counter_d, sizeof(int), cudaMemcpyDeviceToHost);
std::cout << counter << std::endl;
cudaFree(counter_d);
cudaFree(data_d);
}
|
5,656 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Flattens a 2-D grid of 2-D blocks into a single linear thread id.
// Fix vs. original: the function computed the id but never returned it —
// falling off the end of a non-void function is undefined behavior, so every
// caller received garbage.
__device__ int getTid()
{
    int bid = blockIdx.y * gridDim.x + blockIdx.x;    // linear block id
    int tid = threadIdx.y * blockDim.x + threadIdx.x; // linear id within block
    int tPB = blockDim.x * blockDim.y ;               // threads per block
    return bid*tPB+tid;
}
// Kernel: C = A x B for row-major int matrices, A is ha x wa, B is wa x wb.
// One thread computes one output element, addressed by the flattened thread
// id from getTid(); threads beyond ha*wb do nothing.
__global__ void mulElement(int *a ,int *b , int *c , int ha , int wb,int wa)
{
int th = getTid();
if(th<(ha*wb))
{
int row = th/wb;  // output row
int col = th%wb;  // output column
int i = 0 , sum = 0;
// Dot product of A's row with B's column.
for(i = 0;i<wa;i++)
{
sum += a[row*wa+i]*b[wb*i+col];
}
c[th] = sum;
}
}
// Interactive driver: reads two matrices and a grid shape from stdin,
// multiplies them on the GPU with mulElement, and prints the result.
// Fixes vs. original: d_b, a, b and t were all leaked; they are now released.
// NOTE(review): the derived block dims bx*by can exceed the 1024 threads/block
// hardware limit for large inputs and small grids — unchanged from original.
int main(void)
{
    int *a,*b,*t,i,j;
    int *d_a,*d_b,*d_t;
    int ha , wa;
    int hb , wb;
    printf("Enter the dimensions of first matrix \n ");
    scanf("%d %d",&ha,&wa);
    printf("Enter the dimensions of second matrix \n");
    scanf("%d %d",&hb,&wb);
    int size1 = sizeof(int)*ha*wa;
    int size2 = sizeof(int)*hb*wb;
    int size3 = sizeof(int)*ha*wb;
    a = (int*)malloc(ha*wa*sizeof(int));
    b = (int*)malloc(hb*wb*sizeof(int));
    t = (int*)malloc(ha*wb*sizeof(int));
    printf("Enter input matrix 1 : \n");
    for(i = 0;i<ha*wa;i++)
        scanf("%d",&a[i]);
    printf("Enter input matrix 2 : \n");
    for(i = 0;i<hb*wb;i++)
        scanf("%d",&b[i]);
    cudaMalloc((void**)&d_a,size1);
    cudaMalloc((void**)&d_b,size2);
    cudaMalloc((void**)&d_t,size3);
    cudaMemcpy(d_a,a,size1,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,size2,cudaMemcpyHostToDevice);
    int gx,gy,bx,by;
    printf("Enter the dimension of the grid \n");
    scanf("%d %d",&gx,&gy);
    // Block dims chosen so grid*block covers the ha x wb output.
    bx = ceil((double)ha/gx);
    by = ceil((double)wb/gy);
    printf("The dimensions of block are : \n %d %d \n",bx,by);
    dim3 grid(gx,gy);
    dim3 block(bx,by);
    mulElement<<<grid,block>>>(d_a,d_b,d_t,ha,wb,wa);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(t,d_t,size3,cudaMemcpyDeviceToHost);
    printf("Result vector is :\n");
    for(i = 0;i<ha;i++)
    {
        for(j = 0;j<wb;j++)
            printf("%d ",t[i*wb+j]);
        printf("\n");
    }
    getchar();
    cudaFree(d_a);
    cudaFree(d_b);  // was leaked
    cudaFree(d_t);
    free(a);        // host buffers were leaked too
    free(b);
    free(t);
    return 0;
}
5,657 | #include "includes.h"
// Inclusive sum-scan (Hillis–Steele) of one block's worth of values in shared
// memory. Launch with dynamic shared memory of blockDim.x * sizeof(unsigned).
// Fixes vs. original:
//  - the early `return` for out-of-range threads skipped the barriers below
//    in divergent control flow (undefined behavior); those threads now stay,
//    contribute 0, and simply skip the global store;
//  - global loads/stores used tIdx (block-local) instead of gIdx, so every
//    block read and wrote element range [0, blockDim.x);
//  - the read and write of neighboring shared slots were not separated by a
//    barrier, racing within an iteration; each step now reads, barriers,
//    then writes;
//  - the offset loop is bounded by blockDim.x (the scan width) rather than
//    numVals.
__global__ void incSumScan_kernel(unsigned int* d_outVals, unsigned int* d_inVals, size_t numVals)
{
    unsigned int tIdx = threadIdx.x;
    unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ unsigned int s_incScan[];
    bool active = (gIdx < numVals);
    // Out-of-range lanes load 0 so they are identity elements for the scan.
    s_incScan[tIdx] = active ? d_inVals[gIdx] : 0u;
    __syncthreads();
    for (unsigned int offset = 1; offset < blockDim.x; offset *= 2)
    {
        unsigned int temp = s_incScan[tIdx];
        if (tIdx >= offset)
            temp += s_incScan[tIdx - offset];
        __syncthreads();   // everyone finished reading before anyone writes
        s_incScan[tIdx] = temp;
        __syncthreads();   // everyone finished writing before the next read
    }
    if (active)
        d_outVals[gIdx] = s_incScan[tIdx];
}
5,658 | #include "includes.h"
// Forward pass, layer 1: layer1[256*k + i] += in[64*k + j] * syn1[j*256 + i],
// accumulated atomically over j. Sizes are hard-coded: 64 inputs, 256 outputs.
// Launch layout inferred from the indexing (confirm at the call site):
// blockDim.x covers i in [0,256), block/grid y covers j in [0,64),
// gridDim.x covers the sample index k.
__global__ void Fprop1(const float* in, const float* syn1, float* layer1)
{
int i = threadIdx.x; //256
int j = blockDim.y*blockIdx.y + threadIdx.y; //64
int k = blockIdx.x; //Data.count
// atomicAdd: multiple j-threads accumulate into the same (k, i) output slot.
atomicAdd(&layer1[256*k + i], in[64*k + j] * syn1[j*256 + i]);
}
5,659 | #include "includes.h"
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ );
// Fills an RGB framebuffer (3 floats per pixel, row-major) with a gradient:
// R = x/width, G = y/height, B = 0.2. Launch with a 2-D grid covering
// at least width x height threads; excess threads exit early.
__global__ void render( float* framebuffer, int width, int height )
{
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    const int y = threadIdx.y + blockIdx.y * blockDim.y;
    const bool inside = (x < width) && (y < height);
    if (!inside)
    {
        return;
    }
    float* pixel = framebuffer + 3 * (y * width + x);
    pixel[0] = float(x) / width;
    pixel[1] = float(y) / height;
    pixel[2] = 0.2f;
}
5,660 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_profiler_api.h>
#include <time.h>
#include <math.h>
#define SIZE 1000
#define BLKS 4
#define THREADSPBLKS 256
#define TILE_WIDTH 8
// One Jacobi-style heat-diffusion step over a width x width chunk:
// interior cells take the 4-neighbor average of g_d into h_d, boundary cells
// copy through, then h_d is copied back into g_d.
// NOTE(review): `itr` is unused inside the kernel.
__global__
void heatCalcKernel(float * g_d,float * h_d, int width, int itr,int count)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int i = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
int id = (row * width) + col;
// NOTE(review): `i` (linearized thread id) and `id` (row*width+col) only
// agree when gridDim.x * TILE_WIDTH == width; otherwise the stencil reads
// below (indexed by i) and the writes (indexed by id) address different cells.
row = (i / width);
col = i % width;
int left = i - 1;
int right = i + 1;
int top = ((row - 1) * width) + col;
int bottom = (((row + 1) * width)+ col);
// Boundary test: first/last column, or (scaled by `count`) first/last row.
if(((i % width) == 0) || ((i % width) == (width - 1)) || ((i * count) < width) || ((i * count) >= (width * (width - 1)))){
h_d[id] = g_d[id];
}else{
h_d[id] = 0.25 * (g_d[top] + g_d[left] + g_d[bottom] + g_d[right]);
}
__syncthreads();
// NOTE(review): __syncthreads() only synchronizes within one block; this
// copy-back can race with neighboring blocks still reading g_d.
g_d[i] = h_d[i];
__syncthreads();
}
// Chunked 2-D heat diffusion on the GPU. The ori_width x ori_width grid is
// processed in `count` chunks of width x width cells per iteration: each chunk
// is staged to the device, one stencil step runs via heatCalcKernel, and the
// result is copied back; this repeats for `itr` iterations.
// Fixes vs. original:
//  - ori_len is computed in 64 bits (100000^2 overflowed int);
//  - the chunk loop incremented `count` instead of `l` (never terminated
//    normally; `count` doubled as the de-facto chunk index);
//  - the staging copies never advanced `index` (only element 0 was written);
//  - the write-back branch tested `count == 0` (unreachable) and offset by
//    `count` where the chunk index `l` was clearly intended — it now mirrors
//    the read-side code exactly.
// NOTE(review): only the first `len` cells of inhost are seeded below, yet
// chunks l > 0 read beyond that range on the first iteration — preserved from
// the original; confirm the intended seeding of the full grid.
void heatCalc()
{
    clock_t tic;
    clock_t toc;
    tic = clock();
    int ori_width = 100000;
    int width = 25000;
    int itr = 50;
    long long len = (long long)width * width;              // cells per chunk
    long long ori_len = (long long)ori_width * ori_width;  // total cells (overflows int)
    float *inhost = (float*)malloc(ori_len*sizeof(float));
    if(inhost == NULL){
        printf("Out of memory\n");
        exit(-1);
    }
    float *outhost =(float*)malloc(ori_len*sizeof(float));
    if(outhost == NULL){
        printf("Out of memory\n");
        exit(-1);
    }
    long long j;
    float * g_d;
    float * h_d;
    int counter = 0;
    /*----------------------------------------------------------------*/
    cudaError_t error;
    cudaDeviceProp dev;
    error = cudaGetDeviceProperties(&dev, 0);
    if(error != cudaSuccess)
    {
        printf("Error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    printf("\nDevice %d:\n", 0);
    printf("name: %s\n",dev.name);
    cudaSetDevice(0);
    /*--------------------------------------------------------------*/
    // Initial conditions: a hot segment (150), fixed borders (80), zero inside.
    for( j = 0; j < len; j++){
        if((j >= 10) && (j <= 30)){
            inhost[j] = 150;
            if(j == 10){
                printf("%f\n",inhost[j]);
            }
        }else if((j < width) || ((j % width) == 0) || ((j % width) == (width - 1)) || (j >= (width * (width - 1)))){
            inhost[j] = 80;
            if(j == 1){
                printf("%f\n",inhost[j]);
            }
        }else{
            inhost[j] = 0;
        }
        //inhost[j] = j;
    }
    for( j = 0; j < len; j++){
        outhost[j] = 0;
    }
    printf("---------\n");
    for(counter = 0; counter < itr; counter++){
        int count = (int)((ori_len + len - 1) / len);   // number of chunks
        for(int l = 0; l < count; l++){                 // FIX: was `count++`
            float *inhost1 = (float*)malloc(len*sizeof(float));
            float *outhost1 = (float*)malloc(len*sizeof(float));
            long long index = 0;
            if(l == 0){
                for(j = 0; j < len; j++){
                    inhost1[index++] = inhost[j];       // FIX: index now advances
                }
            }else{
                // Later chunks start one row earlier so the stencil has its
                // top neighbors available.
                for(j = ((long long)l * len) - width; j < ((long long)(l + 1) * len) - width; j++){
                    inhost1[index++] = inhost[j];
                }
            }
            cudaMalloc((void**)&g_d, (len*sizeof(float)));
            //intialize the matrix
            cudaMemcpy(g_d,inhost1,len*sizeof(float),cudaMemcpyHostToDevice);
            cudaMalloc((void**)&h_d, len*sizeof(float));
            int grid = (width + TILE_WIDTH - 1) / TILE_WIDTH;  // ceil-div
            dim3 dimGrid(grid,grid);
            dim3 dimBlock(TILE_WIDTH,TILE_WIDTH);
            // kernel invocation; (l + 1) is the 1-based chunk index used by
            // the kernel's boundary tests.
            heatCalcKernel<<<dimGrid,dimBlock>>>(g_d,h_d,width,itr,(l + 1));
            cudaError_t err = cudaGetLastError();
            if (err != cudaSuccess)
                printf("Error: %s\n", cudaGetErrorString(err));
            //transfer C_d from device to host
            cudaMemcpy(outhost1, h_d, (len*sizeof(float)), cudaMemcpyDeviceToHost);
            err = cudaGetLastError();
            if (err != cudaSuccess)
                printf("Error: %s\n", cudaGetErrorString(err));
            cudaFree(g_d);
            cudaFree(h_d);
            if(l == 0){                                  // FIX: was `count == 0`
                for(j = 0; j < len; j++){
                    outhost[j] = outhost1[j];
                }
            }else{
                long long add = ((long long)l * len) - width;  // FIX: offset by chunk index
                for(j = 0; j < len; j++){
                    outhost[j + add] = outhost1[j];
                }
            }
            free(inhost1);
            free(outhost1);
        }
        // Feed this iteration's result back in as the next iteration's input.
        for(j = 0; j < (ori_len - width); j++){
            inhost[j] = outhost[j];
            outhost[j] = 0;
        }
    }
    toc = clock();
    double time_taken_parallel = (double)(toc -tic)/CLOCKS_PER_SEC; // in seconds
    printf("time taken: %f\n", time_taken_parallel);
    free(inhost);
    free(outhost);
}
// Entry point: runs the chunked GPU heat simulation once.
int main()
{
heatCalc();
return 0;
}
|
5,661 | #include "includes.h"
// Converts a single-channel float image into opaque RGBA: each pixel's value
// is replicated into R, G and B (implicit float->uchar conversion) with
// alpha fixed at 255. One thread per pixel; out-of-range threads exit early.
__global__ void convertFloatToRGBA_kernel(uchar4 *out_image, const float *in_image, int width, int height) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int idx = y * width + x;
const float val = in_image[idx];
uchar4 rgba;
rgba.x = val;
rgba.y = val;
rgba.z = val;
rgba.w = 255;
out_image[idx] = rgba;
}
5,662 | #include "includes.h"
// Updates the f-component (z[3*idx+2]) of an interleaved (zx, zy, f) field:
// f = div(z) - g * invlambda, where the divergence uses backward differences
// and drops out-of-range neighbor terms at the image border.
// One thread per pixel on an nx x ny grid.
__global__ void fupdate_inter(float *z, float *g, float invlambda, int nx, int ny)
{
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
int idx = px + py*nx;
float DIVZ;
if (px<nx && py<ny)
{
// compute the divergence: x-term plus y-term, each guarded at the border
DIVZ = 0;
if ((px<(nx - 1))) DIVZ += z[3 * (idx)+0];
if ((px>0)) DIVZ -= z[3 * (idx - 1) + 0];
if ((py<(ny - 1))) DIVZ += z[3 * (idx)+1];
if ((py>0)) DIVZ -= z[3 * (idx - nx) + 1];
// update f
z[3 * idx + 2] = DIVZ - g[idx] * invlambda;
}
}
// Monte Carlo tally of argmax frequencies, one observation per thread.
// For each observation i (of n), runs M draws: each draw adds the noise
// rands[k*M + m] to alphas[k*n + i] for k in [0, K), finds the argmax over k,
// and tallies it. probs[k*n + i] receives tally / M.
// Shared memory: 2 * K * blockDim.x doubles — first half for the tallies,
// second half for the per-thread work vector w (sized at launch).
extern "C"
__global__ void compute_probs(double* alphas, double* rands, double* probs, int n, int K, int M) {
// assign overall id/index of the thread = id of row
int i = blockIdx.x * blockDim.x + threadIdx.x;
int threads_per_block = blockDim.x;
// set up shared memory: half for probs and half for w
extern __shared__ double shared[];
double* probs_shared = shared;
double* w = &shared[K*threads_per_block]; // shared mem is one big block, so need to index into latter portion of it to use for w
if(i < n) {
double maxval;
int m, k;
int maxind;
double M_d = (double) M;
// initialize shared memory probs
// (layout is [k][threadIdx.x] so a warp's accesses stay coalesced per k)
for(k = 0; k < K; ++k) {
probs_shared[k*threads_per_block + threadIdx.x] = 0.0;
}
// core computation
for(m = 0; m < M; ++m){ // loop over Monte Carlo iterations
for(k = 0; k < K; ++k){ // generate W ~ N(alpha, 1)
w[k*threads_per_block + threadIdx.x] = alphas[k*n + i] + rands[k*M + m];
}
// argmax over k; scan favors the last index on ties (starts at K-1).
maxind = K-1;
maxval = w[(K-1)*threads_per_block + threadIdx.x];
for(k = 0; k < (K-1); ++k){
if(w[k*threads_per_block + threadIdx.x] > maxval){
maxind = k;
maxval = w[k*threads_per_block + threadIdx.x];
}
}
probs_shared[maxind*threads_per_block + threadIdx.x] += 1.0;
}
// normalize tallies to frequencies
for(k = 0; k < K; ++k) {
probs_shared[k*threads_per_block + threadIdx.x] /= M_d;
}
// copy to device memory so can be returned to CPU
for(k = 0; k < K; ++k) {
probs[k*n + i] = probs_shared[k*threads_per_block + threadIdx.x];
}
}
}
|
5,664 | //pass
//--blockDim=10 --gridDim=64 --no-inline
#include <stdio.h>
#include <cuda.h>
#include <assert.h>
#define N 2
// GPUVerify test kernel: every thread writes its own shared slot, barriers,
// then reads its right neighbor's slot.
// NOTE(review): with blockDim == 10, thread 9 reads A[10] — one past the end
// of the shared array. The harness header marks this case "pass"; confirm
// whether the OOB read is intentional for the test.
__global__ void foo() {
__shared__ int A[10];
A[threadIdx.x] = 2;
__syncthreads (); // avoids a data race between the write above and the read below
int x = A[threadIdx.x + 1];
}
|
5,665 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#define min(a,b) ((a) < (b) ? (a) : (b))
#define bufferSize 1024
#define NUMTHREAD 256
#define NUMBLOCK 64
char buffer[bufferSize];
int deviceNum = 1, debug = 0, randGen = 0, lenS, m;
int nSerialInputs =1; // the number of serial inputs
unsigned char *rS;
unsigned lenC, lenD, n, power2[32];
unsigned bitMask[32] = {
0x00000000, 0x80000000, 0xC0000000, 0xE0000000,
0xF0000000, 0xF8000000, 0xFC000000, 0xFE000000,
0xFF000000, 0xFF800000, 0xFFC00000, 0xFFE00000,
0xFFF00000, 0xFFF80000, 0xFFFC0000, 0xFFFE0000,
0xFFFF0000, 0xFFFF8000, 0xFFFFC000, 0xFFFFE000,
0xFFFFF000, 0xFFFFF800, 0xFFFFFC00, 0xFFFFFE00,
0xFFFFFF00, 0xFFFFFF80, 0xFFFFFFC0, 0xFFFFFFE0,
0xFFFFFFF0, 0xFFFFFFF8, 0xFFFFFFFC, 0xFFFFFFFE,
};
unsigned ***bitRS, *bitLenS, **bitC, *bitLenC, **bitD, **bitTmp;
unsigned ***gpuBitRS, **gpuBitC, **gpuBitD, **gpuBitTmp, **gpuParity;
cudaStream_t *streams; // streams
// Prints `length` bits of the packed bit array bitX, MSB of word 0 first,
// one '0'/'1' per bit separated by spaces, followed by a newline.
void bitPrint(unsigned *bitX, int length){
int i, j, bitPos, tmp;
bitPos = 31;  // current bit within word j, scanned from the MSB down
j = 0;
for(i=0;i<length;i++){
tmp = bitX[j] & power2[bitPos];
printf("%1d ",tmp?1:0);
bitPos--;
if (bitPos == -1) bitPos = 31, j++;  // advance to the next 32-bit word
}
printf("\n");
}
// Parses the command line, loads (or randomly generates) the input bit string
// rS, then builds the packed, pre-shifted bit arrays bitRS[stream][shift]
// plus the working buffers bitC/bitD/bitTmp for each of nSerialInputs streams.
// Exits with a usage message on bad arguments or a missing input file.
void init(int argc, char* argv[]){
int i, j, bitPos;
FILE *fp = NULL;
char* token;
unsigned short randData[3];
struct timeval tv;
/* an extra option (nSerialInputs) determines the number of input streams */
if (argc < 3){
printf("Usage: ./gpuBitStream filename length -s NumberofSerialInputs -b debugLvl -d deviceNum\n");
printf("\tto generate random string: ./gpuBitStream randGen length\n");
exit(1);
}
// Optional flags come in "-x value" pairs starting at argv[3].
if (argc > 3){
i = 3;
while (i<argc){
if (!strcmp(argv[i],"-s")) sscanf(argv[i+1],"%d",&nSerialInputs);
if (!strcmp(argv[i],"-b")) sscanf(argv[i+1],"%d",&debug);
if (!strcmp(argv[i],"-d")) sscanf(argv[i+1],"%d",&deviceNum);
i += 2;
}
}
if (strcmp(argv[1],"randGen") == 0) randGen = 1;
else{
randGen = 0;
fp = fopen(argv[1],"r");
if (!fp){
printf("%s doesn't exist\n",argv[1]);
exit(1);
}
}
sscanf(argv[2],"%d",&lenS);
if (lenS <= 0){
printf("positive length needed\n");
exit(1);
}
rS = (unsigned char*)malloc(sizeof(unsigned char)*lenS);
if (randGen){
// Seed erand48 from the current time and generate random bits,
// forcing the lowest-order bit position (rS[lenS-1]) to 1.
gettimeofday(&tv,NULL);
randData[0] = (unsigned short) tv.tv_usec;
randData[1] = (unsigned short) tv.tv_sec;
randData[2] = (unsigned short) (tv.tv_sec >> 16);
rS[lenS-1] = 1;
for(i=2;i<=lenS;i++) rS[lenS-i] = (erand48(randData) > 0.5) ? 1 : 0;
}
else{
// Read whitespace-separated 0/1 tokens; rS is stored reversed
// (first token lands at rS[lenS-1]).
i = 0;
while (fgets(buffer,bufferSize,fp) && i<lenS){
token = strtok(buffer," ");
while (token){
rS[lenS-i-1] = atoi(token);
i++;
token = strtok(NULL," ");
}
}
fclose(fp);
if (i != lenS){
printf("file has only %d bits\n",i);
exit(1);
}
}
power2[0] = 1;
for(i=1;i<32;i++) power2[i] = 2*power2[i-1];
/* per-stream arrays, one set for each of the nSerialInputs streams */
bitRS = new unsigned**[nSerialInputs];
bitC = new unsigned*[nSerialInputs];
bitD = new unsigned*[nSerialInputs];
bitTmp = new unsigned*[nSerialInputs];
bitLenS = new unsigned[nSerialInputs];
bitLenC = new unsigned[nSerialInputs];
for(i=0; i<nSerialInputs; i++){
// bitLenS is same for all streams for now... later might change
bitLenS[i] = (lenS+31)/32;
bitRS[i] = new unsigned*[32];
for(j=0; j<32;j++)
bitRS[i][j] = (unsigned*)malloc(sizeof(unsigned)*(bitLenS[i]+1));
bitC[i] = (unsigned*)malloc(sizeof(unsigned)*bitLenS[i]);
bitD[i] = (unsigned*)malloc(sizeof(unsigned)*(bitLenS[i]+1));
// bitD keeps one guard word BEFORE the working pointer: store 0 at
// slot 0, then advance the pointer so bitD[i][-1] is valid.
bitD[i][0] = 0;
bitD[i]++;
bitTmp[i] = (unsigned*)malloc(sizeof(unsigned)*bitLenS[i]);
// Pack rS into 32-bit words, MSB first, into the shift-0 array.
j = 0;
bitRS[i][0][j] = 0;
bitPos = 31;
int ix;
for(ix=0;ix<lenS;ix++){
if (rS[ix]) bitRS[i][0][j] |= power2[bitPos];
bitPos--;
if (bitPos == -1){
bitPos = 31;
j++;
bitRS[i][0][j] = 0;
}
}
bitRS[i][0][bitLenS[i]] = 0;
// Precompute all 31 left-shifted copies so kernels can index by shift.
for(ix=1;ix<32;ix++){
for(j=0;j<bitLenS[i];j++)
bitRS[i][ix][j] = (bitRS[i][0][j] << ix) |
((bitRS[i][0][j+1] & bitMask[ix]) >> (32-ix));
bitRS[i][ix][bitLenS[i]] = 0;
}
}
}
// Selects the requested CUDA device, creates the stream used by gpuBitSerial,
// and allocates/uploads the per-stream device buffers (gpuBitRS with all 32
// pre-shifted copies, plus gpuBitC/gpuBitD/gpuBitTmp/gpuParity).
// Must be called after init() has filled bitRS/bitLenS.
void initGPU(void){
int i,j;
int num_devices=0;
cudaGetDeviceCount(&num_devices);
// check if the command-line chosen device ID is within range, exit if not
if( deviceNum >= num_devices )
{
printf("choose device ID between 0 and %d\n", num_devices-1);
exit(1);
}
cudaSetDevice(deviceNum);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, deviceNum);
printf("> Device name : %s\n", deviceProp.name );
printf("> CUDA Capable SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
/* device-side mirrors of the per-stream host arrays */
gpuBitRS = new unsigned**[nSerialInputs];
gpuBitC = new unsigned*[nSerialInputs];
gpuBitD = new unsigned*[nSerialInputs];
gpuBitTmp = new unsigned*[nSerialInputs];
gpuParity = new unsigned*[nSerialInputs];
// allocate and initialize an array of stream handles (currently just one)
streams = (cudaStream_t*) malloc(1 * sizeof(cudaStream_t));
for(i=0; i<1; i++)
cudaStreamCreate(&(streams[i]));
for(j=0; j<nSerialInputs; j++){
// Upload all 32 pre-shifted copies of the packed input bits.
gpuBitRS[j] = new unsigned*[32];
for(i=0;i<32;i++){
cudaMalloc((void**)&gpuBitRS[j][i],sizeof(unsigned)*(bitLenS[j]+1));
cudaMemcpy(gpuBitRS[j][i],bitRS[j][i],sizeof(unsigned)*(bitLenS[j]+1),cudaMemcpyHostToDevice);
}
cudaMalloc((void**)&gpuBitC[j],sizeof(unsigned)*bitLenS[j]);
// gpuBitD carries one extra word (guard slot, see init()/gpuBitSerial).
cudaMalloc((void**)&gpuBitD[j],sizeof(unsigned)*(bitLenS[j]+1));
cudaMalloc((void**)&gpuBitTmp[j],sizeof(unsigned)*bitLenS[j]);
// One parity word per block for kernel1's first-stage reduction.
cudaMalloc((void**)&gpuParity[j],sizeof(unsigned)*NUMBLOCK);
}
}
// Stage 1 of the parity reduction: each thread XOR-accumulates C[i] & rS[i]
// over its strided share of i in [0, lenC] (inclusive), the block reduces the
// partials in shared memory, and thread 0 writes the block's parity word to
// parity[blockIdx.x]. Launch with up to NUMBLOCK blocks of NUMTHREAD threads.
// Fix vs. original: threads with myC > lenC returned early, skipping the
// __syncthreads() below inside divergent control flow (undefined behavior).
// Such threads now fall through with a 0 contribution instead.
__global__ void kernel1(unsigned* rS, unsigned* C, unsigned* parity, int lenC){
__shared__ unsigned sParity[NUMTHREAD];
int myC = blockIdx.x*blockDim.x + threadIdx.x;
sParity[threadIdx.x] = 0;
// Strided accumulation; out-of-range threads skip the loop entirely.
while (myC <= lenC){
sParity[threadIdx.x] ^= C[myC] & rS[myC];
myC += NUMTHREAD*NUMBLOCK;
}
__syncthreads();
// Tree reduction; the NUMTHREAD >= X guards are resolved at compile time.
if (NUMTHREAD >= 1024){
if (threadIdx.x < 512)
sParity[threadIdx.x] ^= sParity[threadIdx.x+512];
__syncthreads();
}
if (NUMTHREAD >= 512){
if (threadIdx.x < 256)
sParity[threadIdx.x] ^= sParity[threadIdx.x+256];
__syncthreads();
}
if (NUMTHREAD >= 256){
if (threadIdx.x < 128)
sParity[threadIdx.x] ^= sParity[threadIdx.x+128];
__syncthreads();
}
if (NUMTHREAD >= 128){
if (threadIdx.x < 64)
sParity[threadIdx.x] ^= sParity[threadIdx.x+64];
__syncthreads();
}
// Final 32 lanes reduce through a volatile alias without barriers —
// the pre-Volta implicit-warp-synchrony idiom.
// NOTE(review): not guaranteed under independent thread scheduling (Volta+).
if (threadIdx.x < 32){
volatile unsigned *tmem = sParity;
if (NUMTHREAD >= 64)
tmem[threadIdx.x] ^= tmem[threadIdx.x+32];
if (NUMTHREAD >= 32)
tmem[threadIdx.x] ^= tmem[threadIdx.x+16];
if (NUMTHREAD >= 16)
tmem[threadIdx.x] ^= tmem[threadIdx.x+8];
if (NUMTHREAD >= 8)
tmem[threadIdx.x] ^= tmem[threadIdx.x+4];
if (NUMTHREAD >= 4)
tmem[threadIdx.x] ^= tmem[threadIdx.x+2];
if (NUMTHREAD >= 2)
tmem[threadIdx.x] ^= tmem[threadIdx.x+1];
}
if (threadIdx.x == 0) parity[blockIdx.x] = sParity[0];
}
// Stage 2 of the parity reduction: XORs the `num` per-block parities produced
// by kernel1 down into parity[0]. Launch with a single block of NUMBLOCK
// threads; lanes beyond `num` load 0 (identity for XOR).
__global__ void kernel2(unsigned* parity, int num){
__shared__ unsigned sParity[NUMBLOCK];
sParity[threadIdx.x] = (threadIdx.x < num) ? parity[threadIdx.x] : 0;
__syncthreads();
// Tree reduction; the NUMBLOCK >= X guards are resolved at compile time.
if (NUMBLOCK >= 1024){
if (threadIdx.x < 512)
sParity[threadIdx.x] ^= sParity[threadIdx.x+512];
__syncthreads();
}
if (NUMBLOCK >= 512){
if (threadIdx.x < 256)
sParity[threadIdx.x] ^= sParity[threadIdx.x+256];
__syncthreads();
}
if (NUMBLOCK >= 256){
if (threadIdx.x < 128)
sParity[threadIdx.x] ^= sParity[threadIdx.x+128];
__syncthreads();
}
if (NUMBLOCK >= 128){
if (threadIdx.x < 64)
sParity[threadIdx.x] ^= sParity[threadIdx.x+64];
__syncthreads();
}
// Final 32 lanes reduce via a volatile alias without barriers —
// the pre-Volta implicit-warp-synchrony idiom.
// NOTE(review): not guaranteed under independent thread scheduling (Volta+).
if (threadIdx.x < 32){
volatile unsigned *tmem = sParity;
if (NUMBLOCK >= 64)
tmem[threadIdx.x] ^= tmem[threadIdx.x+32];
if (NUMBLOCK >= 32)
tmem[threadIdx.x] ^= tmem[threadIdx.x+16];
if (NUMBLOCK >= 16)
tmem[threadIdx.x] ^= tmem[threadIdx.x+8];
if (NUMBLOCK >= 8)
tmem[threadIdx.x] ^= tmem[threadIdx.x+4];
if (NUMBLOCK >= 4)
tmem[threadIdx.x] ^= tmem[threadIdx.x+2];
if (NUMBLOCK >= 2)
tmem[threadIdx.x] ^= tmem[threadIdx.x+1];
}
if (threadIdx.x == 0) parity[0] = sParity[0];
}
// C[i] ^= (D shifted right by shiftD bits across word boundaries) for `num`
// 32-bit words, strided by the full grid width NUMTHREAD*NUMBLOCK.
// `mask` selects the bits of D[i] that land in C[i]; the remaining high bits
// come from the previous word D[i-1].
// NOTE(review): myIndex == 0 reads D[-1]; the caller advances the D pointer
// past a zeroed guard word (bitD[i]++ / gpuBitD++ in init/gpuBitSerial) so
// that slot exists — confirm every call site preserves that offset.
__global__ void
kernel3(unsigned* C, unsigned *D, int shiftD, unsigned mask, int num){
int myIndex = blockIdx.x*blockDim.x + threadIdx.x;
while (myIndex < num){
C[myIndex] ^= ((D[myIndex-1] & ~mask) << (32-shiftD)) |
((D[myIndex] & mask) >> shiftD);
myIndex += NUMTHREAD*NUMBLOCK;
}
}
// Word-wise XOR accumulate: C[i] ^= D[i] for i in [0, num), with each thread
// striding by the full fixed grid width NUMTHREAD*NUMBLOCK.
__global__ void kernel4(unsigned* C, unsigned* D, int num){
const int stride = NUMTHREAD*NUMBLOCK;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < num; i += stride){
C[i] ^= D[i];
}
}
/* Bit-serial linear-complexity / LFSR-synthesis pass on the GPU for the
 * serial input selected by input_index.  All copies and kernel launches
 * run on streams[0]; C and D live in packed 32-bit words (bit 31 of word 0
 * is the constant term).
 *
 * Fixes vs. previous version (two host/device races):
 *  - cudaStreamSynchronize() after the async device->host copy of the
 *    parity word: the host inspected `d` before the copy was guaranteed
 *    complete (cudaMemcpyAsync into pageable stack memory gives no
 *    completion guarantee without an explicit sync).
 *  - cudaStreamSynchronize() before the host scans bitC[] at the end,
 *    for the same reason.
 */
void gpuBitSerial(int input_index){
    // the input_index determines which serial input should be executed
    int i, numBlock, q, r, upperBound, wordCnt, shiftD, startC, word, bitPos;
    unsigned d;
    // Seed C(x) = D(x) = 1 (bit 31 of word 0); clear the remaining words.
    bitD[input_index][0] = bitC[input_index][0] = power2[31];
    for(i=1;i<bitLenS[input_index];i++) bitD[input_index][i] = bitC[input_index][i] = 0;
    cudaMemcpyAsync(gpuBitC[input_index],bitC[input_index],sizeof(int)*bitLenS[input_index],cudaMemcpyHostToDevice,streams[0]);
    // Copy D with one extra guard word in front so kernel3 may read D[-1],
    // then advance the device pointer past the guard.
    cudaMemcpyAsync(gpuBitD[input_index],bitD[input_index]-1,sizeof(int)*(bitLenS[input_index]+1),cudaMemcpyHostToDevice,streams[0]);
    gpuBitD[input_index]++;
    n = lenC = lenD = 0;
    m = -1;
    while (n<lenS){
        q = (lenS-1-n) >> 5;            // word index of the current input bit
        r = (lenS-1-n) & ~bitMask[27];  // bit offset within that word
        bitLenC[input_index] = (lenC+1+31)>>5;   // words holding C
        numBlock = (bitLenC[input_index]+NUMTHREAD-1)/NUMTHREAD;
        numBlock = min(numBlock,NUMBLOCK);
        // Discrepancy: two-stage parity reduction over (input AND C).
        kernel1<<<numBlock,NUMTHREAD, 0, streams[0]>>>(gpuBitRS[input_index][r]+q,gpuBitC[input_index],gpuParity[input_index],bitLenC[input_index]);
        kernel2<<<1,NUMBLOCK, 0, streams[0]>>>(gpuParity[input_index],numBlock);
        cudaMemcpyAsync(&d,gpuParity[input_index],sizeof(unsigned),cudaMemcpyDeviceToHost,streams[0]);
        // FIX: wait for the copy to land before the host inspects d.
        cudaStreamSynchronize(streams[0]);
        // Bit-twiddling population count; the discrepancy is its parity.
        d = d - ((d >> 1) & 0x55555555);
        d = (d & 0x33333333) + ((d >> 2) & 0x33333333);
        d = (((d + (d >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
        if (d & power2[0]){
            // Snapshot C before modifying it if the C/D roles will swap below.
            if (lenC<=(n>>1))
                cudaMemcpyAsync(gpuBitTmp[input_index],gpuBitC[input_index],sizeof(unsigned)*bitLenC[input_index],cudaMemcpyDeviceToDevice,streams[0]);
            upperBound = min(lenD+1,lenS+m-n);
            startC = (n-m) >> 5;             // whole-word shift of D
            shiftD = (n-m) & ~bitMask[27];   // sub-word shift of D
            wordCnt = 0;
            if (shiftD){
                upperBound -= (32-shiftD);
                wordCnt++;
            }
            wordCnt += (upperBound+31) >> 5;
            numBlock = (wordCnt+NUMTHREAD-1)/NUMTHREAD;
            numBlock = min(numBlock,NUMBLOCK);
            // C(x) ^= x^(n-m) * D(x); kernel3 handles the sub-word shift.
            if (shiftD)
                kernel3<<<numBlock,NUMTHREAD, 0, streams[0]>>>(gpuBitC[input_index]+startC,gpuBitD[input_index],shiftD,bitMask[32-shiftD],wordCnt);
            else
                kernel4<<<numBlock,NUMTHREAD, 0, streams[0]>>>(gpuBitC[input_index]+startC,gpuBitD[input_index],wordCnt);
            if (lenC<=(n>>1)){
                // Swap roles: old C becomes the new D.
                cudaMemcpyAsync(gpuBitD[input_index],gpuBitTmp[input_index],sizeof(unsigned)*bitLenC[input_index],cudaMemcpyDeviceToDevice,streams[0]);
                lenD = lenC;
                lenC = n+1-lenC;
                m = n;
            }
        }
        n++;
    }
    cudaMemcpyAsync(bitC[input_index],gpuBitC[input_index],sizeof(int)*bitLenS[input_index],cudaMemcpyDeviceToHost,streams[0]);
    // FIX: make sure bitC[] has arrived on the host before scanning it.
    cudaStreamSynchronize(streams[0]);
    // Trim trailing zero bits of C to find the true degree lenC.
    word = (lenC+1) >> 5;
    bitPos = 32 - ((lenC+1) & ~bitMask[27]);
    if (bitPos == 32){
        bitPos = 0;
        word--;
    }
    while(1){
        if ((bitC[input_index][word] & power2[bitPos]) == 0) lenC--;
        else break;
        bitPos++;
        if (bitPos == 32){
            bitPos = 0;
            word--;
        }
    }
    if (debug){
        printf("gpuBitSerial: degree is %d for input: %d\n",lenC,input_index);
        bitPrint(bitC[input_index],lenC+1);
    }
}
// Driver: initialize host and GPU state, run gpuBitSerial() over every
// serial input, and report the total wall-clock time via gettimeofday().
int main(int argc, char *argv[]){
    struct timeval tv1, tv2;
    init(argc, argv);
    initGPU();
    printf("input length %d\n", lenS);
    gettimeofday(&tv1, NULL);
    for (int i = 0; i < nSerialInputs; i++)
        gpuBitSerial(i);
    gettimeofday(&tv2, NULL);
    // Normalize the (sec, usec) difference so usec is non-negative.
    int sec = (int)(tv2.tv_sec - tv1.tv_sec);
    int usec = (int)(tv2.tv_usec - tv1.tv_usec);
    if (usec < 0) {
        sec--;
        usec += 1000000;
    }
    printf("gpuBitSerial for %d inputs: %f sec\n", nSerialInputs, sec + usec/1000000.0);
    return 0;
}
|
5,666 | // source https://www.computer-graphics.se/hello-world-for-cuda.html
// This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA
// with an array of offsets. Then the offsets are added in parallel
// to produce the string "World!"
// By Ingemar Ragnemalm 2010
// nvcc hello-world.cu -L /usr/local/cuda/lib -lcudart -o hello-world
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <unistd.h>
const int N = 16;
const int blocksize = 16;
// Adds the per-element offset b[idx] to the character a[idx]; with the
// fixture data in main this turns "Hello " into "World!".  One thread per
// element, guarded so launches wider than N stay in bounds.
__global__
void hello(char *a, int *b)
{
    const int idx = threadIdx.x;
    if (idx >= N)
        return;
    a[idx] += b[idx];
}
// Report a CUDA runtime error and abort the process.
//   err  - the cudaError code that was returned
//   line - the source line (__LINE__) where the failure was detected
// FIX: the message previously printed only the numeric error code;
// cudaGetErrorString() adds the human-readable description.
void exitWithFailure(cudaError err, int line)
{
    printf("Error %d (%s) at line %d\n", err, cudaGetErrorString(err), line);
    exit(1);
}
/* Hello-world driver: prints "Hello ", adds device-side offsets, prints
 * "World!".
 *
 * Fixes vs. previous version:
 *  - the memcpy of b set `err` but the check only happened AFTER the
 *    kernel launch, mis-attributing any failure; it is checked immediately.
 *  - kernel launches do not return a cudaError -- cudaGetLastError() is
 *    now fetched after the launch to catch configuration errors.
 *  - the final cudaFree(bd) result was ignored; it is checked too.
 */
int main()
{
    char a[N] = "Hello \0\0\0\0\0\0";
    int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    char *ad;
    int *bd;
    const int csize = N*sizeof(char);
    const int isize = N*sizeof(int);
    printf("%s", a);
    cudaError err;
    err = cudaMalloc( (void**)&ad, csize );
    if (err != cudaSuccess) exitWithFailure(err, __LINE__);
    err = cudaMalloc( (void**)&bd, isize );
    if (err != cudaSuccess) exitWithFailure(err, __LINE__);
    err = cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
    if (err != cudaSuccess) exitWithFailure(err, __LINE__);
    err = cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
    if (err != cudaSuccess) exitWithFailure(err, __LINE__);  // FIX: check at the source
    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );
    hello<<<dimGrid, dimBlock>>>(ad, bd);
    err = cudaGetLastError();  // FIX: fetch launch errors explicitly
    if (err != cudaSuccess) exitWithFailure(err, __LINE__);
    err = cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
    if (err != cudaSuccess) exitWithFailure(err, __LINE__);
    err = cudaFree( ad );
    if (err != cudaSuccess) exitWithFailure(err, __LINE__);
    err = cudaFree( bd );
    if (err != cudaSuccess) exitWithFailure(err, __LINE__);
    printf("%s\n", a);
    return EXIT_SUCCESS;
}
|
5,667 | #include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#include<time.h>
#include<float.h>
__constant__ int PIC[28*28];
// First convolution layer: 5x5 "valid" convolution of the 28x28 image held
// in constant memory (PIC), one filter per block.
//   filterd - 32 filters of 25 weights each, row-major
//   resultd - output, 32 maps of 24x24, row-major
// Launch contract: blockIdx.x selects the filter; threadIdx.(y,x) is the
// output pixel; blockDim must cover at least 24x24 AND 5x5 (host launches
// 32x32 so the __syncthreads() below is reached by all threads).
// FIX: the old unrolled sum's tap 21 read PIC row i+3 instead of i+4
// (taps 20 and 22 both used i+4), corrupting every output pixel; the
// rewrite as a loop over the 5x5 window removes the hand-unrolled typo.
__global__ void conv1(int *filterd, int *resultd){
    const int xsize = 28;
    const int filterdim = 5;
    const int outdim = xsize - filterdim + 1;  // 24
    __shared__ int fil[25];
    int i = threadIdx.y;
    int j = threadIdx.x;
    int l = blockIdx.x;
    // Stage this block's filter into shared memory (5x5 threads do the load).
    if (i < filterdim && j < filterdim) {
        fil[i*filterdim + j] = filterd[l*25 + i*filterdim + j];
    }
    __syncthreads();
    if (i < outdim && j < outdim) {
        int sum = 0;
        for (int fi = 0; fi < filterdim; fi++) {
            for (int fj = 0; fj < filterdim; fj++) {
                sum += fil[fi*filterdim + fj] * PIC[xsize*(i+fi) + (j+fj)];
            }
        }
        resultd[l*outdim*outdim + i*outdim + j] = sum;
    }
}
// 2x2 max-pooling (stride 2) of one 24x24 feature map per block, producing
// a 12x12 output map.  blockIdx.x selects the map; threadIdx.(y,x) is the
// output cell (host launches 16x16 threads; only the 12x12 corner works).
// FIX: the load phase staged the shared tile at row stride 2*xsize and
// column stride 2 (max[i*2*xsize + j*2]) but the compare phase read it at
// stride xsize/1 (max[i*xsize + j]), so every thread with j > 0 pooled the
// wrong 2x2 window; reads now mirror the writes.
__global__ void maxpooling(int *maxip1d, int *maxop1d){
    int i = threadIdx.y;
    int j = threadIdx.x;
    int l = blockIdx.x;
    const int xsize = 24;
    int offset = l*xsize*xsize;
    __shared__ int max[576];
    // Each of the 12x12 active threads stages its own 2x2 input window.
    if (i < 12 && j < 12) {
        max[i*2*xsize + j*2]      = maxip1d[offset + i*2*xsize + j*2];
        max[i*2*xsize + j*2 + 1]  = maxip1d[offset + i*2*xsize + j*2 + 1];
        max[i*2*xsize + j*2 + 24] = maxip1d[offset + i*2*xsize + j*2 + 24];
        max[i*2*xsize + j*2 + 25] = maxip1d[offset + i*2*xsize + j*2 + 25];
    }
    __syncthreads();
    if (i < 12 && j < 12) {
        int a = max[i*2*xsize + j*2];
        int b = max[i*2*xsize + j*2 + 1];
        int c = max[i*2*xsize + j*2 + 24];
        int d = max[i*2*xsize + j*2 + 25];
        int m1 = (a >= b) ? a : b;
        int m2 = (c >= d) ? c : d;
        maxop1d[l*144 + i*12 + j] = (m1 >= m2) ? m1 : m2;
    }
}
// Second convolution layer: each block produces one of 64 output maps by
// summing 5x5 "valid" convolutions over all 32 input maps (12x12 each),
// giving an 8x8 output map.
//   cip2d    - 32 input maps of 12x12
//   filter2d - per output map: 32 x 25 weights (800 per map), row-major
//   cop2d    - 64 output maps of 8x8
// Launch contract: blockIdx.x = output map, threadIdx.(y,x) = output pixel.
// FIX: as in conv1, the old unrolled sum's tap 21 read input row i+3
// instead of i+4; the loop rewrite removes the typo.
__global__ void conv2(int *cip2d, int *filter2d, int *cop2d){
    int i = threadIdx.y;
    int j = threadIdx.x;
    int l = blockIdx.x;
    const int di = 12;            // input map width/height
    const int disquare = di*di;   // elements per input map
    const int fdim = 5;
    int lstar = l*800;            // start of this output map's filter bank
    if (i < 8 && j < 8) {
        int sum = 0;
        int k = 0;  // running tap index within the filter bank
        for (int m = 0; m < 32; m++) {
            for (int fi = 0; fi < fdim; fi++) {
                for (int fj = 0; fj < fdim; fj++) {
                    sum += filter2d[lstar + k] * cip2d[m*disquare + di*(i+fi) + (j+fj)];
                    k++;
                }
            }
        }
        cop2d[l*64 + i*8 + j] = sum;
    }
}
// 2x2 max-pooling (stride 2) of one 8x8 map per block down to 4x4.
// blockIdx.x selects the map; threadIdx.(y,x) in the 4x4 corner writes one
// output cell (host launches 8x8 threads so all reach __syncthreads()).
// FIX: the shared tile was loaded from maxop2d -- the OUTPUT buffer --
// instead of maxip2d, so the kernel pooled uninitialized memory and never
// read its input.
__global__ void maxpool(int *maxip2d, int *maxop2d){
    int i = threadIdx.y;
    int j = threadIdx.x;
    int l = blockIdx.x;
    int offset = l*64;
    __shared__ int max2[64];
    if (i < 8 && j < 8) {
        max2[i*8 + j] = maxip2d[offset + i*8 + j];  // FIX: read the input
    }
    __syncthreads();
    if (i < 4 && j < 4) {
        int a = max2[i*16 + j*2];
        int b = max2[i*16 + j*2 + 1];
        int c = max2[i*16 + j*2 + 8];
        int d = max2[i*16 + j*2 + 9];
        int m1 = (a >= b) ? a : b;
        int m2 = (c >= d) ? c : d;
        maxop2d[l*16 + i*4 + j] = (m1 >= m2) ? m1 : m2;
    }
}
// Fully-connected layer 1: denseop1d[i] = dot(row i of weight1d, denseip1d)
// over the flattened 64x4x4 input; one output unit per thread (host
// launches 1 block of 64 threads).
// FIX: the old code accumulated with += directly into denseop1d, whose
// cudaMalloc'd contents are uninitialized, so every result carried a
// garbage offset; accumulate in a register and store once.
__global__ void dense1(int *denseip1d, int *weight1d, int *denseop1d){
    int i = threadIdx.x;
    const int length = 64*4*4;
    int acc = 0;
    for (int k = 0; k < length; k++) {
        acc += weight1d[i*length + k] * denseip1d[k];
    }
    denseop1d[i] = acc;
}
// Fully-connected layer 2 (class scores): denseop2d[i] = dot(row i of
// weight2d, denseip2d) over 64 inputs; one output unit per thread (host
// launches 1 block of 10 threads).
// FIX: same defect as dense1 -- += into uninitialized device memory;
// accumulate in a register and store once.
__global__ void dense2(int *denseip2d, int *weight2d, int *denseop2d){
    int i = threadIdx.x;
    const int length = 64;
    int acc = 0;
    for (int k = 0; k < length; k++) {
        acc += weight2d[i*length + k] * denseip2d[k];
    }
    denseop2d[i] = acc;
}
/* Driver for a small LeNet-style integer CNN on a synthetic 28x28 image:
 * conv1 (32 5x5 filters) -> 2x2 maxpool -> conv2 (64 filters over all 32
 * maps) -> 2x2 maxpool -> dense(64) -> dense(10).  Pixels and weights are
 * all initialized to 1 (placeholders until real data is read from files).
 *
 * Fixes vs. previous version:
 *  - the debug print after the second max-pool iterated over 64 maps of
 *    16 values but read maxop1 (the FIRST pool's buffer, 32 maps of 144
 *    values); it now prints maxop2 as its label says.
 *  - all host and device allocations are released before exit, and main
 *    returns an explicit status.
 *  - unused locals (count, dimx) removed.
 */
int main(int argc, char **argv){
    int xsize;        // input image width/height
    int filterdim;    // conv filter width/height
    int numfilters;   // conv1 output maps
    int numfilters1;  // conv2 output maps
    int numunits;     // dense1 output units
    int numunits1;    // dense2 output units (class scores)
    xsize = 28;
    filterdim = 5;
    numfilters = 32;
    numfilters1 = 64;
    numunits = 64;
    numunits1 = 10;
    /*Numbytes required for initial image*/
    int numbytes = xsize*xsize*sizeof(int);
    /*Numbytes required for the output of first convolution layer*/
    int numbytes2 = (xsize-filterdim+1)*(xsize-filterdim+1)*sizeof(int); //24x24
    /*Numbytes required for output of first maxpool layer*/
    int numbytes3 = ((xsize-filterdim+1)*(xsize-filterdim+1)/4)*sizeof(int); //12x12
    /*Numbytes required for the output of second convolution layer*/
    int numbytes4 = ((xsize-filterdim+1)/2 - filterdim + 1)*((xsize-filterdim+1)/2 - filterdim + 1)*sizeof(int);//8x8
    /*Numbytes required for the output of second maxpool layer*/
    int numbytes5 = (numbytes4/4);//4x4
    /*Numbytes required for the weight matrix for the first dense layer*/
    int numbytes6 = (numunits*numfilters1*numbytes5);//64x64x4x4
    /*Host-side buffers*/
    unsigned int *pic = (unsigned int *)malloc(numbytes);
    int *result;    // conv1 output
    int *filter;    // conv1 weights
    int *maxop1;    // maxpool1 output
    int *cop2;      // conv2 output
    int *filter2;   // conv2 weights
    int *maxop2;    // maxpool2 output
    int *denseop1;  // dense1 output
    int *weight1;   // dense1 weights
    int *denseop2;  // dense2 output
    int *weight2;   // dense2 weights
    /*Device side variables*/
    int *filterd;
    int *resultd;
    int *maxip1d;
    int *maxop1d;
    int *cip2d;
    int *cop2d;
    int *filter2d;
    int *maxip2d;
    int *maxop2d;
    int *denseip1d;
    int *denseop1d;
    int *weight1d;
    int *denseip2d;
    int *denseop2d;
    int *weight2d;
    filter = (int *)malloc( numfilters*filterdim*filterdim*sizeof(int));
    result = (int *)malloc(numfilters*numbytes2);
    maxop1 = (int *)malloc(numfilters*numbytes3);
    cop2 = (int *)malloc(numfilters1*numbytes4);
    filter2 = (int *)malloc(numfilters1*numfilters*filterdim*filterdim*sizeof(int));
    maxop2 = (int *)malloc(numfilters1*numbytes5);
    denseop1 = (int *)malloc(numunits*sizeof(int));
    weight1 = (int *)malloc(numbytes6);
    denseop2 = (int *)malloc(numunits1*sizeof(int));
    weight2 = (int *)malloc(numunits*numunits1*sizeof(int));
    cudaMalloc(&filterd, numfilters*filterdim*filterdim*sizeof(int));
    cudaMalloc(&resultd, numfilters*numbytes2);
    cudaMalloc(&maxip1d, numfilters*numbytes2);
    cudaMalloc(&maxop1d, numfilters*numbytes3);
    cudaMalloc(&cip2d, numfilters*numbytes3);
    cudaMalloc(&cop2d, numfilters1*numbytes4);
    cudaMalloc(&filter2d, numfilters1*numfilters*filterdim*filterdim*sizeof(int));
    cudaMalloc(&maxip2d, numfilters1*numbytes4);
    cudaMalloc(&maxop2d, numfilters1*numbytes5);
    cudaMalloc(&denseip1d, numfilters1*4*4*sizeof(int));
    cudaMalloc(&denseop1d, numunits*sizeof(int));
    cudaMalloc(&weight1d, numbytes6);
    cudaMalloc(&denseip2d, numunits*sizeof(int));
    cudaMalloc(&denseop2d, numunits1*sizeof(int));
    cudaMalloc(&weight2d, numunits*numunits1*sizeof(int));
    /*Initializing the image on host side*/
    /*Should modify to later on read in image*/
    int i,j,k,l;
    for (i=0; i<xsize; i++) {
        for (j=0; j<xsize; j++) {
            pic[i*xsize + j] = 1;
        }
    }
    /*Initializing the filter for first conv layer to a value*/
    /*TO DO : Read in filter from a file */
    for(int k=0;k<numfilters;k++){
        for (int i=0; i<filterdim; i++) {
            for (int j=0; j<filterdim; j++){
                filter[k*(filterdim*filterdim) + i*filterdim + j] = 1;
            }
        }
    }
    /*Initializing the filter for second conv layer to a value*/
    /*TO DO : Read in filter from a file */
    for(int k=0;k<numfilters1;k++){
        for(int m= 0; m<numfilters;m++){
            for (int i=0; i<filterdim; i++) {
                for (int j=0; j<filterdim; j++){
                    filter2[k*(numfilters*filterdim*filterdim)+ m*filterdim*filterdim + i*filterdim + j] = 1;
                }
            }
        }
    }
    /*Initializing the weight matrix for first dense layer*/
    int length = 64*16;
    for(l=0;l<numunits;l++){
        for(i=0;i<length;i++){
            weight1[l*length + i] = 1;
        }
    }
    /*Initializing the weight matrix for second dense layer*/
    for(l=0;l<numunits1;l++){
        for(i=0;i<numunits;i++){
            weight2[l*numunits + i] = 1;
        }
    }
    /******************Code that has everything to do with kernels****************/
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    dim3 dimGrid (32);
    dim3 dimBlock (32,32);
    // Image lives in constant memory (PIC) for the whole run.
    cudaMemcpyToSymbol(PIC, pic, numbytes);
    cudaMemcpy(filterd, filter, numfilters*filterdim*filterdim*sizeof(int), cudaMemcpyHostToDevice);
    conv1<<<dimGrid, dimBlock>>>(filterd,resultd);
    cudaMemcpy(result,resultd,numfilters*numbytes2,cudaMemcpyDeviceToHost);
    dim3 dimBlock1 (16,16);
    cudaMemcpy(maxip1d, result,numfilters*numbytes2, cudaMemcpyHostToDevice);
    maxpooling<<<dimGrid, dimBlock1>>>(maxip1d, maxop1d);
    cudaMemcpy(maxop1, maxop1d, numfilters*numbytes3, cudaMemcpyDeviceToHost);
    cudaMemcpy(cip2d, maxop1,numfilters*numbytes3,cudaMemcpyHostToDevice);
    cudaMemcpy(filter2d, filter2,numfilters1*numfilters*filterdim*filterdim*sizeof(int), cudaMemcpyHostToDevice);
    dim3 dimGrid2(64);
    dim3 dimBlock2(8,8);
    conv2<<<dimGrid2, dimBlock2>>>(cip2d, filter2d, cop2d);
    cudaMemcpy(cop2, cop2d,numfilters1*numbytes4,cudaMemcpyDeviceToHost);
    cudaMemcpy(maxip2d, cop2,numfilters1*numbytes4,cudaMemcpyHostToDevice);
    maxpool<<<dimGrid2, dimBlock2>>>(maxip2d, maxop2d);
    cudaMemcpy(maxop2, maxop2d, numfilters1*numbytes5, cudaMemcpyDeviceToHost);
    for(k=0;k<64;k++){
        for(i=0;i<4;i++){
            for(j=0;j<4;j++){
                // FIX: print the SECOND max-pool's output (maxop2), not maxop1.
                printf("maxpool[%d][%d]:%d\t",k,i*4+j, maxop2[k*16+i*4+j]);
            }
            printf("\n");
        }
        printf("\n\n");
    }
    cudaMemcpy(denseip1d, maxop2, numfilters1*numbytes5, cudaMemcpyHostToDevice);
    cudaMemcpy(weight1d, weight1, numbytes6, cudaMemcpyHostToDevice);
    dim3 dimGrid3(1);
    dim3 dimBlock3(64);
    dense1<<<dimGrid3, dimBlock3>>>(denseip1d, weight1d, denseop1d);
    cudaMemcpy(denseop1, denseop1d,numunits*sizeof(int),cudaMemcpyDeviceToHost);
    dim3 dimGrid4(1);
    dim3 dimBlock4(10);
    cudaMemcpy(denseip2d, denseop1,numunits*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(weight2d, weight2, numunits*numunits1*sizeof(int), cudaMemcpyHostToDevice);
    dense2<<<dimGrid4, dimBlock4>>>(denseip2d, weight2d, denseop2d);
    cudaMemcpy(denseop2, denseop2d, numunits1*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float milliseconds;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("Time taken : %f seconds", milliseconds/1000);
    /* FIX: release device memory. */
    cudaFree(filterd); cudaFree(resultd); cudaFree(maxip1d); cudaFree(maxop1d);
    cudaFree(cip2d); cudaFree(cop2d); cudaFree(filter2d);
    cudaFree(maxip2d); cudaFree(maxop2d);
    cudaFree(denseip1d); cudaFree(denseop1d); cudaFree(weight1d);
    cudaFree(denseip2d); cudaFree(denseop2d); cudaFree(weight2d);
    /* FIX: release host memory. */
    free(pic); free(filter); free(result); free(maxop1); free(cop2);
    free(filter2); free(maxop2); free(denseop1); free(weight1);
    free(denseop2); free(weight2);
    return 0;
}
|
5,668 | #include <cuda.h>
#include <cuda_runtime.h>
#define TPB 64
// D3Q19 lattice-Boltzmann LBGK time step with regularized boundary
// conditions.  One thread per lattice node (tid); streaming wraps
// periodically in all three directions.
//   fIn/fOut : 19 distribution functions in SoA layout; component c of
//              node tid lives at f[c*nnodes + tid]
//   SNL      : 1 = solid node (half-way bounce-back, no relaxation)
//   VW_nl    : 1 = west velocity boundary node, prescribed uz = VW_uz
//   PE_nl    : 1 = east pressure boundary node, prescribed density rho_out
//   omega    : LBGK relaxation rate
// Grid/block layout is chosen by the D3Q19_RegBC_LBGK host wrapper
// (1D grid, TPB threads per block).
__global__ void D3Q19_RegBC_LBGK_ts(const double * fIn, double * fOut,
const int * SNL,
const int * VW_nl, const double VW_uz,
const int * PE_nl, const double rho_out,
const double omega,
const int Nx, const int Ny, const int Nz)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int nnodes=Nx*Ny*Nz;
if(tid<nnodes){
double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double cu;
double w;
//load the data into the registers
f0=fIn[tid]; f1=fIn[nnodes+tid];
f2=fIn[2*nnodes+tid]; f3=fIn[3*nnodes+tid];
f4=fIn[4*nnodes+tid]; f5=fIn[5*nnodes+tid];
f6=fIn[6*nnodes+tid]; f7=fIn[7*nnodes+tid];
f8=fIn[8*nnodes+tid]; f9=fIn[9*nnodes+tid];
f10=fIn[10*nnodes+tid]; f11=fIn[11*nnodes+tid];
f12=fIn[12*nnodes+tid]; f13=fIn[13*nnodes+tid];
f14=fIn[14*nnodes+tid]; f15=fIn[15*nnodes+tid];
f16=fIn[16*nnodes+tid]; f17=fIn[17*nnodes+tid];
f18=fIn[18*nnodes+tid];
//compute density and velocity
double rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
double ux=f1-f2+f7-f8+f9-f10+f11-f12+f13-f14; ux/=rho;
double uy=f3-f4+f7+f8-f9-f10+f15-f16+f17-f18; uy/=rho;
double uz=f5-f6+f11+f12-f13-f14+f15+f16-f17-f18; uz/=rho;
//take appropriate action if on PE_nl or VW_nl
if(VW_nl[tid]==1){
ux=0.;uy=0.; uz=VW_uz;
//set rho based on uz
rho = (1./(1.-uz))*(2.0*(f6+f13+f14+f17+f18)+(f0+f1+f2+f3+f4+f7+f8+f9+f10));
}
if(PE_nl[tid]==1){
ux=0.; uy=0.; rho=rho_out;
uz = -1.+((2.*(f5+f11+f12+f15+f16)+(f0+f1+f2+f3+f4+f7+f8+f9+f10)))/rho;
}
if(SNL[tid]==1){
ux=0.; uy=0.; uz=0.;
}
//everyone compute equilibrium
double fe0,fe1,fe2,fe3,fe4,fe5,fe6,fe7,fe8,fe9,fe10,fe11,fe12,fe13,fe14,fe15,fe16,fe17,fe18;
//speed 0, ex=ey=ez=0, w=1/3
fe0=rho*(1./3.)*(1.-1.5*(ux*ux+uy*uy+uz*uz));
//speed 1, ex=1, ey=ez=0, w=1/18
cu = 3.*(1.*ux);
fe1=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 2, ex=-1, ey=ez=0
cu=3.*(-1.*ux);
fe2=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 3 (0,1,0)
cu=3.*(uy);
fe3=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 4 (0,-1,0)
cu = 3.*(-uy);
fe4=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 5 (0,0,1)
cu = 3.*(uz);
fe5=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 6 (0,0,-1)
cu = 3.*(-uz);
fe6=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 7 (1,1,0) w= 1/36
cu = 3.*(ux+uy);
fe7=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 8 (-1,1,0)
cu = 3.*(-ux+uy);
fe8=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 9 (1,-1,0)
cu=3.*(ux-uy);
fe9=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 10 (-1,-1,0)
cu = 3.*(-ux-uy);
fe10=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 11 (1,0,1)
cu = 3.*(ux+uz);
fe11=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 12 (-1,0,1)
cu = 3.*(-ux+uz);
fe12=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 13 (1,0,-1)
cu = 3.*(ux-uz);
fe13=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 14 (-1,0,-1)
cu=3.*(-ux-uz);
fe14=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 15 (0,1,1)
cu=3.*(uy+uz);
fe15=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 16 (0,-1,1)
cu=3.*(-uy+uz);
fe16=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 17 (0,1,-1)
cu=3.*(uy-uz);
fe17=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 18 (0,-1,-1)
cu=3.*(-uy-uz);
fe18=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
// Boundary nodes: bounce back the non-equilibrium parts of the unknown
// populations, then regularize all populations from the resulting
// non-equilibrium moments ("apply the tensors" below).
if((VW_nl[tid]==1) || (PE_nl[tid]==1)){
//float ft0;
double ft1,ft2,ft3,ft4,ft5,ft6,ft7,ft8,ft9,ft10,ft11,ft12,ft13,ft14,ft15,ft16,ft17,ft18;
if(VW_nl[tid]==1){
//bounce-back of non-equilibrium for unknown velocities on west boundary
f5=fe5+(f6-fe6);
f11=fe11+(f14-fe14);
f12=fe12+(f13-fe13);
f15=fe15+(f18-fe18);
f16=fe16+(f17-fe17);
}else{
//bounce-back of non-equilibrium on east boundary
f6=fe6+(f5-fe5);
f13=fe13+(f12-fe12);
f14=fe14+(f11-fe11);
f17=fe17+(f16-fe16);
f18=fe18+(f15-fe15);
}
//ft0=f0-fe0;
ft1=f1-fe1;
ft2=f2-fe2;
ft3=f3-fe3;
ft4=f4-fe4;
ft5=f5-fe5;
ft6=f6-fe6;
ft7=f7-fe7;
ft8=f8-fe8;
ft9=f9-fe9;
ft10=f10-fe10;
ft11=f11-fe11;
ft12=f12-fe12;
ft13=f13-fe13;
ft14=f14-fe14;
ft15=f15-fe15;
ft16=f16-fe16;
ft17=f17-fe17;
ft18=f18-fe18;
// //apply the tensors...
// NOTE(review): these long expressions appear to be machine-generated
// projections of the non-equilibrium parts; their coefficients are taken
// on trust from the generator -- do not hand-edit.
f0= - ft1/3. - ft2/3. - ft3/3. - ft4/3. - ft5/3. - ft6/3. - (2.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. - (2.*ft10)/3. - (2.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. - (2.*ft14)/3. - (2.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. - (2.*ft18)/3.;
f1=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. - ft5/3. - ft6/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. - (2.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. - (2.*ft18)/3.;
f2=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. - ft5/3. - ft6/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. - (2.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. - (2.*ft18)/3.;
f3=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. - ft5/3. - ft6/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. - (2.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. - (2.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f4=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. - ft5/3. - ft6/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. - (2.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. - (2.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f5=(2.*ft5)/3. - ft2/3. - ft3/3. - ft4/3. - ft1/3. + (2.*ft6)/3. - (2.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. - (2.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f6=(2.*ft5)/3. - ft2/3. - ft3/3. - ft4/3. - ft1/3. + (2.*ft6)/3. - (2.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. - (2.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f7=(2.*ft1)/3. + (2.*ft2)/3. + (2.*ft3)/3. + (2.*ft4)/3. - ft5/3. - ft6/3. + (10.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. + (10.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f8=(2.*ft1)/3. + (2.*ft2)/3. + (2.*ft3)/3. + (2.*ft4)/3. - ft5/3. - ft6/3. - (2.*ft7)/3. + (10.*ft8)/3. + (10.*ft9)/3. - (2.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f9=(2.*ft1)/3. + (2.*ft2)/3. + (2.*ft3)/3. + (2.*ft4)/3. - ft5/3. - ft6/3. - (2.*ft7)/3. + (10.*ft8)/3. + (10.*ft9)/3. - (2.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f10=(2.*ft1)/3. + (2.*ft2)/3. + (2.*ft3)/3. + (2.*ft4)/3. - ft5/3. - ft6/3. + (10.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. + (10.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f11=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + (10.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. + (10.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f12=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. - (2.*ft11)/3. + (10.*ft12)/3. + (10.*ft13)/3. - (2.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f13=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. - (2.*ft11)/3. + (10.*ft12)/3. + (10.*ft13)/3. - (2.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f14=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + (10.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. + (10.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f15=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + (10.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. + (10.*ft18)/3.;
f16=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. - (2.*ft15)/3. + (10.*ft16)/3. + (10.*ft17)/3. - (2.*ft18)/3.;
f17=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. - (2.*ft15)/3. + (10.*ft16)/3. + (10.*ft17)/3. - (2.*ft18)/3.;
f18=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + (10.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. + (10.*ft18)/3.;
//update fIn for all velocities based on this result.
// Recombine: f = feq + (9/2)*w_i * projected non-equilibrium part.
cu= 9./2.; w = 1./3.;
f0=fe0+f0*cu*w;
w=1./18.;
f1=fe1+f1*cu*w;
f2=fe2+f2*cu*w;
f3=fe3+f3*cu*w;
f4=fe4+f4*cu*w;
f5=fe5+f5*cu*w;
f6=fe6+f6*cu*w;
w=1./36.;
f7=fe7+f7*cu*w;
f8=fe8+f8*cu*w;
f9=fe9+f9*cu*w;
f10=fe10+f10*cu*w;
f11=fe11+f11*cu*w;
f12=fe12+f12*cu*w;
f13=fe13+f13*cu*w;
f14=fe14+f14*cu*w;
f15=fe15+f15*cu*w;
f16=fe16+f16*cu*w;
f17=fe17+f17*cu*w;
f18=fe18+f18*cu*w;
}
if(SNL[tid]==0){
//everyone relaxes towards equilibrium
f0=f0-omega*(f0-fe0);
f1=f1-omega*(f1-fe1);
f2=f2-omega*(f2-fe2);
f3=f3-omega*(f3-fe3);
f4=f4-omega*(f4-fe4);
f5=f5-omega*(f5-fe5);
f6=f6-omega*(f6-fe6);
f7=f7-omega*(f7-fe7);
f8=f8-omega*(f8-fe8);
f9=f9-omega*(f9-fe9);
f10=f10-omega*(f10-fe10);
f11=f11-omega*(f11-fe11);
f12=f12-omega*(f12-fe12);
f13=f13-omega*(f13-fe13);
f14=f14-omega*(f14-fe14);
f15=f15-omega*(f15-fe15);
f16=f16-omega*(f16-fe16);
f17=f17-omega*(f17-fe17);
f18=f18-omega*(f18-fe18);
}else{
//bounce back
// Solid nodes: swap each population with its opposite direction
// (cu is reused here as a plain swap temporary).
f0=f0-omega*(f0-fe0);
//1 -- 2
cu=f1;f1=f2;f2=cu;
// 3 -- 4
cu=f3;f3=f4;f4=cu;
//5--6
cu=f5;f5=f6;f6=cu;
//7--10
cu=f7;f7=f10;f10=cu;
//8--9
cu=f8;f8=f9;f9=cu;
//11-14
cu=f11;f11=f14;f14=cu;
//12-13
cu=f12;f12=f13;f13=cu;
//15-18
cu=f15;f15=f18;f18=cu;
//16-17
cu=f16;f16=f17;f17=cu;
}
//now, everybody streams...
// Recover (X,Y,Z) from the linear index, then push each population to
// its downstream neighbour with periodic wrap at every face.
int Z = tid/(Nx*Ny);
int Y = (tid - Z*Nx*Ny)/Nx;
int X = tid - Z*Nx*Ny - Y*Nx;
int X_t,Y_t,Z_t,tid_t;
//speed 0 (0,0,0)
fOut[tid]=f0;
//stream(fOut,f0,0,X,Y,Z,0,0,0,Nx,Ny,Nz);
//speed 1 (1,0,0)
X_t=X+1;Y_t=Y; Z_t=Z;
if(X_t==Nx) X_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[nnodes+tid_t]=f1;
//speed 2 (-1,0,0)
X_t=X-1; Y_t=Y; Z_t=Z;
if(X_t<0)X_t=Nx-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[2*nnodes+tid_t]=f2;
//speed 3 (0,1,0)
X_t=X; Y_t=Y+1; Z_t=Z;
if(Y_t==Ny)Y_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
//tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[3*nnodes+tid_t]=f3;
//speed 4 ( 0,-1,0)
X_t=X; Y_t=Y-1; Z_t=Z;
if(Y_t<0)Y_t=Ny-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[4*nnodes+tid_t]=f4;
//speed 5 ( 0,0,1)
X_t=X;Y_t=Y;Z_t=Z+1;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[5*nnodes+tid_t]=f5;
//speed 6 (0,0,-1)
X_t=X; Y_t=Y;Z_t=Z-1;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[6*nnodes+tid_t]=f6;
//speed 7 (1,1,0)
X_t=X+1;Y_t=Y+1;Z_t=Z;
if(X_t==Nx)X_t=0;
if(Y_t==Ny)Y_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[7*nnodes+tid_t]=f7;
//speed 8 (-1,1,0)
X_t=X-1;Y_t=Y+1;Z_t=Z;
if(X_t<0)X_t=Nx-1;
if(Y_t==Ny)Y_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[8*nnodes+tid_t]=f8;
//speed 9 (1,-1,0)
X_t=X+1;Y_t=Y-1;Z_t=Z;
if(X_t==Nx)X_t=0;
if(Y_t<0)Y_t=Ny-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[9*nnodes+tid_t]=f9;
//speed 10 (-1,-1,0)
X_t=X-1;Y_t=Y-1;Z_t=Z;
if(X_t<0)X_t=Nx-1;
if(Y_t<0)Y_t=Ny-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[10*nnodes+tid_t]=f10;
//speed 11 (1,0,1)
X_t=X+1;Y_t=Y;Z_t=Z+1;
if(X_t==Nx)X_t=0;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[11*nnodes+tid_t]=f11;
//speed 12 (-1,0,1)
X_t=X-1;Y_t=Y;Z_t=Z+1;
if(X_t<0)X_t=Nx-1;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[12*nnodes+tid_t]=f12;
//speed 13 (1,0,-1)
X_t=X+1;Y_t=Y;Z_t=Z-1;
if(X_t==Nx)X_t=0;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[13*nnodes+tid_t]=f13;
//speed 14 (-1,0,-1)
X_t=X-1;Y_t=Y;Z_t=Z-1;
if(X_t<0)X_t=Nx-1;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[14*nnodes+tid_t]=f14;
//speed 15 (0,1,1)
X_t=X;Y_t=Y+1;Z_t=Z+1;
if(Y_t==Ny)Y_t=0;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[15*nnodes+tid_t]=f15;
//speed 16 (0,-1,1)
X_t=X;Y_t=Y-1;Z_t=Z+1;
if(Y_t<0)Y_t=Ny-1;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[16*nnodes+tid_t]=f16;
//speed 17 (0,1,-1)
X_t=X;Y_t=Y+1;Z_t=Z-1;
if(Y_t==Ny)Y_t=0;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[17*nnodes+tid_t]=f17;
//speed 18 ( 0,-1,-1)
X_t=X;Y_t=Y-1;Z_t=Z-1;
if(Y_t<0)Y_t=Ny-1;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[18*nnodes+tid_t]=f18;
}
}
// Host-side launcher for D3Q19_RegBC_LBGK_ts: one thread per lattice node,
// TPB threads per 1D block, grid sized by ceiling division over Nx*Ny*Nz.
void D3Q19_RegBC_LBGK(const double * fIn, double * fOut, const int * SNL, const int * VW_nl,
const double VW_uz, const int * PE_nl, const double rho_out, const double omega,
const int Nx, const int Ny, const int Nz)
{
    const int nnodes = Nx * Ny * Nz;
    dim3 blocks(TPB, 1, 1);
    dim3 grids((nnodes + TPB - 1) / TPB, 1, 1);
    D3Q19_RegBC_LBGK_ts<<<grids, blocks>>>(fIn, fOut, SNL, VW_nl, VW_uz, PE_nl,
                                           rho_out, omega, Nx, Ny, Nz);
}
|
5,669 | #include<iostream>
#include<cstdio>
#include<cstdlib>
#include<cuda_runtime.h>
using namespace std;
// In-place parallel tree reduction: leaves the minimum of the first
// 2*blockDim.x elements in input[0].  Single-block kernel; destroys input.
// Precondition: blockDim.x is a power of two -- odd active-thread counts
// silently drop elements.
// FIX: added __syncthreads() between reduction rounds; without it a thread
// could read a partial result before the producing thread had written it
// (data race, nondeterministic output for blockDim.x > warp size).
__global__ void minimum(int *input)
{
    int tid = threadIdx.x;
    int step_size = 1;
    int number_of_threads = blockDim.x;
    while (number_of_threads > 0)
    {
        if (tid < number_of_threads)
        {
            int first = tid * step_size * 2;
            int second = first + step_size;
            if (input[second] < input[first])
                input[first] = input[second];
        }
        __syncthreads();  // all writes of this round visible before the next
        step_size *= 2;
        number_of_threads /= 2;
    }
}
// In-place parallel tree reduction: leaves the maximum of the first
// 2*blockDim.x elements in input[0].  Single-block kernel; destroys input.
// Precondition: blockDim.x is a power of two.
// FIX: added __syncthreads() between reduction rounds (same race as in
// `minimum`).
__global__ void max(int *input)
{
    int tid = threadIdx.x;
    int step_size = 1;
    int number_of_threads = blockDim.x;
    while (number_of_threads > 0)
    {
        if (tid < number_of_threads)
        {
            int first = tid * step_size * 2;
            int second = first + step_size;
            if (input[second] > input[first])
                input[first] = input[second];
        }
        __syncthreads();
        step_size *= 2;
        number_of_threads /= 2;
    }
}
// In-place parallel tree reduction: leaves the sum of the first
// 2*blockDim.x elements in input[0].  Single-block kernel; destroys input.
// Precondition: blockDim.x is a power of two.
// FIX: added __syncthreads() between reduction rounds (same race as in
// `minimum`).
__global__ void sum(int *input)
{
    const int tid = threadIdx.x;
    int step_size = 1;
    int number_of_threads = blockDim.x;
    while (number_of_threads > 0)
    {
        if (tid < number_of_threads)
        {
            const int first = tid * step_size * 2;
            const int second = first + step_size;
            input[first] = input[first] + input[second];
        }
        __syncthreads();
        step_size *= 2;
        number_of_threads /= 2;
    }
}
// In-place reduction: leaves the (integer) average of the first
// 2*blockDim.x elements in input[0].  Single-block kernel; destroys input.
// Precondition: blockDim.x is a power of two.
// FIXES: (1) added __syncthreads() between reduction rounds (same race as
// `minimum`); (2) the final division was executed by EVERY thread as an
// unsynchronized read-modify-write of input[0], so the sum could be divided
// an arbitrary number of times -- only thread 0 divides now.
__global__ void average(int *input)
{
    const int tid = threadIdx.x;
    int step_size = 1;
    int number_of_threads = blockDim.x;
    int totalElements = number_of_threads * 2;  // elements being reduced
    while (number_of_threads > 0)
    {
        if (tid < number_of_threads)
        {
            const int first = tid * step_size * 2;
            const int second = first + step_size;
            input[first] = input[first] + input[second];
        }
        __syncthreads();
        step_size *= 2;
        number_of_threads /= 2;
    }
    if (tid == 0)
        input[0] = input[0] / totalElements;
}
// Driver: fills an array with random values, then runs the min/max/sum/avg
// reduction kernels on device copies and prints the results, timing the
// minimum with CUDA events.
// NOTE(review): n is hard-coded to 10 despite the prompt; n/2 = 5 threads,
// and the tree reductions halve the active-thread count with integer
// division, so for non-power-of-two n some elements are never merged --
// the device results can be wrong for n = 10.  Confirm intended n.
// NOTE(review): elapsedTime is divided by 100 and printed as an "average",
// but only one kernel launch is timed -- confirm the intent.
// NOTE(review): `arr` is never delete[]d.
int main()
{
cout<<"Enter the no of elements"<<endl;
int n;
// n is fixed (no cin); also used as the RNG seed below.
n=10;
srand(n);
int *arr=new int[n];
// Host-side minimum kept for comparison against the GPU result.
int min=20000;
for(int i=0;i<n;i++)
{
arr[i]=rand()%20000;
if(arr[i]<min)
min=arr[i];
cout<<arr[i]<<" ";
}
int size=n*sizeof(int);
int *arr_d,result1;
cudaEvent_t start, stop;
cudaMalloc(&arr_d,size);
cudaMemcpy(arr_d,arr,size,cudaMemcpyHostToDevice);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// n/2 threads: each reduction thread initially merges a pair of elements.
minimum<<<1,n/2>>>(arr_d);
cudaMemcpy(&result1,arr_d,sizeof(int),cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
elapsedTime /= 100.0f;
cout<<"The minimum element is"<<result1<<endl;
cout<<"average time elapsed:"<< elapsedTime;
cout<<"The min element is"<<min;
//#MAX OPERATION
int *arr_max,maxValue;
cudaMalloc(&arr_max,size);
cudaMemcpy(arr_max,arr,size,cudaMemcpyHostToDevice);
max<<<1,n/2>>>(arr_max);
cudaMemcpy(&maxValue,arr_max,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"The maximum element is"<<maxValue<<endl;
//#SUM OPERATION
int *arr_sum,sumValue;
cudaMalloc(&arr_sum,size);
cudaMemcpy(arr_sum,arr,size,cudaMemcpyHostToDevice);
sum<<<1,n/2>>>(arr_sum);
cudaMemcpy(&sumValue,arr_sum,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"The sum of elements is"<<sumValue<<endl;
cout<<"The average of elements is"<<(sumValue/n)<<endl;
//#AVG OPERATION
int *arr_avg,avgValue;
cudaMalloc(&arr_avg,size);
cudaMemcpy(arr_avg,arr,size,cudaMemcpyHostToDevice);
average<<<1,n/2>>>(arr_avg);
cudaMemcpy(&avgValue,arr_avg,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"The average of elements is "<<avgValue<<endl;
cudaFree(arr_d);
cudaFree(arr_sum);
cudaFree(arr_max);
cudaFree(arr_avg);
return 0;
}
5,670 | /**
* correlation.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <cuda.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 1.05
#define GPU_DEVICE 0
/* Problem size */
#define M 2048
#define N 2048
/* Thread block dimensions for kernel 1*/
#define DIM_THREAD_BLOCK_KERNEL_1_X 256
#define DIM_THREAD_BLOCK_KERNEL_1_Y 1
/* Thread block dimensions for kernel 2*/
#define DIM_THREAD_BLOCK_KERNEL_2_X 256
#define DIM_THREAD_BLOCK_KERNEL_2_Y 1
/* Thread block dimensions for kernel 3*/
#define DIM_THREAD_BLOCK_KERNEL_3_X 32
#define DIM_THREAD_BLOCK_KERNEL_3_Y 8
/* Thread block dimensions for kernel 4*/
#define DIM_THREAD_BLOCK_KERNEL_4_X 256
#define DIM_THREAD_BLOCK_KERNEL_4_Y 1
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#define FLOAT_N 3214212.01f
#define EPS 0.005f
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
// Fill the (M+1) x (N+1) input matrix with data[i][j] = (i*j)/(M+1).
void init_arrays(DATA_TYPE* data)
{
    for (int i = 0; i <= M; i++)
    {
        for (int j = 0; j <= N; j++)
        {
            data[i * (N + 1) + j] = ((DATA_TYPE)i * j) / (M + 1);
        }
    }
}
// Reference (host) implementation of the PolyBench correlation kernel.
// Arrays use 1-based indexing into an (M+1) x (N+1) layout; row 0 and
// column 0 are unused.  Modifies 'data' in place (center + reduce) and
// fills 'mean', 'stddev' and the symmetric matrix 'symmat'.
void correlation(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat)
{
    int i, j, j1, j2;
    // Determine mean of column vectors of input data matrix
    for (j = 1; j < (M+1); j++)
    {
        mean[j] = 0.0;
        for (i = 1; i < (N+1); i++)
        {
            mean[j] += data[i*(M+1) + j];
        }
        // NOTE(review): normalizes by the constant FLOAT_N rather than by
        // N; this matches the GPU kernels, so it is at least consistent.
        mean[j] /= (DATA_TYPE)FLOAT_N;
    }
    // Determine standard deviations of column vectors of data matrix.
    for (j = 1; j < (M+1); j++)
    {
        stddev[j] = 0.0;
        for (i = 1; i < (N+1); i++)
        {
            stddev[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]);
        }
        stddev[j] /= FLOAT_N;
        stddev[j] = sqrt_of_array_cell(stddev, j);
        // Clamp near-zero deviations to 1 so the division below is safe.
        stddev[j] = stddev[j] <= EPS ? 1.0 : stddev[j];
    }
    // Center and reduce the column vectors.
    for (i = 1; i < (N+1); i++)
    {
        for (j = 1; j < (M+1); j++)
        {
            data[i*(M+1) + j] -= mean[j];
            data[i*(M+1) + j] /= (sqrt(FLOAT_N)*stddev[j]) ;
        }
    }
    // Calculate the m * m correlation matrix.
    for (j1 = 1; j1 < M; j1++)
    {
        symmat[j1*(M+1) + j1] = 1.0;   // unit diagonal
        for (j2 = j1+1; j2 < (M+1); j2++)
        {
            symmat[j1*(M+1) + j2] = 0.0;
            for (i = 1; i < (N+1); i++)
            {
                symmat[j1*(M+1) + j2] += (data[i*(M+1) + j1] * data[i*(M+1) + j2]);
            }
            // Mirror into the lower triangle (matrix is symmetric).
            symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2];
        }
    }
    symmat[M*(M+1) + M] = 1.0;   // last diagonal entry, not covered above
}
// Compare the CPU and GPU correlation matrices element-wise and report how
// many entries differ by more than PERCENT_DIFF_ERROR_THRESHOLD percent.
void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu)
{
    int i,j,fail;
    fail = 0;
    for (i=1; i < (M+1); i++)
    {
        for (j=1; j < (N+1); j++)
        {
            if (percentDiff(symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
            {
                fail++;
                /* BUG FIX: the diagnostic previously indexed with i*N + j,
                 * which does not match the (N+1)-pitch layout used by the
                 * comparison above, so it printed the wrong elements. */
                printf("i: %d j: %d\n1: %f 2: %f\n", i, j, symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]);
            }
        }
    }
    // print results
    printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
// Report the configured GPU_DEVICE and make it the current device.
void GPU_argv_init()
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, GPU_DEVICE);
    printf("setting device %d with name %s\n",GPU_DEVICE,prop.name);
    cudaSetDevice( GPU_DEVICE );
}
// One thread per column j in [1, M]: compute that column's mean over rows
// 1..N (1-based layout, pitch M+1), normalized by FLOAT_N.
__global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data)
{
    const int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (j < 1 || j >= (M + 1))
        return;
    DATA_TYPE acc = 0.0;
    for (int i = 1; i <= N; i++)
    {
        acc += data[i * (M + 1) + j];
    }
    mean[j] = acc / (DATA_TYPE)FLOAT_N;
}
// One thread per column j in [1, M]: compute the column's standard
// deviation (normalized by FLOAT_N), clamping near-zero values to 1 so the
// later division in reduce_kernel is safe.
__global__ void std_kernel(DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data)
{
    const int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (j < 1 || j >= (M + 1))
        return;
    DATA_TYPE acc = 0.0;
    for (int i = 1; i <= N; i++)
    {
        const DATA_TYPE d = data[i * (M + 1) + j] - mean[j];
        acc += d * d;
    }
    acc /= (FLOAT_N);
    acc = sqrt(acc);
    std[j] = (acc <= EPS) ? 1.0 : acc;
}
// 2-D launch: one thread per (row i, column j) element; centers the value
// by the column mean and scales by sqrt(FLOAT_N)*stddev.
__global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data)
{
    const int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
    const int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
    if (i < 1 || i >= (N + 1) || j < 1 || j >= (M + 1))
        return;
    const int idx = i * (M + 1) + j;
    data[idx] = (data[idx] - mean[j]) / (sqrt(FLOAT_N) * std[j]);
}
// One thread per row j1 in [1, M): computes the upper-triangle dot
// products against columns j2 > j1 and mirrors them into the lower
// triangle.  The last diagonal element is set by the host afterwards.
__global__ void corr_kernel(DATA_TYPE *symmat, DATA_TYPE *data)
{
    const int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (j1 < 1 || j1 >= M)
        return;
    symmat[j1 * (M + 1) + j1] = 1.0;
    for (int j2 = j1 + 1; j2 <= M; j2++)
    {
        DATA_TYPE acc = 0.0;
        for (int i = 1; i <= N; i++)
        {
            acc += data[i * (M + 1) + j1] * data[i * (M + 1) + j2];
        }
        symmat[j1 * (M + 1) + j2] = acc;
        symmat[j2 * (M + 1) + j1] = acc;
    }
}
// GPU pipeline: copy inputs to the device, run the four kernels
// (mean -> stddev -> center/reduce -> correlation), patch the final
// diagonal element, and copy the result back.  Host 'data' is untouched;
// the kernels operate on the device copy only.
void correlationCuda(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat,
        DATA_TYPE* symmat_outputFromGpu)
{
    double t_start, t_end;
    DATA_TYPE *data_gpu;
    DATA_TYPE *stddev_gpu;
    DATA_TYPE *mean_gpu;
    DATA_TYPE *symmat_gpu;
    cudaMalloc((void **)&data_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1));
    cudaMalloc((void **)&symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1));
    cudaMalloc((void **)&stddev_gpu, sizeof(DATA_TYPE) * (M+1));
    cudaMalloc((void **)&mean_gpu, sizeof(DATA_TYPE) * (M+1));
    cudaMemcpy(data_gpu, data, sizeof(DATA_TYPE) * (M+1) * (N+1), cudaMemcpyHostToDevice);
    cudaMemcpy(symmat_gpu, symmat, sizeof(DATA_TYPE) * (M+1) * (N+1), cudaMemcpyHostToDevice);
    cudaMemcpy(stddev_gpu, stddev, sizeof(DATA_TYPE) * (M+1), cudaMemcpyHostToDevice);
    cudaMemcpy(mean_gpu, mean, sizeof(DATA_TYPE) * (M+1), cudaMemcpyHostToDevice);
    /* BUG FIX: the grids were computed as ceil((float)M) / block, i.e. the
     * ceil was applied BEFORE the division and the quotient truncated.
     * That only works when the block size divides M exactly (as with
     * M=2048); otherwise the grid is one block short.  Apply ceil to the
     * quotient instead. */
    dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y);
    dim3 grid1((size_t)ceil((float)M / (float)DIM_THREAD_BLOCK_KERNEL_1_X), 1);
    dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y);
    dim3 grid2((size_t)ceil((float)M / (float)DIM_THREAD_BLOCK_KERNEL_2_X), 1);
    dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y);
    dim3 grid3((size_t)ceil((float)M / (float)DIM_THREAD_BLOCK_KERNEL_3_X), (size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_3_Y));
    dim3 block4(DIM_THREAD_BLOCK_KERNEL_4_X, DIM_THREAD_BLOCK_KERNEL_4_Y);
    dim3 grid4((size_t)ceil((float)M / (float)DIM_THREAD_BLOCK_KERNEL_4_X), 1);
    t_start = rtclock();
    /* cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize(). */
    mean_kernel<<< grid1, block1 >>>(mean_gpu,data_gpu);
    cudaDeviceSynchronize();
    std_kernel<<< grid2, block2 >>>(mean_gpu,stddev_gpu,data_gpu);
    cudaDeviceSynchronize();
    reduce_kernel<<< grid3, block3 >>>(mean_gpu,stddev_gpu,data_gpu);
    cudaDeviceSynchronize();
    corr_kernel<<< grid4, block4 >>>(symmat_gpu,data_gpu);
    cudaDeviceSynchronize();
    t_end = rtclock();
    fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
    /* corr_kernel leaves the last diagonal element untouched; set it. */
    DATA_TYPE valueAtSymmatIndexMTimesMPlus1PlusMPoint = 1.0;
    cudaMemcpy(&(symmat_gpu[(M)*(M+1) + (M)]), &valueAtSymmatIndexMTimesMPlus1PlusMPoint, sizeof(DATA_TYPE), cudaMemcpyHostToDevice);
    cudaMemcpy(symmat_outputFromGpu, symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1), cudaMemcpyDeviceToHost);
    cudaFree(data_gpu);
    cudaFree(symmat_gpu);
    cudaFree(stddev_gpu);
    cudaFree(mean_gpu);
}
// Host driver: allocate all arrays, run the GPU pipeline first, then the
// CPU reference, and compare the two correlation matrices.
int main()
{
    double t_start, t_end;
    DATA_TYPE* data;
    DATA_TYPE* mean;
    DATA_TYPE* stddev;
    DATA_TYPE* symmat;
    DATA_TYPE* symmat_outputFromGpu;
    data = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
    mean = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE));
    stddev = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE));
    symmat = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
    symmat_outputFromGpu = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
    init_arrays(data);
    GPU_argv_init();
    // GPU pass runs first: it works on its own device copy of 'data', so
    // the CPU pass below still sees the untouched input.
    correlationCuda(data, mean, stddev, symmat, symmat_outputFromGpu);
    t_start = rtclock();
    correlation(data, mean, stddev, symmat);
    t_end = rtclock();
    fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
    compareResults(symmat, symmat_outputFromGpu);
    free(data);
    free(mean);
    free(stddev);
    free(symmat);
    free(symmat_outputFromGpu);
    return 0;
}
|
5,671 | #include <stdio.h>
#include <iostream>
#include <ctime>
#include <unistd.h>
#include <cmath>
#define N 1000000
#define BLOCK_SIZE 64
#define TIME_CHECK clock()/float(CLOCKS_PER_SEC)
using namespace std;
float hArray[N];
float *dArray;
int blocks;
// Reserve device storage for the N series terms.
void prologue(void) {
    cudaMalloc((void**)&dArray, sizeof(float) * N);
}
// Copy the computed terms back to the host buffer and release the device
// allocation made in prologue().
void epilogue(void) {
    cudaMemcpy(hArray, dArray, sizeof(float) * N, cudaMemcpyDeviceToHost);
    cudaFree(dArray);
}
// Kernel: term x of the Leibniz series, (-1)^x / (2x+1); one thread per
// term, guarded against the rounded-up tail of the grid.
__global__ void pi(float *arr) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    const double numerator = (idx % 2) ? -1 : 1;
    const double denominator = 2 * idx + 1;
    arr[idx] = numerator / denominator;
}
// Compute pi via the alternating (Leibniz) series on both CPU and GPU and
// print per-phase timings.  argv[1] is the epsilon used as the CPU
// stopping criterion; the GPU always evaluates N terms.
int main(int argc, char** argv)
{
    float gpu_start_time = 0;
    float gpu_post_prologue_time = 0;
    float gpu_post_computing_time = 0;
    float gpu_end_time = 0;
    float cpu_start_time = 0;
    float cpu_end_time = 0;
    if(argc != 2)
        return -1;
    double eps = atof(argv[1]);
    double x0=1, x1=10000;   // x1 seeded far away so the loop runs at least once
    cout << eps << endl;
    //cpu
    cpu_start_time = TIME_CHECK;
    int i = 1;
    int mianownik = 1;   // denominator: 1, 3, 5, ...
    // Iterate until two successive partial sums differ by less than eps.
    while(abs(x0 - x1) > eps)
    {
        x1 = x0;
        int licznik = (i%2)?-1:1;   // alternating numerator sign
        i++;
        mianownik += 2;
        x0 += (float)licznik/(float)mianownik;
    }
    x0 = 4 * x0;   // the series sums to pi/4
    cpu_end_time = TIME_CHECK;
    printf("%.10f\n", x0);
    //gpu
    int devCnt;
    cudaGetDeviceCount(&devCnt);
    if(devCnt == 0) {
        perror("No CUDA devices available -- exiting.");
        return 1;
    }
    gpu_start_time = TIME_CHECK;
    prologue();
    blocks = N / BLOCK_SIZE;
    if(N % BLOCK_SIZE)
        blocks++;   // round up so every term gets a thread
    gpu_post_prologue_time = TIME_CHECK;
    pi<<<blocks, BLOCK_SIZE>>>(dArray);
    // NOTE(review): cudaThreadSynchronize() is deprecated; prefer
    // cudaDeviceSynchronize().
    cudaThreadSynchronize();
    gpu_post_computing_time = TIME_CHECK;
    epilogue();
    // Host-side accumulation of the N per-term values.
    double sum = 0;
    for(int i=0;i<N;i++)
        sum += hArray[i];
    sum *= 4;
    gpu_end_time = TIME_CHECK;
    printf("%.10f\n", sum);
    cout << "prologue\t" << gpu_post_prologue_time - gpu_start_time << endl;
    cout << "counting\t" << gpu_post_computing_time - gpu_post_prologue_time << endl;
    cout << "epilogue\t" << gpu_end_time - gpu_post_computing_time << endl;
    cout << "cpu\t" << cpu_end_time - cpu_start_time << endl;
    return 0;
}
|
5,672 | #include "includes.h"
// Per-block sum reduction: each thread serially accumulates a slice of
// 'idata' into a private partial, the partials are combined by a
// shared-memory tree reduction, and thread 0 writes the block total to
// odata[blockIdx.x].  Requires blockDim.x * sizeof(int) bytes of dynamic
// shared memory.
__global__ void sReduceSum(int *idata,int *odata,unsigned int ncols) {
    int i;
    unsigned int tid = threadIdx.x;
    extern __shared__ int sdata[];
    // NOTE(review): 'startPos' is blockDim.x + threadIdx.x while
    // 'blockOffset' also scales threadIdx.x, so the slice address counts
    // the thread index twice and always skips the first blockDim.x
    // entries; blockIdx.x is never used when reading.  This looks wrong --
    // was startPos meant to be blockIdx.x * ncols?  Verify against callers.
    unsigned int startPos = blockDim.x + threadIdx.x;
    int colsPerThread = ncols/blockDim.x;   // assumes blockDim.x divides ncols
    int blockOffset = threadIdx.x *(ncols/blockDim.x);
    int myPart = 0;
    for(i=0;i<colsPerThread;i++) {
        myPart+=idata[blockOffset+startPos+i];
    }
    sdata[tid]=myPart;
    __syncthreads();
    unsigned int s;
    // Shared-memory tree reduction; halving covers every partial only when
    // blockDim.x is a power of two.
    for(s=blockDim.x/2;s>0;s>>=1) {
        if(tid<s) {
            sdata[tid] += sdata[tid+s];
        }
        __syncthreads();
    }
    if(tid==0)odata[blockIdx.x]=sdata[0];
}
5,673 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define MAX_THREADS_PER_BLOCK 1024
// Abort the program with a line-tagged message when a CUDA call fails.
// Used through the CUDA_SAFE_CALL macro, which supplies __LINE__.
void safe_call(cudaError_t ret, int line)
{
    if(ret==cudaSuccess)
        return;
    printf("Error at line %d : %s\n",line,cudaGetErrorString(ret));
    exit(-1);
}
// Undirected edge stored as a pair of vertex indices; the BFS kernel
// relaxes it in both directions.
struct Edge
{
    int first;    // one endpoint (the source vertex when read from file)
    int second;   // the other endpoint
};
// Initialize BFS labels: depth 0 for the starting vertex, -1 (unvisited)
// for everything else.
__global__ void init(int * vertices, int starting_vertex, int num_vertices)
{
    int v = blockDim.x*blockIdx.x + threadIdx.x;
    // BUG FIX: the caller sizes the grid from the EDGE count, so without a
    // bounds check extra threads wrote past the end of 'vertices' (the
    // num_vertices parameter was previously unused).
    if (v >= num_vertices)
        return;
    if (v==starting_vertex)
        vertices[v] = 0;
    else
        vertices[v] = -1;
}
// Edge-parallel BFS step: one thread per edge; if either endpoint sits at
// the current depth and the other is unvisited (-1), label the other with
// depth+1 and raise *d_over so the host runs another round.
__global__ void bfs(const Edge * edges, int * vertices, int current_depth, bool * d_over)
{
    int e = blockDim.x*blockIdx.x + threadIdx.x;
    // NOTE(review): no bounds check on 'e' -- the launch rounds the edge
    // count up to a multiple of the block size, so trailing threads read
    // past the end of 'edges'.  An edge-count parameter/guard is needed;
    // cannot be added here without changing the call signature.
    int vfirst = edges[e].first;
    int dfirst = vertices[vfirst];
    int vsecond = edges[e].second;
    int dsecond = vertices[vsecond];
    if ((dfirst == current_depth) && (dsecond == -1))
    {
        vertices[vsecond] = dfirst + 1;
        *d_over = true;
    }
    if ((dsecond == current_depth) && (dfirst == -1))
    {
        vertices[vfirst] = dsecond + 1;
        *d_over = true;
    }
}
// BFS driver: reads a graph file ("num_vertices num_edges", then for each
// vertex its degree followed by its neighbour list), runs level-synchronous
// edge-parallel BFS from vertex 0, and prints every vertex's depth.
int main(int argc, char * argv[])
{
    static char * filename;
    if(argc>2)
    {
        printf("./a.out <filename>\n");
        exit(-1);
    }
    else if(argc==2)
    {
        filename = argv[1];
    }
    else
    {
        filename = "../data/input.txt";
    }
    FILE * fp = fopen(filename,"r");
    if(!fp)
    {
        printf("Error reading file.\n");
        exit(-1);
    }
    int num_vertices, num_edges;
    fscanf(fp,"%d %d",&num_vertices,&num_edges);
    // Launch geometry is sized from the EDGE count: one thread per edge,
    // rounded up to whole blocks.
    int num_of_blocks = 1;
    int num_of_threads_per_block = num_edges;
    if(num_edges>MAX_THREADS_PER_BLOCK)
    {
        num_of_blocks = (int)ceil(num_edges/(double)MAX_THREADS_PER_BLOCK);
        num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
    }
    int * vertices_host;
    Edge * edges_host;
    int * vertices_device;
    Edge * edges_device;
    vertices_host = (int *) malloc(num_vertices * sizeof(int));
    edges_host = (Edge *) malloc(num_edges * sizeof(Edge));
    CUDA_SAFE_CALL(cudaMalloc((void **)&vertices_device, num_vertices * sizeof(int)));
    CUDA_SAFE_CALL(cudaMalloc((void **)&edges_device, num_edges * sizeof(Edge)));
    // Flatten the adjacency lists into an edge array.
    int edge_id = 0;
    for(int i=0;i<num_vertices;i++)
    {
        int edges_per_vertex;
        fscanf(fp,"%d",&edges_per_vertex);
        for(int j=0;j<edges_per_vertex;j++)
        {
            edges_host[edge_id].first = i;
            fscanf(fp,"%d",&edges_host[edge_id].second);
            edge_id++;
        }
    }
    CUDA_SAFE_CALL(cudaMemcpy((void *)edges_device, (void *)edges_host, num_edges * sizeof(Edge), cudaMemcpyHostToDevice));
    dim3 grid( num_of_blocks, 1, 1);
    dim3 threads( num_of_threads_per_block, 1, 1);
    // NOTE(review): this grid is sized by num_edges but 'init' indexes
    // vertices; if num_edges < num_vertices some labels stay
    // uninitialised, and if larger the kernel must bounds-check against
    // num_vertices to avoid out-of-bounds writes -- verify.
    init<<<grid,threads>>> (vertices_device, 0, num_vertices);
    bool stop;
    bool * d_over;
    CUDA_SAFE_CALL(cudaMalloc((void **)&d_over, sizeof(bool)));
    int k=0;
    // Level-synchronous loop: rerun the relaxation kernel until a round
    // makes no change (d_over stays false).
    do
    {
        stop = false;
        CUDA_SAFE_CALL(cudaMemcpy(d_over, &stop, sizeof(bool), cudaMemcpyHostToDevice));
        bfs<<<grid, threads>>> (edges_device, vertices_device, k, d_over);
        CUDA_SAFE_CALL(cudaMemcpy(&stop, d_over, sizeof(bool), cudaMemcpyDeviceToHost));
        k++;
    }while(stop);
    CUDA_SAFE_CALL(cudaMemcpy((void *)vertices_host, (void *) vertices_device, num_vertices * sizeof(int), cudaMemcpyDeviceToHost));
    printf("Number of iterations : %d\n",k);
    for(int i = 0; i < num_vertices; i++)
    {
        printf("Vertex %d Distance %d\n",i,vertices_host[i]);
    }
    // NOTE(review): 'fp' is never fclose()d and 'd_over' is never freed.
    free(vertices_host);
    free(edges_host);
    CUDA_SAFE_CALL(cudaFree(vertices_device));
    CUDA_SAFE_CALL(cudaFree(edges_device));
    return 0;
}
|
5,674 | #include <cuda.h>
#include <iostream>
using namespace std;
// Abort with a diagnostic when a CUDA call did not return cudaSuccess.
void cudasafe(int error, string message, string file, int line) {
    if (error != cudaSuccess) {
        // BUG FIX: the original streamed the FILE* 'stderr' into cout,
        // which printed a pointer value; write the message to cerr instead.
        cerr<<" CUDA Error: "<<message<<" : "<<error<<". In "<<file<<" line "<<line<<endl;
        exit(-1);
    }
}
// Enumerate all CUDA devices and print their key properties.
int main(int argc, char ** argv) {
    int deviceCount;
    cudasafe(cudaGetDeviceCount(&deviceCount), "GetDeviceCount", __FILE__, __LINE__);
    cout<<"Number of CUDA devices: "<<deviceCount<<endl;
    for (int dev = 0; dev < deviceCount; dev++) {
        cudaDeviceProp deviceProp;
        int cuda_v;
        cudasafe(cudaRuntimeGetVersion(&cuda_v), "Get Runtime Version",__FILE__, __LINE__);
        cudasafe(cudaGetDeviceProperties(&deviceProp, dev), "Get Device Properties", __FILE__, __LINE__);
        if (dev == 0) {
            // compute capability 9999.9999 is the "no real device" sentinel
            if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
                cout<<"No CUDA GPU has been detected\n";
                return -1;
            } else if (deviceCount == 1) {
                cout<<"There is 1 device supporting CUDA\n";
            } else {
                cout<<"There are "<<deviceCount<<" devices supporting CUDA\n";
            }
        }
        cout<<"For device #"<<dev<<"\n";
        cout<<"Device name: "<<deviceProp.name<<endl;
        cout<<"CUDA Version: "<<cuda_v<<endl;
        cout<<"Major revision number: "<<deviceProp.major<<endl;
        cout<<"Minor revision Number: "<<deviceProp.minor<<endl;
        cout<<"Total Global Memory: "<<deviceProp.totalGlobalMem<<endl;
        cout<<"Total shared mem per block: "<<deviceProp.sharedMemPerBlock<<endl;
        cout<<"Total const mem size: "<<deviceProp.totalConstMem<<endl;
        cout<<"Warp size: "<<deviceProp.warpSize<<endl;
        cout<<"Maximum block dimensions: "<<deviceProp.maxThreadsDim[0]
            <<" x "<<deviceProp.maxThreadsDim[1]<<" x "<<deviceProp.maxThreadsDim[2]<<""<<endl;
        cout<<"Maximum grid dimensions: "<<deviceProp.maxGridSize[0]<<" x "
            <<deviceProp.maxGridSize[1]<<" x "<<deviceProp.maxGridSize[2]<<""<<endl;
        cout<<"Clock Rate: "<<deviceProp.clockRate<<endl;
        cout<<"Number of muliprocessors: "<<deviceProp.multiProcessorCount<<endl;
    }
    return 0;
}
|
5,675 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <cmath>
#include <time.h>
using namespace std;
// Square matrix of doubles backed by a raw double** (one heap row per
// line).  The size is fixed at construction.  There is no destructor or
// copy control: objects copy shallowly, and callers must release storage
// explicitly via free_memory().
class Matrix
{
private:
    int rozmiar_macierzy;   // matrix dimension (matrices are square)
    double** macierz;       // row pointers; each row allocated separately
public:
    // Allocates rozmiar x rozmiar uninitialized storage.
    Matrix(int rozmiar)
    {
        rozmiar_macierzy = rozmiar;
        macierz = new double* [rozmiar];
        for (int i = 0; i < rozmiar; i++) {
            macierz[i] = new double[rozmiar];
        }
    }
    void free_memory();                                    // release heap storage
    void random_values();                                  // fill with uniform [-1,1]
    int get_size();                                        // matrix dimension
    void set_value(int line, int column, double value);    // element write
    double get_value(int line, int column);                // element read
    Matrix transposition();                                // returns a new transposed matrix
    void write_matrix();                                   // print to stdout
};
// Release all heap storage held by the matrix.
void Matrix::free_memory()
{
    // BUG FIX: the original deleted only the array of row pointers,
    // leaking every row buffer allocated in the constructor.
    for (int i = 0; i < rozmiar_macierzy; i++) {
        delete[] macierz[i];
    }
    delete[] macierz;
}
// Fill every element with a uniform pseudo-random double in [-1, 1].
void Matrix::random_values()
{
    for (int i = 0; i < rozmiar_macierzy; i++) {
        for (int j = 0; j < rozmiar_macierzy; j++) {
            macierz[i][j] = 2 * ((double)rand() / (double)RAND_MAX) - 1;
        }
    }
}
// Return the matrix dimension (matrices are square).
int Matrix::get_size()
{
    return rozmiar_macierzy;
}
// Write one element; no bounds checking is performed.
void Matrix::set_value(int line, int column, double value)
{
    macierz[line][column] = value;
}
// Read one element; no bounds checking is performed.
double Matrix::get_value(int line, int column)
{
    return macierz[line][column];
}
// Build and return a new matrix At with At[i][j] = this[j][i].
Matrix Matrix::transposition()
{
    Matrix At(rozmiar_macierzy);
    for (int i = 0; i < rozmiar_macierzy; i++) {
        for (int j = 0; j < rozmiar_macierzy; j++) {
            At.set_value(i, j, macierz[j][i]);
        }
    }
    return At;
}
// Print the matrix row by row as "| a b c |", followed by a blank line.
void Matrix::write_matrix()
{
    for (int i = 0; i < rozmiar_macierzy; i++) {
        cout << "| ";
        for (int j = 0; j < rozmiar_macierzy; j++) {
            cout << macierz[i][j] << " ";
        }
        cout << "|" << endl;
    }
    cout << endl;
}
// Naive O(n^3) matrix product C = A*B (arguments passed by value; the
// Matrix class copies shallowly, so the underlying data is shared).
Matrix multiplication(Matrix A, Matrix B)
{
    const int n = A.get_size();
    Matrix C(n);
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            double acc = 0;
            for (int t = 0; t < n; t++) {
                acc = acc + A.get_value(row, t) * B.get_value(t, col);
            }
            C.set_value(row, col, acc);
        }
    }
    return C;
}
// Element-wise matrix sum D = A + B.
Matrix addition(Matrix A, Matrix B)
{
    const int n = A.get_size();
    Matrix D(n);
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            D.set_value(row, col, A.get_value(row, col) + B.get_value(row, col));
        }
    }
    return D;
}
// Flatten a Matrix row-major into a plain C array of n*n doubles.
void copy_values(Matrix macierz1, double macierz2[])
{
    const int n = macierz1.get_size();
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            macierz2[row * n + col] = macierz1.get_value(row, col);
        }
    }
}
// macierz2 = transpose(macierz1) for a flattened n x n matrix
// (n = rozmiar[0]); each thread walks elements with a grid-wide stride.
__global__ void deviceTransposition(double* macierz1, double* macierz2, int* rozmiar)
{
    unsigned long long int stride = blockDim.x * gridDim.x;
    for (unsigned long long int idx = threadIdx.x + blockIdx.x * blockDim.x;
         idx < rozmiar[0] * rozmiar[0];
         idx += stride) {
        macierz2[idx] = macierz1[(idx % rozmiar[0]) * rozmiar[0] + idx / rozmiar[0]];
    }
}
// macierz3 = macierz1 * macierz2 for flattened n x n matrices
// (n = rozmiar[0]).  Each thread covers output elements with an x-grid
// stride; the inner product is strided over the y-grid (y extent is 1 for
// the 1-D launches used in main, so each thread computes full products).
__global__ void deviceMultiplication(double* macierz1, double* macierz2, double* macierz3, int* rozmiar)
{
    unsigned long long int i = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned long long int j_start = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned long long int siatkax = blockDim.x * gridDim.x;
    unsigned long long int siatkay = blockDim.y * gridDim.y;
    for (; i < rozmiar[0] * rozmiar[0]; i += siatkax) {
        double sum = 0;
        /* BUG FIX: the original reused a single 'j' across iterations of
         * the outer loop without resetting it, so any thread that handled
         * more than one output element wrote 0 for all but the first. */
        for (unsigned long long int j = j_start; j < rozmiar[0]; j += siatkay) {
            sum = sum + macierz1[((i / rozmiar[0]) * rozmiar[0]) + j] * macierz2[i % rozmiar[0] + j * rozmiar[0]];
        }
        macierz3[i] = sum;
    }
}
// Element-wise macierz3 = macierz1 + macierz2 over n*n flattened entries
// (n = rozmiar[0]); grid-stride over the x dimension.
__global__ void deviceAddition(double* macierz1, double* macierz2, double* macierz3, int* rozmiar)
{
    unsigned long long int stride = blockDim.x * gridDim.x;
    for (unsigned long long int idx = threadIdx.x + blockIdx.x * blockDim.x;
         idx < rozmiar[0] * rozmiar[0];
         idx += stride) {
        macierz3[idx] = macierz1[idx] + macierz2[idx];
    }
}
// Interactive benchmark loop: reads a matrix size, computes A*B and
// A*AT + B*BT + C*CT on the CPU and the GPU, compares timings and the
// maximum element-wise discrepancy between the two results.
int main()
{
    srand(time(NULL));
    while (true) {
        int rozmiar[1];
        cout << "Podaj rozmiar macierzy: ";
        cin >> rozmiar[0];
        Matrix A(rozmiar[0]);
        Matrix B(rozmiar[0]);
        Matrix C(rozmiar[0]);
        Matrix D(rozmiar[0]);
        A.random_values();
        B.random_values();
        /*cout << "macierz A:" << endl;
        A.write_matrix();
        cout << "macierz B:" << endl;
        B.write_matrix();*/
        clock_t start;
        double duration_on_CPU;
        start = clock();
        C = multiplication(A, B);
        /*cout << "Macierz C na CPU:" << endl;
        C.write_matrix();*/
        duration_on_CPU = 1000 * (clock() - start) / CLOCKS_PER_SEC;
        cout << "A*B na CPU zajelo: " << duration_on_CPU << " milisekund." << endl;
        double duration_on_CPU1;
        start = clock();
        D = addition(addition(multiplication(A, A.transposition()), multiplication(B, B.transposition())), multiplication(C, C.transposition()));
        /*cout << "Macierz D na CPU:" << endl;
        D.write_matrix();*/
        duration_on_CPU1 = 1000 * (clock() - start) / CLOCKS_PER_SEC;
        cout << "A*AT + B*BT + C*CT na CPU zajelo: " << duration_on_CPU1 << " milisekund." << endl;
        cout << "Ostatni element C: " << C.get_value(rozmiar[0] - 1, rozmiar[0] - 1) << endl;
        cout << "Ostatni element D: " << D.get_value(rozmiar[0] - 1, rozmiar[0] - 1) << endl;
        // Flat host copies for the GPU path.
        double* A1 = new double[rozmiar[0] * rozmiar[0]];
        double* B1 = new double[rozmiar[0] * rozmiar[0]];
        double* C1 = new double[rozmiar[0] * rozmiar[0]];
        double* D1 = new double[rozmiar[0] * rozmiar[0]];
        copy_values(A, A1);
        copy_values(B, B1);
        double* dev_A;
        double* dev_At;
        double* dev_B;
        double* dev_Bt;
        double* dev_C;
        double* dev_Ct;
        double* dev_D;
        double* dev_wynik;
        double* dev_wynik1;
        int* dev_rozmiar;
        int rozmiarBloku = 1024;
        int liczbaBlokow = (rozmiar[0] * rozmiar[0] + rozmiarBloku - 1) / rozmiarBloku;
        int sization = rozmiar[0] * rozmiar[0] * sizeof(double);
        cudaMalloc((void**)&dev_rozmiar, sizeof(int));
        cudaMalloc((void**)&dev_A, sization);
        cudaMalloc((void**)&dev_At, sization);
        cudaMalloc((void**)&dev_B, sization);
        cudaMalloc((void**)&dev_Bt, sization);
        cudaMalloc((void**)&dev_C, sization);
        cudaMalloc((void**)&dev_Ct, sization);
        cudaMalloc((void**)&dev_D, sization);
        cudaMalloc((void**)&dev_wynik, sization);
        cudaMalloc((void**)&dev_wynik1, sization);
        cudaMemcpy(dev_A, A1, sization, cudaMemcpyHostToDevice);
        cudaMemcpy(dev_B, B1, sization, cudaMemcpyHostToDevice);
        cudaMemcpy(dev_rozmiar, rozmiar, sizeof(int), cudaMemcpyHostToDevice);
        clock_t start2;
        double duration_on_GPU;
        start2 = clock();
        deviceMultiplication<<<liczbaBlokow, rozmiarBloku>>>(dev_A, dev_B, dev_C, dev_rozmiar);
        cudaDeviceSynchronize();
        duration_on_GPU = 1000 * (clock() - start2) / CLOCKS_PER_SEC;
        cudaMemcpy(C1, dev_C, sization, cudaMemcpyDeviceToHost);
        cout << "A*B na GPU zajelo: " << duration_on_GPU << " milisekund." << endl;
        /*cout << "Macierz C na GPU:" << endl;
        for (int i = 0; i < rozmiar[0] * rozmiar[0]; i++) {
            if (i % rozmiar[0] == 0) cout << endl;
            cout << C1[i] << " | ";
        }
        cout << endl;*/
        double duration_on_GPU1;
        start2 = clock();
        // D = A*AT, then += B*BT and += C*CT via the two temp buffers.
        deviceTransposition<<<liczbaBlokow, rozmiarBloku>>>(dev_A, dev_At, dev_rozmiar);
        cudaDeviceSynchronize();
        deviceMultiplication<<<liczbaBlokow, rozmiarBloku>>>(dev_A, dev_At, dev_D, dev_rozmiar);
        cudaDeviceSynchronize();
        deviceTransposition<<<liczbaBlokow, rozmiarBloku>>>(dev_B, dev_Bt, dev_rozmiar);
        cudaDeviceSynchronize();
        deviceMultiplication<<<liczbaBlokow, rozmiarBloku>>>(dev_B, dev_Bt, dev_wynik, dev_rozmiar);
        cudaDeviceSynchronize();
        deviceTransposition<<<liczbaBlokow, rozmiarBloku>>>(dev_C, dev_Ct, dev_rozmiar);
        cudaDeviceSynchronize();
        deviceMultiplication<<<liczbaBlokow, rozmiarBloku>>>(dev_C, dev_Ct, dev_wynik1, dev_rozmiar);
        cudaDeviceSynchronize();
        deviceAddition<<<liczbaBlokow, rozmiarBloku>>>(dev_wynik, dev_D, dev_D, dev_rozmiar);
        cudaDeviceSynchronize();
        deviceAddition<<<liczbaBlokow, rozmiarBloku>>>(dev_wynik1, dev_D, dev_D, dev_rozmiar);
        cudaDeviceSynchronize();
        duration_on_GPU1 = 1000 * (clock() - start2) / CLOCKS_PER_SEC;
        cudaMemcpy(D1, dev_D, sization, cudaMemcpyDeviceToHost);
        cout << "A*AT + B*BT + C*CT na GPU zajelo: " << duration_on_GPU1 << " milisekund." << endl;
        cout << "Ostatni element C: " << C1[rozmiar[0] * rozmiar[0] - 1] << endl;
        cout << "Ostatni element D: " << D1[rozmiar[0] * rozmiar[0] - 1] << endl;
        /*cout << "Macierz D na GPU:" << endl;
        for (int i = 0; i < rozmiar[0] * rozmiar[0]; i++) {
            if (i % rozmiar[0] == 0) cout << endl;
            cout << D1[i] << " | ";
        }
        cout << endl;*/
        cout << endl;
        double stosunek;
        if (duration_on_GPU != 0) {
            stosunek = duration_on_CPU / duration_on_GPU;
            cout << "Pierwsze obliczenia na GPU sa " << stosunek << " raza szybsze." << endl;
        }
        else {
            cout << "Pierwszy czas na GPU jest zerowy, niemozliwe wyliczenie stosunku" << endl;
        }
        if (duration_on_GPU1 != 0) {
            stosunek = duration_on_CPU1 / duration_on_GPU1;
            cout << "Drugie obliczenia na GPU sa " << stosunek << " raza szybsze." << endl;
        }
        else {
            cout << "Drugi czas na GPU jest zerowy, niemozliwe wyliczenie stosunku" << endl;
        }
        cout << endl;
        // Maximum element-wise CPU/GPU discrepancy for C, then for D.
        double max = 0;
        for (int i = 0; i < rozmiar[0] * rozmiar[0]; i++) {
            if (abs(C.get_value(i / rozmiar[0], i % rozmiar[0]) - C1[i]) > max) max = abs(C.get_value(i / rozmiar[0], i % rozmiar[0]) - C1[i]);
        }
        if (max == 0) cout << "Nie ma roznicy miedzy macierzami C z CPU i GPU" << endl;
        else cout << "W miejscu najwiekszej rozbieznosci macierze C roznia sie o " << max << endl;
        max = 0;
        for (int i = 0; i < rozmiar[0] * rozmiar[0]; i++) {
            if (abs(D.get_value(i / rozmiar[0], i % rozmiar[0]) - D1[i]) > max) max = abs(D.get_value(i / rozmiar[0], i % rozmiar[0]) - D1[i]);
        }
        if (max == 0) cout << "Nie ma roznicy miedzy macierzami D z CPU i GPU" << endl;
        else cout << "W miejscu najwiekszej rozbieznosci macierze D roznia sie o " << max << endl;
        cout << endl;
        A.free_memory();
        B.free_memory();
        C.free_memory();
        D.free_memory();
        cudaFree(dev_rozmiar);
        cudaFree(dev_A);
        cudaFree(dev_B);
        cudaFree(dev_C);
        cudaFree(dev_D);
        cudaFree(dev_wynik);
        // BUG FIX: these four buffers were allocated on every pass of the
        // endless loop but never released, leaking device memory each time.
        cudaFree(dev_At);
        cudaFree(dev_Bt);
        cudaFree(dev_Ct);
        cudaFree(dev_wynik1);
        delete[] A1;
        delete[] B1;
        delete[] C1;
        delete[] D1;
    }
    return 0;
}
5,676 | #include <stdio.h>
#include <cuda_runtime.h>
// #include <helper_cuda.h>
#define N 20
// Double each of the first N elements of 'a' in place; one thread per
// element with a tail guard.
__global__ void doubleElements(int *a){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx >= N)
        return;
    a[idx] *= 2;
}
// Allocate N ints in managed memory, double them on the GPU, then print
// the results from the host after synchronizing.
int main(void){
    int *a = NULL;
    size_t bytes = N * sizeof(int);
    cudaMallocManaged(&a, bytes); // managed: usable from host and device
    for(int i = 0; i < N; ++i){
        a[i] = i;
    }
    doubleElements<<<2,10>>>(a);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to launch kernel: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaDeviceSynchronize(); // kernel must finish before the host reads a[]
    for(int i = 0; i < N; ++i){
        printf("%d ", a[i]);
    }
    printf("\n");
    cudaFree(a);
    return 0;
}
5,677 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated single-thread numeric stress kernel (the file header says
 * "Do not modify"); repeatedly overwrites 'comp' with float expressions
 * built from the var_* arguments and prints the final value.  All three
 * nested loops reuse the name 'i', each shadowing the outer one. */
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27) {
    if (comp <= (var_4 + (var_5 / (+1.8434E-22f - -0.0f - +1.3563E-43f * -1.4734E5f)))) {
        for (int i=0; i < var_1; ++i) {
            for (int i=0; i < var_2; ++i) {
                for (int i=0; i < var_3; ++i) {
                    comp = (-1.9708E-36f - -1.6806E-35f * var_6);
                    float tmp_1 = expf((-1.6058E0f + +1.0675E-41f));
                    comp = tmp_1 + cosf(atan2f((var_7 * var_8), var_9 * (-0.0f * (-1.3537E-43f - acosf(var_10 * var_11 * var_12)))));
                    // NOTE(review): +1.2515E14f / -0.0f is an intentional
                    // division by (negative) zero, i.e. -inf in IEEE-754.
                    if (comp < (+1.2515E14f / -0.0f)) {
                        float tmp_2 = var_13 - expf((var_14 / var_15 * var_16));
                        float tmp_3 = +1.2959E-42f;
                        comp += tmp_3 * tmp_2 * acosf((var_17 + -1.3196E2f / (var_18 + (var_19 - var_20))));
                    }
                    if (comp >= log10f(-1.1135E36f)) {
                        comp = (+1.6783E-27f / (var_21 / var_22 - -1.2784E25f / (-1.5162E-44f / var_23)));
                        comp = cosf(-1.5353E-42f);
                        comp += (var_24 * atan2f(-1.6103E36f, (+1.3432E36f + -1.6133E19f + coshf((var_25 + (+1.4884E-37f - -0.0f * var_26 - var_27 / -1.8100E35f))))));
                    }
                }
            }
        }
    }
    printf("%.17g\n", comp);
}
// Allocate a 10-element float buffer with every entry set to v.
// Caller owns the returned memory (free()).
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float)*10);
    for (int i = 0; i < 10; ++i) {
        buf[i] = v;
    }
    return buf;
}
// Driver for the generated kernel: parses 28 command-line values and
// launches compute<<<1,1>>>.
// NOTE(review): argc is never validated -- running with fewer than 28
// arguments reads past the end of argv; the file header forbids
// modification, so this is only flagged.
int main(int argc, char** argv) {
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    int tmp_3 = atoi(argv[3]);
    int tmp_4 = atoi(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    float tmp_26 = atof(argv[26]);
    float tmp_27 = atof(argv[27]);
    float tmp_28 = atof(argv[28]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28);
    // Flush the device-side printf before the process exits.
    cudaDeviceSynchronize();
    return 0;
}
|
5,678 | #include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
using namespace std;
#define TILE 10
// Tiled shared-memory matrix multiply: C (m x k) = A (m x n) * B (n x k).
// NOTE on naming: in this kernel 'n' is the INNER dimension and 'k' is the
// output width -- the host calls multiplys(m, n, k, ...) with its own
// (rows, cols, inner) so the arguments land on the right parameters.
// Requires blockDim == (TILE, TILE); uses 2*TILE*TILE ints of static
// shared memory.
__global__ void multiplys(int m, int k, int n, int *A, int *B, int *C)
{
    __shared__ int dA[TILE][TILE];
    __shared__ int dB[TILE][TILE];
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int Row = by * blockDim.y + ty;   // output row in C
    int Col = bx * blockDim.x + tx;   // output column in C
    int Cvalue = 0;
    // March tiles along the inner dimension; ceil(n / TILE) steps.
    for(int t = 0; t < (n-1)/TILE + 1; t++)
    {
        // Stage one tile of A and one of B, zero-padding out-of-range
        // elements so the inner product below needs no bounds checks.
        if(Row <m && (t * TILE + tx)< n)
            dA[ty][tx] = A[Row*n + t*TILE+tx];
        else
            dA[ty][tx] = 0;
        if((t*TILE +ty)<n && Col < k)
            dB[ty][tx] = B[(t*TILE+ty)*k + Col];
        else
            dB[ty][tx] = 0;
        __syncthreads();   // tiles fully staged before use
        for(int i = 0; i < TILE; i++)
            Cvalue += dA[ty][i] * dB[i][tx];
        __syncthreads();   // all reads done before the next overwrite
    }
    if(Row <m && Col <k)
        C[Row*k+Col] = Cvalue;
}
// Reads the expected product from argv[1] and the factors A, B from
// argv[2]/argv[3] (each file: two dimensions then the row-major values),
// multiplies on the GPU and verifies against the file's answer.
int main(int argc, char* argv[])
{
    char file1[100], file2[100], file3[100];
    strcpy(file3,argv[1]);   // expected result C
    strcpy(file1,argv[2]);   // matrix A
    strcpy(file2,argv[3]);   // matrix B
    FILE *handle1 = fopen(file1, "r");
    FILE *handle2 = fopen(file2, "r");
    FILE *handle3 = fopen(file3,"r");
    int m,n,k;
    fscanf(handle1, "%d", &m);
    fscanf(handle1, "%d", &k);
    fscanf(handle2, "%d", &k);
    fscanf(handle2, "%d", &n);
    fscanf(handle3, "%d", &m);
    fscanf(handle3, "%d", &n);
    int (*pA), (*pB), (*pC);   // device pointers
    int i,j;
    // NOTE(review): ~4 MB of fixed 500x500 stack arrays -- inputs larger
    // than 500x500 silently overflow them, and the stack may be tight.
    int a[500*500], b[500*500], c[500*500], c_ans[500*500];
    for(i=0;i<m;i++)
        for(j=0;j<k;j++)
        {
            fscanf(handle1, "%d", &a[i*k + j]);
        }
    for(i=0;i<k;i++)
        for(j=0;j<n;j++)
        {
            fscanf(handle2, "%d", &b[i*n + j]);
        }
    for(i=0;i<m;i++)
        for(j=0;j<n;j++)
        {
            fscanf(handle3, "%d", &c_ans[i*n + j]);
        }
    cudaMalloc((void**)&pA, (m*k)*sizeof(int));
    cudaMalloc((void**)&pB, (k*n)*sizeof(int));
    cudaMalloc((void**)&pC, (m*n)*sizeof(int));
    cudaMemcpy(pA, a, (m*k)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(pB, b, (k*n)*sizeof(int), cudaMemcpyHostToDevice);
    // NOTE(review): 'c' is uninitialised here; the copy is harmless (the
    // kernel overwrites pC) but unnecessary.
    cudaMemcpy(pC, c, (m*n)*sizeof(int), cudaMemcpyHostToDevice);
    dim3 dimBlock(TILE, TILE);
    dim3 dimGrid(max(m,max(n,k))/dimBlock.x+1, max(m,max(k,n))/dimBlock.y+1);
    //cout<<dimGrid.x<<" "<<dimGrid.y<<endl;
    // Kernel parameters are (rows, output-width, inner-dim), so the host's
    // (m, n, k) maps onto them correctly -- see the kernel's naming note.
    multiplys<<<dimGrid,dimBlock>>>(m,n,k,pA,pB,pC);
    // Blocking copy-back also synchronizes with the kernel.
    cudaMemcpy(c, pC, (m*n)*sizeof(int), cudaMemcpyDeviceToHost);
    cout<<"Verifying results: \n";
    int flag = 1;
    for(i=0;i<m;i++)
    {
        for(j=0;j<n;j++)
        {
            if(c[i*n + j] != c_ans[i*n+ j])
            {
                flag = 0;
                cout<<"Wrong answer\n" << c[i*n + j]<<" "<<c_ans[i*n + j]<<endl<<i<<" "<<j<<endl;
                break;
            }
        }
        if(!flag)
            break;
    }
    if(flag)
        cout<<"Answer verified\n";
    // NOTE(review): the three FILE handles are never fclose()d.
    cudaFree(pA);
    cudaFree(pB);
    cudaFree(pC);
}
5,679 | #include <iostream>
#include <cmath>
// nvcc cuda.cu -Xcompiler=-fPIC -g -gencode arch=compute_12,code=sm_12
// Element-wise y[i] += x[i] over n floats, covered with a grid-stride
// loop so any launch configuration processes the whole range.
__global__
void add(int n, float *x, float *y) {
    const int start = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int i = start; i < n; i += step) {
        y[i] = y[i] + x[i];
    }
}
// Initialize both arrays so that x[i] == y[i] == i (as floats).
// Uses the same grid-stride pattern as `add`, so the launch shape is free.
__global__
void init(int n, float *x, float *y) {
  int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
    x[i] = i;
    y[i] = i;
  }
}
// Abort with function/line context and the runtime's message when a CUDA
// API call returns anything other than cudaSuccess.
#define CHECK(statement) { \
  cudaError_t res = (statement); \
  if (res != 0) { \
    std::cout << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": "<< #statement << ": " << cudaGetErrorString(res) << "\n"; \
    exit(1); \
  } \
}
/* Stress driver: repeatedly re-initializes two 1M-element device arrays and
 * adds them, then reads both back through pinned host buffers. */
int main(void) {
  int N = 1<<20;
  // Device buffers.
  float *x;
  CHECK(cudaMalloc(&x, N * sizeof(float)));
  float *y;
  CHECK(cudaMalloc(&y, N * sizeof(float)));
  // Pinned host staging buffers for the final device-to-host copies.
  struct { float *x, *y; } host;
  CHECK(cudaMallocHost(&host.x, sizeof(float) * N));
  CHECK(cudaMallocHost(&host.y, sizeof(float) * N));
  int blockSize = 512;
  int numBlocks = (N + blockSize - 1) / blockSize;   // ceil-div grid
  for (int i = 0; i < 1<<20; ++i) {
    init<<<numBlocks, blockSize>>>(N, x, y);
    CHECK(cudaGetLastError());          // catch launch-configuration errors
    CHECK(cudaDeviceSynchronize());     // surface in-kernel faults
    add<<<numBlocks, blockSize>>>(N, x, y);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
  }
  CHECK(cudaMemcpy(host.x, x, sizeof(float) * N, cudaMemcpyDeviceToHost));
  CHECK(cudaMemcpy(host.y, y, sizeof(float) * N, cudaMemcpyDeviceToHost));
  // Cleanup failures are still errors worth reporting.
  CHECK(cudaFree(x));
  CHECK(cudaFree(y));
  CHECK(cudaFreeHost(host.x));
  CHECK(cudaFreeHost(host.y));
  return 0;
}
5,680 | /**
* jrc_cuda_rho.cu
* block loading rho calculation. should be much faster
* system('nvcc -ptx -m 64 -arch sm_35 jrc_cuda_rho.cu')
* i1 is multiple of chunk (16)
* J. James Jun, Vidrio Technologies, LLC., 2017 Jun 11
* 7/13/17: fDc_spk option added, which uses spike-specific distance cut-off (dc)
*/
#include <cuda_runtime.h>
#include <math.h>
// Helper macros. The extra OUTER parentheses are required so the ternary
// binds correctly when the macro is embedded in a larger expression
// (e.g. `2 * ABS(x)` or `MIN(a,b) + 1` mis-parsed with the old definitions).
#define ABS(my_val) (((my_val) < 0) ? (-1*(my_val)) : (my_val))
#define MIN(A,B) (((A)<(B)) ? (A) : (B))
#define MAX(A,B) (((A)>(B)) ? (A) : (B))
#define NTHREADS 128           // threads per block
#define MAXDIM 45              // max feature dimension cached in shared memory
#define CHUNKSIZE 16           // primary spikes handled per block
#define SINGLE_INF (3.402E+38) // equivalent to NAN. consider -1 value
/** Main entry point.
* Works out where the current thread should read/write to global memory
* and calls doIterations to do the actual work.
* Step through one B at a time
*/
// Density (rho) computation for spike sorting.
// Each block handles CHUNKSIZE consecutive "primary" spikes (base index
// blockIdx-derived) and sweeps all n_spikes_all candidate spikes with
// blockDim.x threads. For every (primary, candidate) pair whose spike-order
// distance is within dn_max, it counts how many candidates fall inside the
// feature-distance cutoff; rho = counted / compared.
// site_constants = {n_spikes_primary, n_spikes_all, n_features, dn_max, fDc_spk}.
// Requires n_features <= MAXDIM and blockDim.x == NTHREADS (shared arrays
// are sized by those constants).
__global__ void jrc_cuda_rho(float *rho, const float *site_features, const int *spike_order, const int *site_constants, const float dist_cut2) {
    int i1 = (blockIdx.x + blockIdx.y * gridDim.x) * CHUNKSIZE; // base index of i1
    int thread_x = threadIdx.x; // nThreadsGPU for i12 index
    int i1_thread_x = i1 + thread_x;
    int n_spikes_primary = site_constants[0];
    int n_spikes_all = site_constants[1];
    int n_features = site_constants[2];
    int dn_max = site_constants[3];
    int fDc_spk = site_constants[4];
    __shared__ int spike_order_chunk[CHUNKSIZE];
    __shared__ float features_primary[MAXDIM][CHUNKSIZE];
    __shared__ int rho_chunk[NTHREADS][CHUNKSIZE]; // count then divide later
    __shared__ int mnComp1_[NTHREADS][CHUNKSIZE]; // count number of elements compared
    __shared__ float vrDc1_[CHUNKSIZE]; // use if fDc_spk=1
    // Stage this chunk's primary-spike feature vectors into shared memory;
    // out-of-range chunk slots are zero-filled.
    if (thread_x < n_features) {
        for (int i_c = 0; i_c < CHUNKSIZE; i_c++) {
            int i1_c = i_c + i1;
            if (i1_c < n_spikes_primary) {
                features_primary[thread_x][i_c] = site_features[thread_x + i1_c * n_features];
            } else {
                features_primary[thread_x][i_c] = 0.0f;
            }
        }
    }
    // NOTE(review): slots of spike_order_chunk beyond n_spikes_primary are
    // left uninitialized here yet still read at the time-distance test below;
    // harmless only because those rho values are never written out — confirm.
    if (thread_x < CHUNKSIZE && i1_thread_x < n_spikes_primary) {
        spike_order_chunk[thread_x] = spike_order[i1_thread_x];
    }
    // Zero this thread's per-chunk counters.
    for (int i_c = 0; i_c < CHUNKSIZE; i_c++) {
        rho_chunk[thread_x][i_c] = 0;
        mnComp1_[thread_x][i_c] = 0;
    }
    // Spike-specific distance cutoff: dist_cut2 scaled by the primary
    // spike's own squared feature norm (only used when fDc_spk == 1).
    if (thread_x < CHUNKSIZE && fDc_spk == 1) {
        vrDc1_[thread_x] = 0.0f; //init
        for (int i_feature = 0; i_feature < n_features; i_feature++) {
            float temp = features_primary[i_feature][thread_x];
            vrDc1_[thread_x] += (temp * temp);
        }
        vrDc1_[thread_x] *= dist_cut2;
    }
    __syncthreads();
    // Each thread strides over all candidate spikes; for each candidate it
    // accumulates squared feature distances to every primary in the chunk.
    for (int i12_tx = thread_x; i12_tx < n_spikes_all; i12_tx += blockDim.x) {
        int iiSpk12_ord_tx = spike_order[i12_tx];
        // compute distance
        float feature_dists2_chunk[CHUNKSIZE]; // square of pairwise feature distances for chunk
        for (int i_c = 0; i_c < CHUNKSIZE; i_c++) {
            feature_dists2_chunk[i_c] = 0.0f;
        }
        for (int i_feature = 0; i_feature < n_features; i_feature++) {
            float fet12_tx = site_features[i_feature + i12_tx * n_features];
            for (int i_c = 0; i_c < CHUNKSIZE; ++i_c) {
                float temp = fet12_tx - features_primary[i_feature][i_c]; // z_i = x_i - y_i
                feature_dists2_chunk[i_c] += temp * temp; // dist += z_i^2
            }
        }
        // Count comparisons within the temporal window, and hits within the
        // (global or spike-specific) feature-distance cutoff.
        for (int i_c = 0; i_c < CHUNKSIZE; ++i_c) {
            int time_dist = ABS(spike_order_chunk[i_c] - iiSpk12_ord_tx);
            if (time_dist <= dn_max) {
                ++mnComp1_[thread_x][i_c];
                if (fDc_spk == 0) {
                    if (feature_dists2_chunk[i_c] <= dist_cut2) {
                        ++rho_chunk[thread_x][i_c];
                    }
                } else {
                    if (feature_dists2_chunk[i_c] < vrDc1_[i_c]) {
                        ++rho_chunk[thread_x][i_c];
                    }
                }
            }
        }
    } // for
    // Reduce the per-thread counters: thread i_c (< CHUNKSIZE) sums over all
    // blockDim.x partial counts and writes rho = hits / comparisons.
    __syncthreads();
    if (thread_x < CHUNKSIZE) { // use thread_x as i_c
        int nRho1 = 0;
        int nComp1 = 0;
        for (int tx1 = 0; tx1 < blockDim.x; tx1++) {
            nRho1 += rho_chunk[tx1][thread_x];
            nComp1 += mnComp1_[tx1][thread_x];
        }
        if (i1_thread_x < n_spikes_primary) {
            rho[i1_thread_x] = (float)(((double) (nRho1)) / ((double) nComp1));
        }
    }
}
5,681 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Dense n x n matrix multiply, one thread per output element:
// matrixC[i][j] = row i of matrixA dot column j of matrixB (row-major).
__global__ void dotProduct( float* matrixA, float* matrixB, float* matrixC, int n) {
    int j = blockIdx.x * blockDim.x + threadIdx.x;  // output column
    int i = blockIdx.y * blockDim.y + threadIdx.y;  // output row
    // Bounds guard: grids that over-cover the matrix must not read or write
    // out-of-range memory (the original had no guard at all).
    if (i >= n || j >= n) {
        return;
    }
    float value = 0;
    int k;
    for (k = 0; k < n; k++) {
        value += matrixA[i * n + k] * matrixB[k * n + j];
    }
    matrixC[i * n + j] = value;
}
/* Demo driver: A is random in [0,99], B is all ones, so every entry of
 * C = A*B equals the corresponding row-sum of A. Prints C. */
int main() {
    int size = 20;
    // Host buffers.
    float *matrixA = (float*) malloc(size * size * sizeof(float));
    float *matrixB = (float*) malloc(size * size * sizeof(float));
    float *matrixC = (float*) malloc(size * size * sizeof(float));
    // Device buffers.
    float *matrixA_d;
    float *matrixB_d;
    float *matrixC_d;
    int i = 0;
    while (i < size * size) {
        matrixA[i] = rand()%100;
        matrixB[i] = 1;
        i++;
    }
    cudaMalloc((void**)&matrixA_d, size * size * sizeof(float));
    cudaMalloc((void**)&matrixB_d, size * size * sizeof(float));
    cudaMalloc((void**)&matrixC_d, size * size * sizeof(float));
    // size x size threads per block: valid while size*size <= 1024.
    dim3 dimBlock(size , size);
    dim3 dimGrid(size / dimBlock.x , size / dimBlock.y);
    cudaMemcpy(matrixA_d, matrixA, size * size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(matrixB_d, matrixB, size * size * sizeof(float), cudaMemcpyHostToDevice);
    dotProduct<<<dimGrid,dimBlock>>>(matrixA_d,matrixB_d,matrixC_d,size);
    cudaMemcpy(matrixC, matrixC_d, size * size * sizeof(float), cudaMemcpyDeviceToHost);
    for (i = 0; i < size * size; i ++) {
        if(i > 0 && i % size == 0)
            printf("\n");
        printf("%.2f ", matrixC[i]);
    }
    printf("\n");
    // Release device and host memory (the original leaked all six buffers).
    cudaFree(matrixA_d);
    cudaFree(matrixB_d);
    cudaFree(matrixC_d);
    free(matrixA);
    free(matrixB);
    free(matrixC);
}
5,682 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#ifndef NDEBUG
#define CHECK_STATUS(status) \
if (status != cudaSuccess) \
fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\
cudaGetErrorString(status))
#else
#define CHECK_STATUS(status) status
#endif
//////////////////////////////////////////////////////////////////////////////////////////////////
// 矩阵数据结构定义,行优先存储。
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// block大小
#define BLOCK_SIZE 16
// 获取矩阵的一个元素
// Read element (row, col) of the row-major matrix A.
__device__ float GetElement(const Matrix A, int row, int col)
{
    return A.elements[col + row * A.stride];
}
// 设置矩阵的一个元素
// Write `value` into element (row, col) of the row-major matrix A.
__device__ void SetElement(Matrix A, int row, int col, float value)
{
    A.elements[col + row * A.stride] = value;
}
// 在A中,获取一个大小为BLOCK_SIZExBLOCK_SIZE的子矩阵
// Return a view of the BLOCK_SIZE x BLOCK_SIZE tile of A located at tile
// coordinates (row, col). The view aliases A's storage (no copy) and keeps
// A's stride so element indexing still works.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix tile;
    tile.width = BLOCK_SIZE;
    tile.height = BLOCK_SIZE;
    tile.stride = A.stride;
    tile.elements = A.elements + A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col;
    return tile;
}
// 声明矩阵相乘的核函数
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// C=A*B,矩阵乘法
// C = A * B, host side: copies A and B to the device, launches MatMulKernel
// with one thread per element of C, then copies the result back into C.
// NOTE(review): the grid has no remainder blocks, so this assumes B.width
// and A.height (and, in the kernel, A.width) are exact multiples of
// BLOCK_SIZE — confirm callers only pass such shapes.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Copy A and B into device memory.
    Matrix d_A;
    d_A.width = d_A.stride = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    CHECK_STATUS(cudaMalloc(&d_A.elements, size));
    CHECK_STATUS(cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice));
    Matrix d_B;
    d_B.width = d_B.stride = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    CHECK_STATUS(cudaMalloc(&d_B.elements, size));
    CHECK_STATUS(cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice));
    // Allocate C in device memory.
    Matrix d_C;
    d_C.width = d_C.stride = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    CHECK_STATUS(cudaMalloc(&d_C.elements, size));
    // Launch the kernel: one BLOCK_SIZE x BLOCK_SIZE block per tile of C.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    CHECK_STATUS(cudaGetLastError());
    // Copy the result back to host memory (cudaMemcpy synchronizes).
    CHECK_STATUS(cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost));
    // Free device memory.
    CHECK_STATUS(cudaFree(d_A.elements));
    CHECK_STATUS(cudaFree(d_B.elements));
    CHECK_STATUS(cudaFree(d_C.elements));
}
// 矩阵乘法,每个thread完成一行乘以一列
// Tiled shared-memory matrix multiply: each block computes one
// BLOCK_SIZE x BLOCK_SIZE tile of C, each thread one element of that tile.
// Requires blockDim == (BLOCK_SIZE, BLOCK_SIZE) and all dimensions to be
// multiples of BLOCK_SIZE (no remainder handling).
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    // Tile coordinates of this block within C.
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    // Each block computes one sub-matrix Csub of C.
    Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
    // Each thread accumulates one element of Csub.
    float Cvalue = 0;
    // Thread coordinates within the tile.
    int row = threadIdx.y;
    int col = threadIdx.x;
    // Loop over all the sub-matrices of A and B that are required to
    // compute Csub; multiply each pair of tiles and accumulate.
    for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
        // Get sub-matrix Asub of A.
        Matrix Asub = GetSubMatrix(A, blockRow, m);
        // Get sub-matrix Bsub of B.
        Matrix Bsub = GetSubMatrix(B, m, blockCol);
        // Shared memory used to store Asub and Bsub respectively.
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Each thread loads one element of each tile from global memory.
        As[row][col] = GetElement(Asub, row, col);
        Bs[row][col] = GetElement(Bsub, row, col);
        // Synchronize so both tiles are fully loaded before the dot product.
        __syncthreads();
        // Row of As times column of Bs.
        for (int e = 0; e < BLOCK_SIZE; ++e)
            Cvalue += As[row][e] * Bs[e][col];
        // Synchronize so the tiles are not overwritten by the next
        // iteration while some threads are still reading them.
        __syncthreads();
    }
    // Each thread writes its element of Csub back to global memory.
    SetElement(Csub, row, col, Cvalue);
}
// 分配矩阵内存并赋值
// Allocate a height x width row-major matrix and fill every element with
// `value`. The caller owns the storage and releases it via freeMatrix().
void newMatrix(Matrix *matrix, uint height, uint width, float value){
    matrix->height = height;
    matrix->width = width;
    matrix->stride = width;
    const size_t count = (size_t)height * width;
    matrix->elements = new float[count];
    for (size_t idx = 0; idx < count; ++idx) {
        matrix->elements[idx] = value;
    }
}
// 释放矩阵内存
// Release the matrix's element storage and clear the now-dangling pointer
// so a double free / stale read is caught early.
void freeMatrix(Matrix *matrix){
    delete[] matrix->elements;
    matrix->elements = nullptr;
}
// 打印矩阵内存
// Print the matrix row by row, tab-separated, two decimals per element.
void printMatrix(Matrix matrix){
    for (size_t r = 0; r < matrix.height; r++){
        printf("row:%lu\t", r);
        for (size_t c = 0; c < matrix.width; c++){
            printf("%.2f\t", matrix.elements[r*matrix.width + c]);
        }
        printf("\n");
    }
}
// Demo: multiply a 64x16 all-ones matrix by a 16x16 all-ones matrix and
// print the result (every entry should be 16).
int main(int argc, char **argv) {
    CHECK_STATUS(cudaSetDevice(0));
    Matrix a, b, c;
    newMatrix(&a, 64, 16, 1);
    newMatrix(&b, 16, 16, 1);
    newMatrix(&c, 64, 16, 1);
    MatMul(a, b, c);
    printMatrix(c);
    // Release in reverse order of allocation.
    freeMatrix(&c);
    freeMatrix(&b);
    freeMatrix(&a);
    return 0;
}
|
5,683 | #include "includes.h"
#define BLOCK_SIZE 512
#define BLOCK_SIZE_HOUGH 360
#define STEP_SIZE 5
#define NUMBER_OF_STEPS 360/STEP_SIZE
// Circ mask kernel storage
__constant__ int maskKernelX[NUMBER_OF_STEPS];
__constant__ int maskKernelY[NUMBER_OF_STEPS];
// Function to set precalculated relative coordinates for circle boundary coordinates
// Scale 8-bit pixel values into [0, 1]: imgOut[i] = imgIn[i] / 255.
// One thread per pixel; launch with BLOCK_SIZE threads per block.
__global__ void ImageScalingKernel(float *imgOut, float *imgIn, int width, int height)
{
	__shared__ float inData[BLOCK_SIZE];
	// Get the index of pixel
	const int index = blockIdx.x * BLOCK_SIZE + threadIdx.x;
	// Guard BOTH the load and the store: the original read imgIn[index]
	// unconditionally, which is out of bounds for the tail threads of the
	// final partial block.
	if ( index < (width*height) ) {
		inData[threadIdx.x] = imgIn[index];
		imgOut[index] = inData[threadIdx.x] / (float)255;
	}
	// No barrier needed: each thread only touches its own shared slot, and
	// the original's trailing __syncthreads() would now sit after divergent
	// control flow anyway.
}
5,684 | /*****************************************************************************
C-DAC Tech Workshop : hyPACK-2013
October 15-18, 2013
Example : singleStream.cu
Objective : Write a CUDA program to add the values of two array and
print the execution time in ms using streams.
Input : None
Output : Execution in ms
Created : August-2013
E-mail : hpcfte@cdac.in
****************************************************************************/
#include<stdio.h>
#define sizeOfArray 1024*1024
/*CUDA safe call to handle the error efficiently */
/* Inspect the status of a CUDA runtime call: silently continue on success,
 * otherwise print a line-numbered diagnostic for the few specifically
 * handled error codes (or the runtime's message for anything else) and
 * terminate the process. */
void CUDA_SAFE_CALL(cudaError_t call)
{
	cudaError_t ret = call;
	//printf("RETURN FROM THE CUDA CALL:%d\t:",ret);
	switch(ret)
	{
		case cudaSuccess:
			//printf("Success\n");
			break;
		case cudaErrorInvalidValue:
			{
				printf("ERROR: InvalidValue:%i.\n",__LINE__);
				exit(-1);
				break;
			}
		case cudaErrorInvalidDevicePointer:
			{
				printf("ERROR:Invalid Device pointeri:%i.\n",__LINE__);
				exit(-1);
				break;
			}
		case cudaErrorInvalidMemcpyDirection:
			{
				printf("ERROR:Invalid memcpy direction:%i.\n",__LINE__);
				exit(-1);
				break;
			}
		default:
			{
				// Any other failure: report the numeric code and message.
				printf(" ERROR at line :%i.%d' '%s\n",__LINE__,ret,cudaGetErrorString(ret));
				exit(-1);
				break;
			}
	}
}
/*The function of this kernel is to add the values of two arrays copied from host to device*/
// device_result[i] = device_a[i] + device_b[i], one thread per element,
// guarded against grids that over-cover sizeOfArray.
__global__ void arrayAddition(int *device_a, int *device_b, int *device_result)
{
	int threadId = blockIdx.x * blockDim.x + threadIdx.x;
	if (threadId >= sizeOfArray)
		return;
	device_result[threadId] = device_a[threadId] + device_b[threadId];
}
/* Adds two random arrays on the GPU through a single CUDA stream and
 * reports the elapsed time measured with CUDA events. */
int main(int argc, char **argv)
{
	cudaDeviceProp prop;
	int whichDevice, *host_a, *host_b, *host_result, *device_a, *device_b, *device_result;
	CUDA_SAFE_CALL(cudaGetDevice(&whichDevice));
	CUDA_SAFE_CALL(cudaGetDeviceProperties(&prop, whichDevice));
	// Streams only pay off if the device can overlap copies with kernels.
	if(!prop.deviceOverlap)
	{
		printf("Device will not handle overlaps, so no speed up from the stream \n");
		return 0;
	}
	cudaEvent_t start, stop;
	float elapsedTime;
	/* Create the timing events and start the clock. */
	CUDA_SAFE_CALL(cudaEventCreate(&start));
	CUDA_SAFE_CALL(cudaEventCreate(&stop));
	CUDA_SAFE_CALL(cudaEventRecord(start, 0));
	/* Create the stream all async work is issued into. */
	cudaStream_t stream;
	CUDA_SAFE_CALL(cudaStreamCreate(&stream));
	/* Pinned host buffers (required for truly async copies) + device buffers. */
	CUDA_SAFE_CALL(cudaHostAlloc((void **)&host_a, sizeOfArray*sizeof(int), cudaHostAllocDefault));
	CUDA_SAFE_CALL(cudaHostAlloc((void **)&host_b, sizeOfArray*sizeof(int), cudaHostAllocDefault));
	CUDA_SAFE_CALL(cudaHostAlloc((void **)&host_result, sizeOfArray*sizeof(int), cudaHostAllocDefault));
	CUDA_SAFE_CALL(cudaMalloc((void **)&device_a, sizeOfArray* sizeof(int)));
	CUDA_SAFE_CALL(cudaMalloc((void **)&device_b, sizeOfArray* sizeof(int)));
	CUDA_SAFE_CALL(cudaMalloc((void **)&device_result, sizeOfArray* sizeof(int)));
	/* Fill the inputs with small random values. */
	for(int index = 0; index < sizeOfArray; index++)
	{
		host_a[index] = rand()%10;
		host_b[index] = rand()%10;
	}
	/* Upload the inputs asynchronously on the stream. */
	CUDA_SAFE_CALL(cudaMemcpyAsync(device_a, host_a, sizeOfArray*sizeof(int), cudaMemcpyHostToDevice, stream));
	CUDA_SAFE_CALL(cudaMemcpyAsync(device_b, host_b, sizeOfArray*sizeof(int), cudaMemcpyHostToDevice, stream));
	/* Kernel call */
	arrayAddition<<<sizeOfArray/256, 256, 0, stream>>>(device_a, device_b, device_result);
	/* BUG FIX: the original called cudaMemcpyAsync(device_result,
	 * host_result, ..., cudaMemcpyHostToDevice), which overwrote the freshly
	 * computed device result with uninitialized host data. The result must
	 * travel device -> host. */
	CUDA_SAFE_CALL(cudaMemcpyAsync(host_result, device_result, sizeOfArray*sizeof(int), cudaMemcpyDeviceToHost, stream));
	CUDA_SAFE_CALL(cudaStreamSynchronize(stream));
	CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
	CUDA_SAFE_CALL(cudaEventSynchronize(stop));
	CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
	printf("*****************CDAC - Tech Workshop :HeGaPa2012**************\n");
	printf(" \t\t\t july 16-22 \n\n");
	printf("Size of array : %d\n", sizeOfArray);
	printf("Time taken: %3.1f ms\n", elapsedTime);
	/* Release events, stream, and memory (the original leaked the first three). */
	CUDA_SAFE_CALL(cudaEventDestroy(start));
	CUDA_SAFE_CALL(cudaEventDestroy(stop));
	CUDA_SAFE_CALL(cudaStreamDestroy(stream));
	CUDA_SAFE_CALL(cudaFreeHost(host_a));
	CUDA_SAFE_CALL(cudaFreeHost(host_b));
	CUDA_SAFE_CALL(cudaFreeHost(host_result));
	CUDA_SAFE_CALL(cudaFree(device_a));
	CUDA_SAFE_CALL(cudaFree(device_b));
	CUDA_SAFE_CALL(cudaFree(device_result));
	return 0;
}
|
5,685 | //ly2352, Lu Yang, Adaboost, Host version
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cuda_runtime.h>
#define nums 2000
#define cols 256
//int nums = 200,cols = 256;
float **usps;
float *w;
float *d_w;
float *d_sum_w;
int *y;
int *d_y;
float *d_vec, *d_err1, *d_err2;
struct pars{
int return_j;
float theta;
int return_m;
};
// Abort the whole program with a diagnostic when a CUDA runtime call fails.
void cuda_checker(cudaError_t err){
    if (err == cudaSuccess) {
        return;
    }
    fprintf(stderr, "Failed to allocate device(error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
// Add A and B element-wise into shared memory, then have EVERY thread
// serially total all `nums` entries and store the grand sum in *C.
// NOTE(review): sum[] is indexed with the global thread id but shared
// memory is per-block, and each block reads all `nums` slots — this is only
// well-defined for a single-block launch with blockDim.x >= numElements
// (otherwise uninitialized slots are read). Currently unused in main();
// confirm intended launch shape before reviving it.
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements){
    __shared__ float sum[nums];
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements)
    {
        sum[i] = A[i] + B[i];
    }
    __syncthreads();
    // Redundant serial reduction: every thread computes the same total.
    float tmp = 0.0;
    for (int i = 0; i < nums; ++i){
        tmp+=sum[i];
    }
    *C = tmp;
}
// Decision-stump error evaluation for one threshold (`boundary`):
// err1/err2 accumulate the weighted misclassification for the two stump
// polarities, sum_w the total weight. The XOR pattern counts samples where
// (value <= boundary) disagrees with the label.
// NOTE(review): shares vectorAdd's single-block assumption — sum1/sum2 are
// block-local shared arrays indexed by the global thread id, yet every
// thread reads all `nums` slots; with the 8-block launch used by
// cuda_train() most slots read by a block are uninitialized — confirm.
__global__ void
vectorAdd_train(const float *vec, const float *w, const int *y,
                float *err1, float *err2, float *sum_w,int numElements, float boundary){
    __shared__ float sum1[nums];
    __shared__ float sum2[nums];
    int z = blockDim.x * blockIdx.x + threadIdx.x;
    if (z < numElements)
    {
        sum1[z] = w[z] * ((vec[z]<=boundary) ^ (y[z]==-1));
        sum2[z] = w[z] * ((vec[z]<=boundary) ^ (y[z]==1));
    }
    __syncthreads();
    // Every thread redundantly totals all entries and writes the same
    // scalars (benign write race: all writers store identical values only
    // under the single-block assumption above).
    float tmp1 = 0.0 , tmp2 = 0.0 , tmp3 = 0.0;
    for (int i = 0; i < nums; ++i){
        tmp1+=sum1[i];
        tmp2+=sum2[i];
        tmp3+=w[i];
    }
    *err1 = tmp1;
    *err2 = tmp2;
    *sum_w = tmp3;
}
// 2-D stump search: row index i picks the candidate threshold vec[i],
// column index z enumerates samples; thread (i, 0) reduces the errors for
// threshold i, and thread (0, 0) picks the global minimum.
// NOTE(review): sum1/sum2 are per-thread LOCAL arrays of `nums` floats
// (large local-memory footprint), and the early `return` branches mean not
// all threads of a block reach the two __syncthreads() calls — divergent
// barriers are undefined behavior. This kernel is only used by the
// commented-out cuda_train1 path; fix before re-enabling.
__global__ void
vectorAdd_train2d(const float *vec, const float *w, const int *y,
                  float *min_out,int * cur_i_out,int * sel_m_out,int numElements){
    float sum1[nums]; //err1[]
    float sum2[nums]; //err2[]
    __shared__ float minimal[nums];
    __shared__ int m[nums];
    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int z = blockDim.x * blockIdx.x + threadIdx.x;
    //printf("[%d,%d]\n",i,z );
    float boundary = vec[i];
    if (z < numElements && i < numElements)
    {
        sum1[z] = w[z] * ((vec[z]<=boundary) ^ (y[z]==-1));
        sum2[z] = w[z] * ((vec[z]<=boundary) ^ (y[z]==1));
    }
    else{
        return;
    }
    __syncthreads();
    // One thread per row: reduce the weighted errors for threshold vec[i]
    // and record the better polarity.
    if (z == 0 && i < numElements){
        float tmp1 = 0.0 , tmp2 = 0.0 , tmp3 = 0.0;
        float err1,err2;
        for (int t = 0; t < nums; ++t){
            tmp1+=sum1[t];
            tmp2+=sum2[t];
            tmp3+=w[t];
        }
        err1 = tmp1/tmp3;
        err2 = tmp2/tmp3;
        minimal[i] = err1<err2?err1:err2;
        m[i] = err1<err2?1:-1;
    }
    else
        return;
    __syncthreads();
    // Single thread: argmin over all candidate thresholds.
    if (z == 0 && i == 0)
    {
        float min_tmp = 100000.0;
        int cur_i = -1, sel_m = 0;
        for (int t = 0; t < nums; ++t)
        {
            cur_i = min_tmp<minimal[t]?cur_i:t;
            sel_m = min_tmp<minimal[t]?sel_m:m[t];
            min_tmp = min_tmp<minimal[t]?min_tmp:minimal[t];
        }
        *min_out = min_tmp;
        *sel_m_out = sel_m;
        *cur_i_out = cur_i;
    }
}
// GPU stump search (2-D kernel variant): for each feature column j, launch
// vectorAdd_train2d once to find that column's best threshold/polarity,
// then keep the global best. Results are written into *pars_p.
void cuda_train1(struct pars* pars_p){
    size_t size = nums * sizeof(float);
    cuda_checker(cudaMemcpy(d_w, w, size, cudaMemcpyHostToDevice));
    int cur_j = 0,cur_theta = 0,cur_m = 0;
    float cur_min = 100000.0;
    // Allocate the three per-launch result scalars ONCE. The original
    // cudaMalloc'd them inside the loop and never freed them, leaking
    // device memory on every one of the `cols` iterations.
    float *min_out;
    int *cur_i_out,*sel_m_out;
    cuda_checker(cudaMalloc((void **)&min_out,sizeof(float)));
    cuda_checker(cudaMalloc((void **)&cur_i_out,sizeof(int)));
    cuda_checker(cudaMalloc((void **)&sel_m_out,sizeof(int)));
    for (int j = 0;j<cols;j++){
        float *vec = usps[j];
        cuda_checker(cudaMemcpy(d_vec, vec, size, cudaMemcpyHostToDevice));
        float minimal = 100000.0;
        int cur_i = 0,sel_m= 0;
        dim3 block(16,16);
        dim3 grid ((nums+15)/16,(nums+15)/16);
        vectorAdd_train2d<<<grid,block>>>(d_vec,d_w,d_y,min_out,cur_i_out,sel_m_out,nums);
        // Blocking copies double as synchronization for the launch above.
        cuda_checker(cudaMemcpy(&minimal, min_out, sizeof(float), cudaMemcpyDeviceToHost));
        cuda_checker(cudaMemcpy(&cur_i, cur_i_out, sizeof(int), cudaMemcpyDeviceToHost));
        cuda_checker(cudaMemcpy(&sel_m, sel_m_out, sizeof(int), cudaMemcpyDeviceToHost));
        if(minimal<cur_min){
            cur_min = minimal;
            cur_j = j;
            cur_theta = cur_i;
            cur_m = sel_m;
        }
    }
    cuda_checker(cudaFree(min_out));
    cuda_checker(cudaFree(cur_i_out));
    cuda_checker(cudaFree(sel_m_out));
    // Report the winning feature column, its threshold value, and polarity.
    pars_p->return_j = cur_j;
    pars_p->theta = usps[cur_j][cur_theta];
    pars_p->return_m = cur_m;
    return;
}
// GPU stump search (1-D kernel variant): for each feature column j and each
// candidate threshold vec[i], launch vectorAdd_train to compute the weighted
// errors of both stump polarities, then keep the overall minimum. The
// winning column/threshold/polarity is written into *pars_p.
void cuda_train(struct pars* pars_p){
    size_t size = nums * sizeof(float);
    // Upload the current AdaBoost sample weights once per call.
    cuda_checker(cudaMemcpy(d_w, w, size, cudaMemcpyHostToDevice));
    float err1,err2,sum_w,err;
    int cur_j = 0,cur_theta = 0,cur_m = 0;
    float cur_min = 100000.0;
    for (int j = 0;j<cols;j++){
        float *vec = usps[j];
        // Upload feature column j.
        cuda_checker(cudaMemcpy(d_vec, vec, size, cudaMemcpyHostToDevice));
        float minimal = 100000.0;
        int cur_i = 0,sel_m= 0;
        for(int i=0;i<nums;i++){
            // Try every sample value of this column as the threshold.
            float boundary = vec[i];
            int m = 0;
            int threadsPerBlock = 256;
            int blocksPerGrid =(nums + threadsPerBlock - 1) / threadsPerBlock;
            vectorAdd_train<<<blocksPerGrid, threadsPerBlock>>>(d_vec, d_w, d_y,
                                d_err1, d_err2, d_sum_w,nums,boundary);
            // Blocking copies synchronize with the launch above.
            cuda_checker(cudaMemcpy(&err1, d_err1, sizeof(float), cudaMemcpyDeviceToHost));
            cuda_checker(cudaMemcpy(&err2, d_err2, sizeof(float), cudaMemcpyDeviceToHost));
            cuda_checker(cudaMemcpy(&sum_w, d_sum_w, sizeof(float), cudaMemcpyDeviceToHost));
            // Choose the polarity with the smaller weighted error.
            if(err1<err2){
                err = err1/sum_w;
                m = 1;
            }else{
                err = err2/sum_w;
                m = -1;
            }
            if(err<minimal){
                minimal = err;
                cur_i = i;
                sel_m = m;
            }
        }
        if(minimal<cur_min){
            cur_min = minimal;
            cur_j = j;
            cur_theta = cur_i;
            cur_m = sel_m;
        }
    }
    pars_p->return_j = cur_j;
    pars_p->theta = usps[cur_j][cur_theta];
    pars_p->return_m = cur_m;
    return;
}
// CPU reference implementation of the decision-stump search: exhaustively
// tries every feature column and every sample value as a threshold, and
// writes the weighted-error-minimizing (column, threshold, polarity) into
// *pars_p. O(cols * nums^2).
void train(struct pars* pars_p){
    int best_j = 0, best_i = 0, best_m = 0;
    float best_err = 100000.0;
    for (int j = 0; j < cols; j++){
        float *col = usps[j];
        float col_min = 100000.0;
        int col_i = 0, col_m = 0;
        for (int i = 0; i < nums; i++){
            float boundary = col[i];
            // err1/err2: weighted misclassification for the two polarities;
            // the XOR counts samples where (value <= threshold) disagrees
            // with the label.
            float err1 = 0.0, err2 = 0.0, sum_w = 0;
            for (int z = 0; z < nums; z++){
                err1 += w[z] * ((col[z] <= boundary) ^ (y[z] == -1));
                err2 += w[z] * ((col[z] <= boundary) ^ (y[z] == 1));
                sum_w += w[z];
            }
            float err;
            int m;
            if (err1 < err2){
                err = err1 / sum_w;
                m = 1;
            } else {
                err = err2 / sum_w;
                m = -1;
            }
            if (err < col_min){
                col_min = err;
                col_i = i;
                col_m = m;
            }
        }
        if (col_min < best_err){
            best_err = col_min;
            best_j = j;
            best_i = col_i;
            best_m = col_m;
        }
    }
    pars_p->return_j = best_j;
    pars_p->theta = usps[best_j][best_i];
    pars_p->return_m = best_m;
    return;
}
// Run B rounds of AdaBoost with decision stumps as the weak learner.
// Each round: train the best stump on the current weights, compute its
// weighted error, store its vote weight in alpha[b], and up-weight the
// misclassified samples. Returns a malloc'd array of B stump parameters
// (caller frees).
struct pars* AdaBoost(int B,float *alpha){
    struct pars* allPars = (struct pars*)malloc(sizeof(struct pars)*B);
    for (int b=0;b<B;b++){
        struct pars pars;
        //cuda_train1(&pars);
        //train(&pars);
        cuda_train(&pars);
        float *vec = usps[pars.return_j];
        // Weighted error of the freshly trained stump: the XOR flags samples
        // where the stump's prediction disagrees with the label.
        float err = 0.0,w_sum = 0.0;
        for(int z =0;z<nums;z++){
            err += w[z] * ((vec[z]<=pars.theta) ^ (-pars.return_m == y[z]) );
            w_sum += w[z];
        }
        err = err/w_sum;
        // Classifier vote weight: log((1 - err) / err).
        alpha[b] = logf((1-err)/err);
        // Reweight: multiply misclassified samples by (1-err)/err.
        for(int z =0;z<nums;z++){
            w[z] = ((vec[z]<=pars.theta) ^ (-pars.return_m == y[z]))?(w[z] * (1-err) / err):w[z];
        }
        allPars[b].return_j = pars.return_j;
        allPars[b].return_m = pars.return_m;
        allPars[b].theta = pars.theta;
    }
    return allPars;
}
// Combine the B weak classifiers into a final {-1, +1} label per sample:
// each stump votes -m or +m depending on the threshold test, weighted by
// alpha[b]; the sign of the weighted sum is the prediction. Returns a
// malloc'd array of nums labels (caller frees).
int * agg_class(float *alpha,struct pars* allPars,int B){
    float *scores = (float *)malloc(sizeof(float)*nums);
    for (int s = 0; s < nums; ++s) {
        scores[s] = 0.0;
    }
    int *labels = (int *)malloc(sizeof(int)*nums);
    for (int b = 0; b < B; ++b)
    {
        struct pars stump = allPars[b];
        float *col = usps[stump.return_j];
        for (int s = 0; s < nums; ++s) {
            scores[s] += alpha[b] * ((col[s] <= stump.theta) ? (-stump.return_m) : stump.return_m);
        }
    }
    for (int s = 0; s < nums; ++s) {
        labels[s] = scores[s] >= 0 ? 1 : -1;
    }
    free(scores);
    return labels;
}
/* Entry point: load the USPS feature table (column-major: usps[j][i] is
 * feature j of sample i) and labels, allocate the device buffers shared by
 * the training kernels, run 5 rounds of AdaBoost, and report the stumps,
 * their vote weights, and the wall-clock time. */
int main(){
    usps = (float **)malloc(sizeof(float *)*cols);
    w = (float*)malloc(sizeof(float)*nums);
    y = (int*)malloc(sizeof(int)*nums);
    for (int i = 0; i < nums; ++i){
        w[i] = 1.0/nums;   // uniform initial AdaBoost weights
    }
    for(int j=0;j<cols;j++){
        usps[j] = (float *)malloc(sizeof(float)*nums);
    }
    FILE* fp = fopen("uspsdata/uspsdata_ext.txt","r");
    FILE* fpcl = fopen("uspsdata/uspscl_ext.txt","r");
    // Fail fast with a clear message instead of crashing inside fscanf when
    // the data files are missing (the original never checked fopen).
    if (fp == NULL || fpcl == NULL){
        fprintf(stderr, "cannot open uspsdata input files\n");
        exit(EXIT_FAILURE);
    }
    for(int i=0;i<nums;i++){
        fscanf(fpcl,"%d",y+i);
        for(int j=0;j<cols;j++){
            fscanf(fp,"%f",*(usps+j)+i);
        }
    }
    fclose(fp);fclose(fpcl);
    /***********cuda here********/
    // Device buffers reused by every kernel launch during training.
    int numElements = nums;
    size_t size = numElements * sizeof(float);
    cuda_checker(cudaMalloc((void **)&d_w, size));
    cuda_checker(cudaMalloc((void **)&d_sum_w, sizeof(float)));
    cuda_checker(cudaMalloc((void **)&d_err1, sizeof(float)));
    cuda_checker(cudaMalloc((void **)&d_err2, sizeof(float)));
    cuda_checker(cudaMalloc((void **)&d_vec, size));
    cuda_checker(cudaMalloc((void **)&d_y, nums*sizeof(int)));
    cuda_checker(cudaMemcpy(d_y, y, sizeof(int)*nums, cudaMemcpyHostToDevice));
    /*****************************/
    clock_t begin, end;
    double time_spent;
    struct pars* ap;
    float *alpha = (float *)malloc(sizeof(float)*5);
    int *c_hat;
    begin = clock();
    ap = AdaBoost(5,alpha);          // 5 boosting rounds
    c_hat = agg_class(alpha,ap,5);   // aggregate training-set predictions
    end = clock();
    time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    for (int i = 0; i < 5; ++i)
    {
        printf("%d,%f,%d,%f\n",ap[i].return_j,ap[i].theta,ap[i].return_m,alpha[i]);
    }
    printf("time is %f\n",time_spent);
    // Release host and device resources.
    for(int j=0;j<cols;j++){
        free(usps[j]);
    }
    free(usps);
    free(w);
    free(y);
    free(alpha);
    free(ap);
    free(c_hat);
    cuda_checker(cudaFree(d_w));
    cuda_checker(cudaFree(d_sum_w));
    cuda_checker(cudaFree(d_err1));
    cuda_checker(cudaFree(d_err2));
    cuda_checker(cudaFree(d_vec));
    cuda_checker(cudaFree(d_y));
    cuda_checker(cudaDeviceReset());
    return 0;
}
5,686 | #include "includes.h"
// Per-site energy density for N lattice sites:
//   rho[i] = q^2/2 + p^2/2 + (lambda/4) q^4 + (g/6) q^6
// One thread per site; accumulation mirrors the original term order so the
// floating-point result is bit-identical.
__global__ void kernelCulcRhoReal(const int N, double *rho, double *q, double *p, const double lambda, const double g)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N)
        return;   // tail guard for grids that over-cover N
    const double qi = q[i];
    const double pi = p[i];
    // Sum in a register instead of four read-modify-writes to global memory.
    double energy = 0.5 * qi * qi;
    energy += 0.5 * pi * pi;
    energy += (lambda / 4.0) * qi * qi * qi * qi;
    energy += (g / 6.0) * qi * qi * qi * qi * qi * qi;
    rho[i] = energy;
}
5,687 | /* ****************************************
*
* CUDA Kernel: matrix minus
*
*/
/* ****************************************
*
* sub2ind - Column-major indexing of 2D arrays
*
*/
/* Column-major linear index of element (i, j) in an array with `height`
 * rows: index = i + height * j. */
template <typename T>
__device__ __forceinline__ T sub2ind( T i, T j, T height ) {
    return i + height * j;
} // end function 'sub2ind'
/* ****************************************
*
* core kernel
*
*/
/* Apply the Laplace-based filter to every masked pixel of an m x (n*p)
 * column-major image stack:
 *   Iedit = (ILaplace - B) / 4   wherever Mask != 0.
 * One thread per element; threadIdx.x/blockIdx.x span rows, .y spans
 * columns across all p planes. */
__global__ void apply_filter(double * Iedit,
                             const double * ILaplace,
                             const double * B,
                             const int * Mask,
                             const int m,
                             const int n,
                             const int p){
    /* thread indices */
    const int col = blockIdx.y*blockDim.y+threadIdx.y;
    const int row = blockIdx.x*blockDim.x+threadIdx.x;
    /* Skip out-of-range threads first (so Mask is never indexed OOB),
       then skip unmasked pixels. */
    if ((row >= m) || (col >= n*p)){
        return;
    }
    const int idx = sub2ind(row, col, m);
    if (Mask[idx] == 0){
        return;
    }
    Iedit[idx] = (ILaplace[idx] - B[idx]) / 4.0;
    return ;
}
|
5,688 | #include <stdio.h>
#include <cuda_runtime.h>
#define N (32 * 1024)
#ifndef checkCudaErrors
#define checkCudaErrors(err) __checkCudaErrors(err, __FILE__, __LINE__)
// Backing function for the checkCudaErrors() macro: report the failing
// call's file/line plus the runtime's message, then terminate.
void __checkCudaErrors(cudaError_t err, const char *file, const int line)
{
    if (err == cudaSuccess) {
        return;
    }
    fprintf(stderr, "checkCudaErrors() Driver API error = %04d \"%s\" from file <%s>, line %i.\n", err, cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
#endif
// c[tid] = a[tid] + b[tid] for tid in [0, N); one thread per element with a
// tail guard for the final partial block.
__global__ void add(int *a, int *b, int *c)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= N)
        return;
    c[tid] = a[tid] + b[tid];
}
/* Adds two N-element integer arrays on the GPU, prints every sum, and
 * cross-checks the device result against the CPU. */
int main(void)
{
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;
    // allocate the memory on the CPU
    a = (int*)malloc( N * sizeof(int) );
    b = (int*)malloc( N * sizeof(int) );
    c = (int*)malloc( N * sizeof(int) );
    // The original never checked these; a NULL here would crash below.
    if (a == NULL || b == NULL || c == NULL)
    {
        fprintf(stderr, "host allocation failed\n");
        exit(EXIT_FAILURE);
    }
    checkCudaErrors(cudaMalloc((void**)&dev_a, N * sizeof(int)));
    checkCudaErrors(cudaMalloc((void**)&dev_b, N * sizeof(int)));
    checkCudaErrors(cudaMalloc((void**)&dev_c, N * sizeof(int)));
    // a[i] = i, b[i] = 2i, so c[i] should come back as 3i.
    for (int i = 0; i < N; ++i)
    {
        a[i] = i;
        b[i] = i * 2;
    }
    checkCudaErrors(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice));
    int threadsPerBlock = 128;
    int blocksPerGrid = (N+threadsPerBlock-1)/threadsPerBlock;
    add<<<blocksPerGrid,threadsPerBlock>>>(dev_a, dev_b, dev_c);
    // Catch launch-configuration errors the launch itself cannot return.
    checkCudaErrors(cudaGetLastError());
    // This blocking copy also synchronizes with the kernel.
    checkCudaErrors(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost));
    for (int i = 0; i < N; ++i)
    {
        printf("%d + %d = %d\n",a[i],b[i],c[i]);
    }
    // verify that the GPU did the work we requested
    bool success = true;
    for (int i=0; i<N; i++)
    {
        if ((a[i] + b[i]) != c[i])
        {
            printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] );
            success = false;
        }
    }
    if (success) printf( "We did it!\n" );
    checkCudaErrors(cudaFree(dev_a));
    checkCudaErrors(cudaFree(dev_b));
    checkCudaErrors(cudaFree(dev_c));
    free(a);
    free(b);
    free(c);
    return 0;
}
|
5,689 | #include <stdio.h>
#include "cuda.h"
// Integer helper macros; `ceil(a,b)` is ceiling division of a by b
// (note: shadows the standard math-library names on purpose).
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort with `message` plus the runtime's description if the most recent
// CUDA call recorded an error.
void check_error (const char* message) {
    cudaError_t error = cudaGetLastError ();
    if (error == cudaSuccess) {
        return;
    }
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
    exit(-1);
}
// Kernel 1 of 3: accumulates the x-component (uacc_0) of a 4th-order
// SW4-style elastic-wave stencil.  Launched on a 2D grid over (i, j);
// each thread loops over k in steps of 2, computing plane k with the
// a_* temporaries and plane k+1 with the b_* temporaries.  The guard
// below leaves a 2-cell halo in i/j/k for the 5-point one-sided sums.
// NOTE(review): all volumes are reinterpreted as fixed 304x304x304
// arrays, so this presumably requires N == 304 -- TODO confirm caller.
// Statement order matters for floating-point reproducibility; do not
// re-associate these expressions.
__global__ void __launch_bounds__ (128,2) sw4_1 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
// Reinterpret the flat device pointers as 3D [k][j][i] volumes.
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
double a_r1, b_r1;
// NOTE(review): bitwise '&' instead of logical '&&' -- operands are
// 0/1 comparison results, so the value is the same here.
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 3
for (int k=2; k<=N-3; k+=2) {
// One-sided averaged material coefficients (mu weighted by the grid
// stretching strx/stry/strz) for plane k.
a_mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
a_mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
a_mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
a_mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
a_muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
a_muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
a_muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
a_muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
a_muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
a_muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
a_muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
a_muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
// Second-difference terms: the x direction carries the combined
// (2*mu + la) coefficient; y and z use the mu-only averages above.
a_r1 = 1e0 / 6 * (strx[i] * ((2 * a_mux1 + la[k][j][i-1] * strx[i-1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i-2] * strx[i-2]) * (u_0[k][j][i-2] - u_0[k][j][i]) +
(2 * a_mux2 + la[k][j][i-2] * strx[i-2] + la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i] + 3 * la[k][j][i-1] * strx[i-1]) * (u_0[k][j][i-1] - u_0[k][j][i]) +
(2 * a_mux3 + la[k][j][i-1] * strx[i-1] + la[k][j][i+2] * strx[i+2] + 3 * la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i]) * (u_0[k][j][i+1] - u_0[k][j][i]) +
(2 * a_mux4 + la[k][j][i+1] * strx[i+1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i+2] * strx[i+2]) * (u_0[k][j][i+2] - u_0[k][j][i]))
+ stry[j] * (a_muy1 * (u_0[k][j-2][i] - u_0[k][j][i]) + a_muy2 * (u_0[k][j-1][i] - u_0[k][j][i]) + a_muy3 * (u_0[k][j+1][i] - u_0[k][j][i]) + a_muy4 * (u_0[k][j+2][i] - u_0[k][j][i])) + strz[k] * (a_muz1 * (u_0[k-2][j][i] - u_0[k][j][i]) + a_muz2 * (u_0[k-1][j][i] - u_0[k][j][i]) + a_muz3 * (u_0[k+1][j][i] - u_0[k][j][i]) + a_muz4 * (u_0[k+2][j][i] - u_0[k][j][i])));
// Mixed-derivative cross terms (4th-order, 1/144 weight).
a_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k][j][i-2] * (u_1[k][j-2][i-2] - u_1[k][j+2][i-2] + 8 * (-u_1[k][j-1][i-2] + u_1[k][j+1][i-2])) - 8 * (la[k][j][i-1] * (u_1[k][j-2][i-1] - u_1[k][j+2][i-1] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j+1][i-1]))) + 8 * (la[k][j][i+1] * (u_1[k][j-2][i+1] - u_1[k][j+2][i+1] + 8 * (-u_1[k][j-1][i+1] + u_1[k][j+1][i+1]))) - (la[k][j][i+2] * (u_1[k][j-2][i+2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j-1][i+2] + u_1[k][j+1][i+2]))));
a_r1 += strx[i] * strz[k] * (1e0 / 144) * (la[k][j][i-2] * (u_2[k-2][j][i-2] - u_2[k+2][j][i-2] + 8 * (-u_2[k-1][j][i-2] + u_2[k+1][j][i-2])) - 8 * (la[k][j][i-1] * (u_2[k-2][j][i-1] - u_2[k+2][j][i-1] + 8 * (-u_2[k-1][j][i-1] + u_2[k+1][j][i-1]))) + 8 * (la[k][j][i+1] * (u_2[k-2][j][i+1] - u_2[k+2][j][i+1] + 8 * (-u_2[k-1][j][i+1] + u_2[k+1][j][i+1]))) - (la[k][j][i+2] * (u_2[k-2][j][i+2] - u_2[k+2][j][i+2] + 8 * (-u_2[k-1][j][i+2] + u_2[k+1][j][i+2]))));
a_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k][j-2][i-2] - u_1[k][j-2][i+2] + 8 * (-u_1[k][j-2][i-1] + u_1[k][j-2][i+1])) - 8 * (mu[k][j-1][i] * (u_1[k][j-1][i-2] - u_1[k][j-1][i+2] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j-1][i+1]))) + 8 * (mu[k][j+1][i] * (u_1[k][j+1][i-2] - u_1[k][j+1][i+2] + 8 * (-u_1[k][j+1][i-1] + u_1[k][j+1][i+1]))) - (mu[k][j+2][i] * (u_1[k][j+2][i-2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j+2][i-1] + u_1[k][j+2][i+1]))));
a_r1 += strx[i] * strz[k] * (1e0 / 144) * (mu[k-2][j][i] * (u_2[k-2][j][i-2] - u_2[k-2][j][i+2] + 8 * (-u_2[k-2][j][i-1] + u_2[k-2][j][i+1])) - 8 * (mu[k-1][j][i] * (u_2[k-1][j][i-2] - u_2[k-1][j][i+2] + 8 * (-u_2[k-1][j][i-1] + u_2[k-1][j][i+1]))) + 8 * (mu[k+1][j][i] * (u_2[k+1][j][i-2] - u_2[k+1][j][i+2] + 8 * (-u_2[k+1][j][i-1] + u_2[k+1][j][i+1]))) - (mu[k+2][j][i] * (u_2[k+2][j][i-2] - u_2[k+2][j][i+2] + 8 * (-u_2[k+2][j][i-1] + u_2[k+2][j][i+1]))));
// Scale by 1/h^2 and accumulate into the output for plane k.
uacc_0[k][j][i] = a1 * uacc_0[k][j][i] + cof * a_r1;
// Second half of the iteration: identical computation for plane k+1.
b_mux1 = mu[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i-2] * strx[i-2];
b_mux2 = mu[k+1][j][i-2] * strx[i-2] + mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i] + 3.0 * mu[k+1][j][i-1] * strx[i-1];
b_mux3 = mu[k+1][j][i-1] * strx[i-1] + mu[k+1][j][i+2] * strx[i+2] + 3.0 * mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i];
b_mux4 = mu[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i+2] * strx[i+2];
b_muy1 = mu[k+1][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] -3e0 / 4 * mu[k+1][j-2][i] * stry[j-2];
b_muy2 = mu[k+1][j-2][i] * stry[j-2] + mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j] + 3.0 * mu[k+1][j-1][i] * stry[j-1];
b_muy3 = mu[k+1][j-1][i] * stry[j-1] + mu[k+1][j+2][i] * stry[j+2] + 3.0 * mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j];
b_muy4 = mu[k+1][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] - 3e0 / 4 * mu[k+1][j+2][i] * stry[j+2];
b_muz1 = mu[k+1-1][j][i] * strz[k+1-1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k+1-2][j][i] * strz[k+1-2];
b_muz2 = mu[k+1-2][j][i] * strz[k+1-2] + mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k+1-1][j][i] * strz[k+1-1];
b_muz3 = mu[k+1-1][j][i] * strz[k+1-1] + mu[k+1+2][j][i] * strz[k+1+2] + 3.0 * mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 = mu[k+1+1][j][i] * strz[k+1+1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 /4 * mu[k+1+2][j][i] * strz[k+1+2];
b_r1 = 1e0 / 6 * (strx[i] * ((2 * b_mux1 + la[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i-2] * strx[i-2]) * (u_0[k+1][j][i-2] - u_0[k+1][j][i]) +
(2 * b_mux2 + la[k+1][j][i-2] * strx[i-2] + la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i] + 3 * la[k+1][j][i-1] * strx[i-1]) * (u_0[k+1][j][i-1] - u_0[k+1][j][i]) +
(2 * b_mux3 + la[k+1][j][i-1] * strx[i-1] + la[k+1][j][i+2] * strx[i+2] + 3 * la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i]) * (u_0[k+1][j][i+1] - u_0[k+1][j][i]) +
(2 * b_mux4 + la[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i+2] * strx[i+2]) * (u_0[k+1][j][i+2] - u_0[k+1][j][i]))
+ stry[j] * (b_muy1 * (u_0[k+1][j-2][i] - u_0[k+1][j][i]) + b_muy2 * (u_0[k+1][j-1][i] - u_0[k+1][j][i]) + b_muy3 * (u_0[k+1][j+1][i] - u_0[k+1][j][i]) + b_muy4 * (u_0[k+1][j+2][i] - u_0[k+1][j][i])) + strz[k+1] * (b_muz1 * (u_0[k+1-2][j][i] - u_0[k+1][j][i]) + b_muz2 * (u_0[k+1-1][j][i] - u_0[k+1][j][i]) + b_muz3 * (u_0[k+1+1][j][i] - u_0[k+1][j][i]) + b_muz4 * (u_0[k+1+2][j][i] - u_0[k+1][j][i])));
b_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k+1][j][i-2] * (u_1[k+1][j-2][i-2] - u_1[k+1][j+2][i-2] + 8 * (-u_1[k+1][j-1][i-2] + u_1[k+1][j+1][i-2])) - 8 * (la[k+1][j][i-1] * (u_1[k+1][j-2][i-1] - u_1[k+1][j+2][i-1] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j+1][i-1]))) + 8 * (la[k+1][j][i+1] * (u_1[k+1][j-2][i+1] - u_1[k+1][j+2][i+1] + 8 * (-u_1[k+1][j-1][i+1] + u_1[k+1][j+1][i+1]))) - (la[k+1][j][i+2] * (u_1[k+1][j-2][i+2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j-1][i+2] + u_1[k+1][j+1][i+2]))));
b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (la[k+1][j][i-2] * (u_2[k+1-2][j][i-2] - u_2[k+1+2][j][i-2] + 8 * (-u_2[k+1-1][j][i-2] + u_2[k+1+1][j][i-2])) - 8 * (la[k+1][j][i-1] * (u_2[k+1-2][j][i-1] - u_2[k+1+2][j][i-1] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1+1][j][i-1]))) + 8 * (la[k+1][j][i+1] * (u_2[k+1-2][j][i+1] - u_2[k+1+2][j][i+1] + 8 * (-u_2[k+1-1][j][i+1] + u_2[k+1+1][j][i+1]))) - (la[k+1][j][i+2] * (u_2[k+1-2][j][i+2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1-1][j][i+2] + u_2[k+1+1][j][i+2]))));
b_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k+1][j-2][i] * (u_1[k+1][j-2][i-2] - u_1[k+1][j-2][i+2] + 8 * (-u_1[k+1][j-2][i-1] + u_1[k+1][j-2][i+1])) - 8 * (mu[k+1][j-1][i] * (u_1[k+1][j-1][i-2] - u_1[k+1][j-1][i+2] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j-1][i+1]))) + 8 * (mu[k+1][j+1][i] * (u_1[k+1][j+1][i-2] - u_1[k+1][j+1][i+2] + 8 * (-u_1[k+1][j+1][i-1] + u_1[k+1][j+1][i+1]))) - (mu[k+1][j+2][i] * (u_1[k+1][j+2][i-2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j+2][i-1] + u_1[k+1][j+2][i+1]))));
b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (mu[k+1-2][j][i] * (u_2[k+1-2][j][i-2] - u_2[k+1-2][j][i+2] + 8 * (-u_2[k+1-2][j][i-1] + u_2[k+1-2][j][i+1])) - 8 * (mu[k+1-1][j][i] * (u_2[k+1-1][j][i-2] - u_2[k+1-1][j][i+2] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1-1][j][i+1]))) + 8 * (mu[k+1+1][j][i] * (u_2[k+1+1][j][i-2] - u_2[k+1+1][j][i+2] + 8 * (-u_2[k+1+1][j][i-1] + u_2[k+1+1][j][i+1]))) - (mu[k+1+2][j][i] * (u_2[k+1+2][j][i-2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1+2][j][i-1] + u_2[k+1+2][j][i+1]))));
uacc_0[k+1][j][i] = a1 * uacc_0[k+1][j][i] + cof * b_r1;
}
}
}
// Kernel 2 of 3: accumulates the y-component (uacc_1) of the same
// 4th-order SW4-style stencil.  Same launch shape as sw4_1: 2D grid
// over (i, j), k-loop in steps of 2 computing plane k with the a_*
// temporaries and plane k+1 with the b_* temporaries.  Unlike sw4_1,
// the arithmetic here is fully scalarized into ordered temporaries
// (_v_*/_t_*); the statement order is significant for register
// pressure and floating-point reproducibility -- do not re-associate.
// NOTE(review): arrays are reinterpreted as fixed 304x304x304 volumes,
// so this presumably requires N == 304 -- TODO confirm caller.
__global__ void __launch_bounds__ (128,2) sw4_2 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
// Reinterpret the flat device pointers as 3D [k][j][i] volumes.
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
double a_r2, b_r2;
// NOTE(review): bitwise '&' instead of logical '&&' -- operands are
// 0/1 comparison results, so the value is the same here.
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 3
for (int k=2; k<=N-3; k+=2) {
// One-sided averaged material coefficients for plane k.
a_mux1 = mu[k][j][i-1] * strx[i-1];
a_mux1 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
a_mux1 -= 3.0 / 4.0 * mu[k][j][i-2] * strx[i-2];
a_mux2 = mu[k][j][i-2] * strx[i-2];
a_mux2 += mu[k][j][i+1] * strx[i+1];
a_mux2 += 3.0 * mu[k][j][i] * strx[i];
a_mux2 += 3.0 * mu[k][j][i-1] * strx[i-1];
a_mux3 = mu[k][j][i-1] * strx[i-1];
a_mux3 += mu[k][j][i+2] * strx[i+2];
a_mux3 += 3.0 * mu[k][j][i+1] * strx[i+1];
a_mux3 += 3.0 * mu[k][j][i] * strx[i];
a_mux4 = mu[k][j][i+1] * strx[i+1];
a_mux4 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
a_mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2];
a_muy1 = mu[k][j-1][i] * stry[j-1];
a_muy1 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
a_muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2];
a_muy2 = mu[k][j-2][i] * stry[j-2];
a_muy2 += mu[k][j+1][i] * stry[j+1];
a_muy2 += 3.0 * mu[k][j][i] * stry[j];
a_muy2 += 3.0 * mu[k][j-1][i] * stry[j-1];
a_muy3 = mu[k][j-1][i] * stry[j-1];
a_muy3 += mu[k][j+2][i] * stry[j+2];
a_muy3 += 3.0 * mu[k][j+1][i] * stry[j+1];
a_muy3 += 3.0 * mu[k][j][i] * stry[j];
a_muy4 = mu[k][j+1][i] * stry[j+1];
a_muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2];
a_muy4 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
a_muz1 = mu[k-1][j][i] * strz[k-1];
a_muz1 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
a_muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2];
a_muz2 = mu[k-2][j][i] * strz[k-2];
a_muz2 += mu[k+1][j][i] * strz[k+1];
a_muz2 += 3.0 * mu[k][j][i] * strz[k];
a_muz2 += 3.0 * mu[k-1][j][i] * strz[k-1];
a_muz3 = mu[k-1][j][i] * strz[k-1];
a_muz3 += mu[k+2][j][i] * strz[k+2];
a_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
a_muz3 += 3.0 * mu[k][j][i] * strz[k];
a_muz4 = mu[k+1][j][i] * strz[k+1];
a_muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
a_muz4 -= 3.0 / 4.0 * mu[k+2][j][i] * strz[k+2];
// r2 contribution from the x-direction (strx, mu-only) terms.
double _v_18_ = a_mux1 * u_1[k][j][i-2];
_v_18_ -= a_mux1 * u_1[k][j][i];
double _v_22_ = strx[i] * _v_18_;
double _v_19_ = a_mux2 * u_1[k][j][i-1];
_v_19_ -= a_mux2 * u_1[k][j][i];
_v_22_ += strx[i] * _v_19_;
double _v_20_ = a_mux3 * u_1[k][j][i+1];
_v_20_ -= a_mux3 * u_1[k][j][i];
_v_22_ += strx[i] * _v_20_;
double _v_21_ = a_mux4 * u_1[k][j][i+2];
_v_21_ -= a_mux4 * u_1[k][j][i];
_v_22_ += strx[i] * _v_21_;
a_r2 = 1.0 / 6.0 * _v_22_;
// y-direction terms with combined (2*mu + la) coefficients.
double _t_8_ = u_1[k][j-2][i];
_t_8_ -= u_1[k][j][i];
double _t_7_ = 2.0 * a_muy1;
double _v_23_ = la[k][j-1][i] * stry[j-1];
_t_7_ += _v_23_;
_t_7_ -= 3.0 / 4.0 * la[k][j][i] * stry[j];
double _t_9_ = 3.0 * la[k][j][i] * stry[j];
_t_9_ += 3.0 * la[k][j-1][i] * stry[j-1];
_t_9_ += 2.0 * a_muy2;
double _t_11_ = 3.0 * la[k][j][i] * stry[j];
_t_11_ += 2.0 * a_muy3;
double _t_13_ = -(3.0 / 4.0 * la[k][j][i] * stry[j]);
_t_13_ += 2.0 * a_muy4;
_t_7_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2];
double _v_24_ = _t_7_ * _t_8_;
double _v_33_ = stry[j] * _v_24_;
_t_9_ += la[k][j-2][i] * stry[j-2];
_t_9_ += la[k][j+1][i] * stry[j+1];
double _t_10_ = u_1[k][j-1][i];
_t_10_ -= u_1[k][j][i];
double _v_27_ = _t_9_ * _t_10_;
_v_33_ += stry[j] * _v_27_;
_t_11_ += _v_23_;
_t_11_ += la[k][j+2][i] * stry[j+2];
_t_11_ += 3.0 * la[k][j+1][i] * stry[j+1];
double _t_12_ = u_1[k][j+1][i];
_t_12_ -= u_1[k][j][i];
double _v_30_ = _t_11_ * _t_12_;
_v_33_ += stry[j] * _v_30_;
_t_13_ += la[k][j+1][i] * stry[j+1];
_t_13_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2];
double _t_14_ = u_1[k][j+2][i];
_t_14_ -= u_1[k][j][i];
double _v_32_ = _t_13_ * _t_14_;
_v_33_ += stry[j] * _v_32_;
a_r2 += 1.0 / 6.0 * _v_33_;
// z-direction (strz, mu-only) terms.
double _v_34_ = a_muz1 * u_1[k-2][j][i];
_v_34_ -= a_muz1 * u_1[k][j][i];
double _v_38_ = strz[k] * _v_34_;
double _v_35_ = a_muz2 * u_1[k-1][j][i];
_v_35_ -= a_muz2 * u_1[k][j][i];
_v_38_ += strz[k] * _v_35_;
double _v_36_ = a_muz3 * u_1[k+1][j][i];
_v_36_ -= a_muz3 * u_1[k][j][i];
_v_38_ += strz[k] * _v_36_;
double _v_37_ = a_muz4 * u_1[k+2][j][i];
_v_37_ -= a_muz4 * u_1[k][j][i];
_v_38_ += strz[k] * _v_37_;
a_r2 += 1.0 / 6.0 * _v_38_;
// Mixed-derivative cross terms (1/144 weight) for plane k.
double _t_27_ = u_0[k][j-2][i-1];
_t_27_ -= u_0[k][j+2][i-1];
_t_27_ += 8.0 * -u_0[k][j-1][i-1];
_t_27_ += 8.0 * u_0[k][j+1][i-1];
double _t_38_ = -u_0[k][j-2][i-1];
_t_38_ += u_0[k][j-2][i+1];
double _t_40_ = 8.0 * -u_0[k][j-1][i-1];
_t_40_ += 8.0 * u_0[k][j-1][i+1];
_t_40_ += u_0[k][j-1][i-2];
_t_40_ -= u_0[k][j-1][i+2];
double _t_43_ = 8.0 * -u_0[k][j+1][i-1];
_t_43_ += 8.0 * u_0[k][j+1][i+1];
_t_43_ += u_0[k][j+1][i-2];
_t_43_ -= u_0[k][j+1][i+2];
double _t_46_ = -u_0[k][j+2][i-1];
_t_46_ += u_0[k][j+2][i+1];
double _t_30_ = u_0[k][j-2][i+1];
_t_30_ -= u_0[k][j+2][i+1];
_t_30_ += 8.0 * -u_0[k][j-1][i+1];
_t_30_ += 8.0 * u_0[k][j+1][i+1];
double _t_25_ = -u_0[k][j-1][i-2];
_t_25_ += u_0[k][j+1][i-2];
double _t_33_ = -u_0[k][j-1][i+2];
_t_33_ += u_0[k][j+1][i+2];
double _t_22_ = -(8.0 * mu[k][j][i-1] * _t_27_);
_t_22_ += 8.0 * mu[k][j][i+1] * _t_30_;
double _v_39_ = mu[k][j][i-2] * u_0[k][j-2][i-2];
_v_39_ += mu[k][j][i-2] * 8.0 * _t_25_;
_v_39_ -= mu[k][j][i-2] * u_0[k][j+2][i-2];
double _v_42_ = la[k][j-2][i] * u_0[k][j-2][i-2];
_v_42_ -= la[k][j-2][i] * u_0[k][j-2][i+2];
_v_42_ += la[k][j-2][i] * 8.0 * _t_38_;
double _v_43_ = la[k][j+2][i] * u_0[k][j+2][i-2];
_v_43_ -= la[k][j+2][i] * u_0[k][j+2][i+2];
_v_43_ += la[k][j+2][i] * 8.0 * _t_46_;
double _v_45_ = la[k][j-2][i] * u_2[k-2][j-2][i];
double _v_46_ = la[k][j+2][i] * u_2[k-2][j+2][i];
double _v_48_ = mu[k-2][j][i] * u_2[k-2][j-2][i];
_v_48_ -= mu[k-2][j][i] * u_2[k-2][j+2][i];
double _v_40_ = mu[k][j][i+2] * u_0[k][j-2][i+2];
_v_40_ += mu[k][j][i+2] * 8.0 * _t_33_;
_v_40_ -= mu[k][j][i+2] * u_0[k][j+2][i+2];
_t_22_ += _v_39_;
_t_22_ -= _v_40_;
double _t_21_ = 1.0 / 144.0 * strx[i] * stry[j];
double _t_20_ = _t_21_ * _t_22_;
double _t_35_ = _v_42_;
_t_35_ -= 8.0 * la[k][j-1][i] * _t_40_;
_t_35_ += 8.0 * la[k][j+1][i] * _t_43_;
_t_35_ -= _v_43_;
double _t_34_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_20_ += _t_34_ * _t_35_;
double _t_53_ = u_2[k-2][j-1][i];
_t_53_ += 8.0 * u_2[k+1][j-1][i];
double _t_64_ = -u_2[k-2][j-1][i];
_t_64_ += u_2[k-2][j+1][i];
double _t_69_ = 8.0 * -u_2[k+1][j-1][i];
_t_69_ += 8.0 * u_2[k+1][j+1][i];
_t_69_ += u_2[k+1][j-2][i];
_t_69_ -= u_2[k+1][j+2][i];
double _t_56_ = u_2[k-2][j+1][i];
_t_56_ += 8.0 * u_2[k+1][j+1][i];
double _t_51_ = u_2[k+1][j-2][i];
double _t_59_ = u_2[k+1][j+2][i];
_t_53_ -= u_2[k+2][j-1][i];
_t_53_ += 8.0 * -u_2[k-1][j-1][i];
double _t_48_ = -(8.0 * la[k][j-1][i] * _t_53_);
_t_56_ -= u_2[k+2][j+1][i];
_t_56_ += 8.0 * -u_2[k-1][j+1][i];
_t_48_ += 8.0 * la[k][j+1][i] * _t_56_;
_t_51_ += -u_2[k-1][j-2][i];
_v_45_ += la[k][j-2][i] * 8.0 * _t_51_;
_v_45_ -= la[k][j-2][i] * u_2[k+2][j-2][i];
_t_48_ += _v_45_;
_t_59_ += -u_2[k-1][j+2][i];
_v_46_ += la[k][j+2][i] * 8.0 * _t_59_;
_v_46_ -= la[k][j+2][i] * u_2[k+2][j+2][i];
_t_48_ -= _v_46_;
double _t_47_ = 1.0 / 144.0 * stry[j] * strz[k];
_t_20_ += _t_47_ * _t_48_;
_v_48_ += mu[k-2][j][i] * 8.0 * _t_64_;
double _t_61_ = _v_48_;
_t_61_ += 8.0 * mu[k+1][j][i] * _t_69_;
double _t_66_ = u_2[k-1][j-2][i];
_t_66_ -= u_2[k-1][j+2][i];
_t_66_ += 8.0 * -u_2[k-1][j-1][i];
_t_66_ += 8.0 * u_2[k-1][j+1][i];
_t_61_ -= 8.0 * mu[k-1][j][i] * _t_66_;
double _t_72_ = -u_2[k+2][j-1][i];
_t_72_ += u_2[k+2][j+1][i];
double _v_49_ = mu[k+2][j][i] * 8.0 * _t_72_;
_v_49_ += mu[k+2][j][i] * u_2[k+2][j-2][i];
_v_49_ -= mu[k+2][j][i] * u_2[k+2][j+2][i];
_t_61_ -= _v_49_;
double _t_60_ = 1.0 / 144.0 * stry[j] * strz[k];
_t_20_ += _t_60_ * _t_61_;
a_r2 += _t_20_;
// Scale by 1/h^2 and stage the plane-k result; the store itself is
// deferred to the end of the iteration (see below).
double uacc_1kc0jc0ic0 = cof * a_r2;
uacc_1kc0jc0ic0 += a1 * uacc_1[k][j][i];
// Second half of the iteration: same computation for plane k+1
// (the muz terms are re-centered on k+1, hence the shifted indices).
b_mux1 = mu[k+1][j][i-1] * strx[i-1];
b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i] * strx[i];
b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i-2] * strx[i-2];
b_mux2 = mu[k+1][j][i-2] * strx[i-2];
b_mux2 += mu[k+1][j][i+1] * strx[i+1];
b_mux2 += 3.0 * mu[k+1][j][i] * strx[i];
b_mux2 += 3.0 * mu[k+1][j][i-1] * strx[i-1];
b_mux3 = mu[k+1][j][i-1] * strx[i-1];
b_mux3 += mu[k+1][j][i+2] * strx[i+2];
b_mux3 += 3.0 * mu[k+1][j][i+1] * strx[i+1];
b_mux3 += 3.0 * mu[k+1][j][i] * strx[i];
b_mux4 = mu[k+1][j][i+1] * strx[i+1];
b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i+2] * strx[i+2];
b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i] * strx[i];
b_muy1 = mu[k+1][j-1][i] * stry[j-1];
b_muy1 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j];
b_muy1 -= 3.0 / 4.0 * mu[k+1][j-2][i] * stry[j-2];
b_muy2 = mu[k+1][j-2][i] * stry[j-2];
b_muy2 += mu[k+1][j+1][i] * stry[j+1];
b_muy2 += 3.0 * mu[k+1][j][i] * stry[j];
b_muy2 += 3.0 * mu[k+1][j-1][i] * stry[j-1];
b_muy3 = mu[k+1][j-1][i] * stry[j-1];
b_muy3 += mu[k+1][j+2][i] * stry[j+2];
b_muy3 += 3.0 * mu[k+1][j+1][i] * stry[j+1];
b_muy3 += 3.0 * mu[k+1][j][i] * stry[j];
b_muy4 = mu[k+1][j+1][i] * stry[j+1];
b_muy4 -= 3.0 / 4.0 * mu[k+1][j+2][i] * stry[j+2];
b_muy4 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j];
b_muz1 = mu[k][j][i] * strz[k];
b_muz1 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1];
b_muz1 -= 3.0 / 4.0 * mu[k-1][j][i] * strz[k-1];
b_muz2 = mu[k-1][j][i] * strz[k-1];
b_muz2 += mu[k+2][j][i] * strz[k+2];
b_muz2 += 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz2 += 3.0 * mu[k][j][i] * strz[k];
b_muz3 = mu[k][j][i] * strz[k];
b_muz3 += mu[k+3][j][i] * strz[k+3];
b_muz3 += 3.0 * mu[k+2][j][i] * strz[k+2];
b_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 = mu[k+2][j][i] * strz[k+2];
b_muz4 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 -= 3.0 / 4.0 * mu[k+3][j][i] * strz[k+3];
double _v_87_ = b_muz1 * u_1[k-1][j][i];
_v_87_ -= b_muz1 * u_1[k+1][j][i];
double _v_88_ = -(b_muz2 * u_1[k+1][j][i]);
_v_88_ += b_muz2 * u_1[k][j][i];
double _v_89_ = -(b_muz3 * u_1[k+1][j][i]);
_v_89_ += b_muz3 * u_1[k+2][j][i];
double _v_90_ = -(b_muz4 * u_1[k+1][j][i]);
_v_90_ += b_muz4 * u_1[k+3][j][i];
double _v_71_ = -(b_mux1 * u_1[k+1][j][i]);
_v_71_ += b_mux1 * u_1[k+1][j][i-2];
double _v_72_ = -(b_mux2 * u_1[k+1][j][i]);
_v_72_ += b_mux2 * u_1[k+1][j][i-1];
double _v_73_ = -(b_mux3 * u_1[k+1][j][i]);
_v_73_ += b_mux3 * u_1[k+1][j][i+1];
double _v_74_ = -(b_mux4 * u_1[k+1][j][i]);
_v_74_ += b_mux4 * u_1[k+1][j][i+2];
double _t_81_ = -(u_1[k+1][j][i]);
_t_81_ += u_1[k+1][j-2][i];
double _t_83_ = -(u_1[k+1][j][i]);
_t_83_ += u_1[k+1][j-1][i];
double _t_85_ = -(u_1[k+1][j][i]);
_t_85_ += u_1[k+1][j+1][i];
double _t_87_ = -(u_1[k+1][j][i]);
_t_87_ += u_1[k+1][j+2][i];
double _v_91_ = strz[k+1] * _v_87_;
_v_91_ += strz[k+1] * _v_88_;
_v_91_ += strz[k+1] * _v_89_;
_v_91_ += strz[k+1] * _v_90_;
b_r2 = 1.0 / 6.0 * _v_91_;
double _v_75_ = strx[i] * _v_71_;
_v_75_ += strx[i] * _v_72_;
_v_75_ += strx[i] * _v_73_;
_v_75_ += strx[i] * _v_74_;
b_r2 += 1.0 / 6.0 * _v_75_;
double _t_80_ = 2.0 * b_muy1;
double _v_76_ = la[k+1][j-1][i] * stry[j-1];
_t_80_ += _v_76_;
_t_80_ -= 3.0 / 4.0 * la[k+1][j][i] * stry[j];
double _t_82_ = 3.0 * la[k+1][j][i] * stry[j];
_t_82_ += 3.0 * la[k+1][j-1][i] * stry[j-1];
_t_82_ += 2.0 * b_muy2;
double _t_84_ = 3.0 * la[k+1][j][i] * stry[j];
_t_84_ += 2.0 * b_muy3;
double _t_86_ = -(3.0 / 4.0 * la[k+1][j][i] * stry[j]);
_t_86_ += 2.0 * b_muy4;
_t_80_ -= 3.0 / 4.0 * la[k+1][j-2][i] * stry[j-2];
double _v_77_ = _t_80_ * _t_81_;
double _v_86_ = stry[j] * _v_77_;
_t_82_ += la[k+1][j-2][i] * stry[j-2];
double _v_79_ = la[k+1][j+1][i] * stry[j+1];
_t_84_ += 3.0 * la[k+1][j+1][i] * stry[j+1];
_t_82_ += _v_79_;
double _v_80_ = _t_82_ * _t_83_;
_v_86_ += stry[j] * _v_80_;
_t_84_ += _v_76_;
_t_84_ += la[k+1][j+2][i] * stry[j+2];
_t_86_ -= 3.0 / 4.0 * la[k+1][j+2][i] * stry[j+2];
double _v_83_ = _t_84_ * _t_85_;
_v_86_ += stry[j] * _v_83_;
_t_86_ += _v_79_;
double _v_85_ = _t_86_ * _t_87_;
_v_86_ += stry[j] * _v_85_;
b_r2 += 1.0 / 6.0 * _v_86_;
// Mixed-derivative cross terms for plane k+1.
double _v_101_ = mu[k-1][j][i] * u_2[k-1][j-2][i];
_v_101_ -= mu[k-1][j][i] * u_2[k-1][j+2][i];
double _v_98_ = la[k+1][j-2][i] * u_2[k-1][j-2][i];
_v_98_ -= la[k+1][j-2][i] * u_2[k+3][j-2][i];
double _v_99_ = la[k+1][j+2][i] * u_2[k-1][j+2][i];
_v_99_ -= la[k+1][j+2][i] * u_2[k+3][j+2][i];
double _v_95_ = la[k+1][j-2][i] * u_0[k+1][j-2][i-2];
_v_95_ -= la[k+1][j-2][i] * u_0[k+1][j-2][i+2];
double _v_96_ = la[k+1][j+2][i] * u_0[k+1][j+2][i-2];
_v_96_ -= la[k+1][j+2][i] * u_0[k+1][j+2][i+2];
double _v_102_ = mu[k+3][j][i] * u_2[k+3][j-2][i];
_v_102_ -= mu[k+3][j][i] * u_2[k+3][j+2][i];
double _v_92_ = mu[k+1][j][i-2] * u_0[k+1][j-2][i-2];
_v_92_ -= mu[k+1][j][i-2] * u_0[k+1][j+2][i-2];
double _v_93_ = mu[k+1][j][i+2] * u_0[k+1][j-2][i+2];
_v_93_ -= mu[k+1][j][i+2] * u_0[k+1][j+2][i+2];
double _t_137_ = -u_2[k-1][j-1][i];
_t_137_ += u_2[k-1][j+1][i];
double _t_126_ = u_2[k-1][j-1][i];
_t_126_ -= u_2[k+3][j-1][i];
_t_126_ += 8.0 * -u_2[k][j-1][i];
_t_126_ += 8.0 * u_2[k+2][j-1][i];
double _t_129_ = u_2[k-1][j+1][i];
_t_129_ -= u_2[k+3][j+1][i];
_t_129_ += 8.0 * -u_2[k][j+1][i];
_t_129_ += 8.0 * u_2[k+2][j+1][i];
double _t_142_ = 8.0 * -u_2[k+2][j-1][i];
_t_142_ += 8.0 * u_2[k+2][j+1][i];
_t_142_ += u_2[k+2][j-2][i];
_t_142_ -= u_2[k+2][j+2][i];
double _t_139_ = 8.0 * -u_2[k][j-1][i];
_t_139_ += 8.0 * u_2[k][j+1][i];
_t_139_ += u_2[k][j-2][i];
_t_139_ -= u_2[k][j+2][i];
double _t_145_ = -u_2[k+3][j-1][i];
_t_145_ += u_2[k+3][j+1][i];
double _t_124_ = -u_2[k][j-2][i];
_t_124_ += u_2[k+2][j-2][i];
double _t_132_ = -u_2[k][j+2][i];
_t_132_ += u_2[k+2][j+2][i];
_v_101_ += mu[k-1][j][i] * 8.0 * _t_137_;
double _t_134_ = _v_101_;
_t_134_ += 8.0 * mu[k+2][j][i] * _t_142_;
_t_134_ -= 8.0 * mu[k][j][i] * _t_139_;
_v_102_ += mu[k+3][j][i] * 8.0 * _t_145_;
_t_134_ -= _v_102_;
double _t_133_ = 1.0 / 144.0 * stry[j] * strz[k+1];
double _t_94_ = 1.0 / 144.0 * strx[i] * stry[j];
double _t_93_ = _t_133_ * _t_134_;
_v_98_ += la[k+1][j-2][i] * 8.0 * _t_124_;
double _t_121_ = _v_98_;
_t_121_ -= 8.0 * la[k+1][j-1][i] * _t_126_;
_t_121_ += 8.0 * la[k+1][j+1][i] * _t_129_;
_v_99_ += la[k+1][j+2][i] * 8.0 * _t_132_;
_t_121_ -= _v_99_;
double _t_120_ = _t_133_;
_t_93_ += _t_120_ * _t_121_;
double _t_100_ = u_0[k+1][j-2][i-1];
_t_100_ -= u_0[k+1][j+2][i-1];
_t_100_ += 8.0 * -u_0[k+1][j-1][i-1];
_t_100_ += 8.0 * u_0[k+1][j+1][i-1];
double _t_111_ = -u_0[k+1][j-2][i-1];
_t_111_ += u_0[k+1][j-2][i+1];
double _t_113_ = 8.0 * -u_0[k+1][j-1][i-1];
_t_113_ += 8.0 * u_0[k+1][j-1][i+1];
_t_113_ += u_0[k+1][j-1][i-2];
_t_113_ -= u_0[k+1][j-1][i+2];
double _t_116_ = 8.0 * -u_0[k+1][j+1][i-1];
_t_116_ += 8.0 * u_0[k+1][j+1][i+1];
_t_116_ += u_0[k+1][j+1][i-2];
_t_116_ -= u_0[k+1][j+1][i+2];
double _t_119_ = -u_0[k+1][j+2][i-1];
_t_119_ += u_0[k+1][j+2][i+1];
double _t_103_ = u_0[k+1][j-2][i+1];
_t_103_ -= u_0[k+1][j+2][i+1];
_t_103_ += 8.0 * -u_0[k+1][j-1][i+1];
_t_103_ += 8.0 * u_0[k+1][j+1][i+1];
double _t_98_ = -u_0[k+1][j-1][i-2];
_t_98_ += u_0[k+1][j+1][i-2];
double _t_106_ = -u_0[k+1][j-1][i+2];
_t_106_ += u_0[k+1][j+1][i+2];
double _t_95_ = -(8.0 * mu[k+1][j][i-1] * _t_100_);
_t_95_ += 8.0 * mu[k+1][j][i+1] * _t_103_;
_v_92_ += mu[k+1][j][i-2] * 8.0 * _t_98_;
_t_95_ += _v_92_;
_v_93_ += mu[k+1][j][i+2] * 8.0 * _t_106_;
_t_95_ -= _v_93_;
_t_93_ += _t_94_ * _t_95_;
_v_95_ += la[k+1][j-2][i] * 8.0 * _t_111_;
double _t_108_ = _v_95_;
_t_108_ -= 8.0 * la[k+1][j-1][i] * _t_113_;
_t_108_ += 8.0 * la[k+1][j+1][i] * _t_116_;
_v_96_ += la[k+1][j+2][i] * 8.0 * _t_119_;
_t_108_ -= _v_96_;
double _t_107_ = _t_94_;
_t_93_ += _t_107_ * _t_108_;
b_r2 += _t_93_;
double _v_105_ = cof * b_r2;
double uacc_1kp1jc0ic0 = _v_105_;
uacc_1kp1jc0ic0 += a1 * uacc_1[k+1][j][i];
// Both plane results are written at the end so the stores are grouped.
uacc_1[k][j][i] = uacc_1kc0jc0ic0;
uacc_1[k+1][j][i] = uacc_1kp1jc0ic0;
}
}
}
// Kernel 3 of 3: accumulates the z-component (uacc_2) of the same
// 4th-order SW4-style stencil.  2D grid over (i, j); unlike sw4_1 and
// sw4_2 the k-loop here advances one plane per iteration (unroll 10).
// The z direction carries the combined (2*mu + la) coefficient; x and
// y use the mu-only averages.  Statement order matters for
// floating-point reproducibility; do not re-associate.
// NOTE(review): arrays are reinterpreted as fixed 304x304x304 volumes,
// so this presumably requires N == 304 -- TODO confirm caller.
__global__ void __launch_bounds__ (128,2) sw4_3 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
// Reinterpret the flat device pointers as 3D [k][j][i] volumes.
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
double r1, r2, r3;
// NOTE(review): bitwise '&' instead of logical '&&' -- operands are
// 0/1 comparison results, so the value is the same here.
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 10
for (int k=2; k<=N-3; k++) {
// One-sided averaged material coefficients for plane k.
mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
// Second-difference terms; (2*mu + la) is applied along z here.
r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k][j][i-2] - u_2[k][j][i]) + mux2 * (u_2[k][j][i-1] - u_2[k][j][i]) + mux3 * (u_2[k][j][i+1] - u_2[k][j][i]) + mux4 * (u_2[k][j][i+2] - u_2[k][j][i])) +
stry[j] * (muy1 * (u_2[k][j-2][i] - u_2[k][j][i]) + muy2 * (u_2[k][j-1][i] - u_2[k][j][i]) + muy3 * (u_2[k][j+1][i] - u_2[k][j][i]) + muy4 * (u_2[k][j+2][i] - u_2[k][j][i])) +
strz[k] * ((2 * muz1 + la[k-1][j][i] * strz[k-1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k-2][j][i] * strz[k-2]) * (u_2[k-2][j][i] - u_2[k][j][i]) +
(2 * muz2 + la[k-2][j][i] * strz[k-2] + la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k] + 3 * la[k-1][j][i] * strz[k-1]) * (u_2[k-1][j][i] - u_2[k][j][i]) +
(2 * muz3 + la[k-1][j][i] * strz[k-1] + la[k+2][j][i] * strz[k+2] + 3 * la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k]) * (u_2[k+1][j][i] - u_2[k][j][i]) +
(2 * muz4 + la[k+1][j][i] * strz[k+1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k+2][j][i] * strz[k+2]) * (u_2[k+2][j][i] - u_2[k][j][i])));
// Mixed-derivative cross terms (4th-order, 1/144 weight).
r3 += strx[i] * strz[k] * (1e0 / 144) * (mu[k][j][i-2] * (u_0[k-2][j][i-2] - u_0[k+2][j][i-2] + 8 * (-u_0[k-1][j][i-2] + u_0[k+1][j][i-2])) - 8 * (mu[k][j][i-1] * (u_0[k-2][j][i-1] - u_0[k+2][j][i-1] + 8 * (-u_0[k-1][j][i-1] + u_0[k+1][j][i-1]))) + 8 * (mu[k][j][i+1] * (u_0[k-2][j][i+1] - u_0[k+2][j][i+1] + 8 * (-u_0[k-1][j][i+1] + u_0[k+1][j][i+1]))) - (mu[k][j][i+2] * (u_0[k-2][j][i+2] - u_0[k+2][j][i+2] + 8 * (-u_0[k-1][j][i+2] + u_0[k+1][j][i+2]))));
r3 += stry[j] * strz[k] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k-2][j-2][i] - u_1[k+2][j-2][i] + 8 * (-u_1[k-1][j-2][i] + u_1[k+1][j-2][i])) - 8 * (mu[k][j-1][i] * (u_1[k-2][j-1][i] - u_1[k+2][j-1][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k+1][j-1][i]))) + 8 * (mu[k][j+1][i] * (u_1[k-2][j+1][i] - u_1[k+2][j+1][i] + 8 * (-u_1[k-1][j+1][i] + u_1[k+1][j+1][i]))) - (mu[k][j+2][i] * (u_1[k-2][j+2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k-1][j+2][i] + u_1[k+1][j+2][i]))));
r3 += strx[i] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_0[k-2][j][i-2] - u_0[k-2][j][i+2] + 8 * (-u_0[k-2][j][i-1] + u_0[k-2][j][i+1])) - 8 * (la[k-1][j][i] * (u_0[k-1][j][i-2] - u_0[k-1][j][i+2] + 8 * (-u_0[k-1][j][i-1] + u_0[k-1][j][i+1]))) + 8 * (la[k+1][j][i] * (u_0[k+1][j][i-2] - u_0[k+1][j][i+2] + 8 * (-u_0[k+1][j][i-1] + u_0[k+1][j][i+1]))) - (la[k+2][j][i] * (u_0[k+2][j][i-2] - u_0[k+2][j][i+2] + 8 * (-u_0[k+2][j][i-1] + u_0[k+2][j][i+1]))));
r3 += stry[j] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_1[k-2][j-2][i] - u_1[k-2][j+2][i] + 8 * (-u_1[k-2][j-1][i] + u_1[k-2][j+1][i])) - 8 * (la[k-1][j][i] * (u_1[k-1][j-2][i] - u_1[k-1][j+2][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k-1][j+1][i]))) + 8 * (la[k+1][j][i] * (u_1[k+1][j-2][i] - u_1[k+1][j+2][i] + 8 * (-u_1[k+1][j-1][i] + u_1[k+1][j+1][i]))) - (la[k+2][j][i] * (u_1[k+2][j-2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k+2][j-1][i] + u_1[k+2][j+1][i]))));
// Scale by 1/h^2 and accumulate into the output for plane k.
uacc_2[k][j][i] = a1 * uacc_2[k][j][i] + cof * r3;
}
}
}
// Copy `count` doubles from `host` into a freshly allocated device buffer.
// On allocation failure the project-provided check_error() reports `errmsg`.
static double *copy_to_device (const double *host, size_t count, const char *errmsg) {
	double *dev;
	cudaMalloc (&dev, sizeof(double)*count);
	check_error (errmsg);
	cudaMemcpy (dev, host, sizeof(double)*count, cudaMemcpyHostToDevice);
	return dev;
}
// Host driver: upload all N^3 fields and the three N-length stretch arrays,
// run the sw4_1..sw4_3 stencil kernels, download the updated acceleration
// fields, and release device memory. `ceil` here is the project's
// two-argument ceiling-divide macro (it shadows std::ceil).
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
	size_t n3 = (size_t)N*N*N;
	double *uacc_0 = copy_to_device (h_uacc_0, n3, "Failed to allocate device memory for uacc_0\n");
	double *uacc_1 = copy_to_device (h_uacc_1, n3, "Failed to allocate device memory for uacc_1\n");
	double *uacc_2 = copy_to_device (h_uacc_2, n3, "Failed to allocate device memory for uacc_2\n");
	double *u_0 = copy_to_device (h_u_0, n3, "Failed to allocate device memory for u_0\n");
	double *u_1 = copy_to_device (h_u_1, n3, "Failed to allocate device memory for u_1\n");
	double *u_2 = copy_to_device (h_u_2, n3, "Failed to allocate device memory for u_2\n");
	double *mu = copy_to_device (h_mu, n3, "Failed to allocate device memory for mu\n");
	double *la = copy_to_device (h_la, n3, "Failed to allocate device memory for la\n");
	double *strx = copy_to_device (h_strx, (size_t)N, "Failed to allocate device memory for strx\n");
	double *stry = copy_to_device (h_stry, (size_t)N, "Failed to allocate device memory for stry\n");
	double *strz = copy_to_device (h_strz, (size_t)N, "Failed to allocate device memory for strz\n");
	// 2-D launch over (i, j); each kernel iterates k internally (per its body).
	dim3 blockconfig (16, 8);
	dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
	sw4_1 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
	sw4_2 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
	sw4_3 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
	// These blocking copies also synchronize with the kernels above.
	cudaMemcpy (h_uacc_0, uacc_0, sizeof(double)*n3, cudaMemcpyDeviceToHost);
	cudaMemcpy (h_uacc_1, uacc_1, sizeof(double)*n3, cudaMemcpyDeviceToHost);
	cudaMemcpy (h_uacc_2, uacc_2, sizeof(double)*n3, cudaMemcpyDeviceToHost);
	cudaFree (uacc_0);
	cudaFree (uacc_1);
	cudaFree (uacc_2);
	cudaFree (u_0);
	cudaFree (u_1);
	cudaFree (u_2);
	cudaFree (mu);
	cudaFree (la);
	cudaFree (strx);
	cudaFree (stry);
	cudaFree (strz);
}
|
5,690 | #include <stdio.h>
#include <cuda.h>
// Minimal kernel: every launched thread prints one greeting from the device.
// Device printf output is buffered until the next host synchronization.
__global__ void dkernel() {
printf("Hello World.\n");
}
// Launch one thread of one block, then block until the kernel (and its
// printf buffer flush) completes.
int main() {
    dkernel<<<1, 1>>>();
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent.
    cudaDeviceSynchronize();
    return 0;
}
|
5,691 | #include <stdio.h>
#define ARRAY_SIZE 128
// Helper function to print an array
// Print the first `array_size` entries of `array` as "{ a, b, ... }".
void print_array(float *array, int array_size) {
    int idx = 0;
    printf("{ ");
    while (idx < array_size) {
        printf("%0.2f, ", array[idx]);
        ++idx;
    }
    printf("}\n");
}
// Using shared memory (For clarity, hardcoding 128 threads/elements and omitting out-of-bounds checks)
// Kernel: stage `array` into shared memory, then clamp each element down to
// the running average of all elements before it. Assumes a single block of
// exactly ARRAY_SIZE threads (launched as <<<1, ARRAY_SIZE>>>), one element
// per thread, with no bounds checks.
__global__ void use_shared_memory(float *array)
{
// Local variables (private to each thread)
int index = threadIdx.x;
float sum = 0.0f;
float average;
// __shared__ variables are visible to all threads in the thread block
// and have the same lifetime as the thread block
__shared__ float sh_arr[ARRAY_SIZE];
// Copy data from "array" in global memory to sh_arr in shared memory.
// Here, each thread is responsible for copying a single element.
sh_arr[index] = array[index];
__syncthreads(); // Ensure all the writes to shared memory have completed
// Now, sh_arr is fully populated. Let's find the average of all previous elements
for (int i=0; i<index; i++) {
sum += sh_arr[i];
}
// Average of array[0..index-1]; thread 0 has no predecessors, so use 0.
if (index > 0) {
average = sum / (index + 0.0f);
} else {
average = 0.0f;
}
// If array[index] is greater than the average of array[0..index-1], replace with average.
// since array[] is in global memory, this change will be seen by the host (and potentially
// other thread blocks, if any)
if (array[index] > average) {
array[index] = average;
}
//printf("Thread: %d, and average: %0.2f\n", index, average);
// The following code has NO EFFECT: it modifies shared memory, but
// the resulting modified data is never copied back to global memory
// and vanishes when the thread block completes
sh_arr[index] = 3.14;
}
// Host driver: build a test array, run use_shared_memory over it in a single
// block, and print the array before and after.
int main(int argc, char **argv)
{
// Declare 1D array on host
float h_arr[ARRAY_SIZE];
// Declare device memory pointer
float *d_arr;
// Memory size
int SIZE = ARRAY_SIZE * sizeof(float);
// Initialize 1D array on host: odd indices get i*2.5, even indices 0.
for (int i=0; i<ARRAY_SIZE; i++) {
if (i % 2) { h_arr[i] = i * 2.5f; } else { h_arr[i] = 0.0f; }
}
// Print the input array
print_array(h_arr, ARRAY_SIZE);
// Allocate device memory
cudaMalloc((void **) &d_arr, SIZE);
// Transfer input array from host memory to device memory
cudaMemcpy((void *)d_arr, (void *)h_arr, SIZE, cudaMemcpyHostToDevice);
// Launch kernel: one block, one thread per element (kernel assumes this).
use_shared_memory<<<1, ARRAY_SIZE>>>(d_arr);
// Copy the result from device to the host; this blocking copy also
// synchronizes with the kernel above.
cudaMemcpy((void *)h_arr, (void *)d_arr, SIZE, cudaMemcpyDeviceToHost);
// Print the output array
print_array(h_arr, ARRAY_SIZE);
// Free GPU memory allocation
cudaFree(d_arr);
return 0;
}
|
5,692 | #include <cstdio>
#include <cstdlib>
#define N 8
// Block-wide max reduction: *out = max(in[0..N-1]).
// Assumes a single block whose threads cover N (N a power of two), as
// launched by main with <<<1, N>>>.
__global__ void mmax(int* in, int* out)
{
int idx = threadIdx.x + blockDim.x*blockIdx.x;
__shared__ int rslt[N];
// Guard the shared store so an over-sized launch cannot write out of bounds.
if (idx < N)
rslt[idx] = in[idx];
int lim = N/2;
int temp = 0;
// Pairwise tree reduction. The compute and the write-back are separated by
// barriers because thread i reads slots 2i and 2i+1 while thread i writes
// slot i; merging them would race within an iteration.
while (lim > 0) {
__syncthreads();
if(idx < lim) {
temp = max(rslt[2*idx], rslt[2*idx + 1]);
}
__syncthreads();
// Only active threads write back; previously every thread stored its
// (stale or zero-initialized) temp, which was harmless only by accident.
if(idx < lim) {
rslt[idx] = temp;
}
lim /= 2;
}
if (idx == 0)
*out = rslt[0];
}
// Host driver: fill N random ints (unseeded rand() -> deterministic run),
// reduce them on the GPU with mmax, and print input and result.
int main()
{
int* in = (int*) malloc(N*sizeof(int));
for(int i = 0; i < N; i++) {
in[i] = rand() % 256;
printf("%d ", in[i]);
}
printf("\n");
int* din;
cudaMalloc((void**) &din, N*sizeof(int));
cudaMemcpy(din, in, N*sizeof(int), cudaMemcpyHostToDevice);
int* dout;
cudaMalloc((void**) &dout, sizeof(int));
// One block of N threads, matching mmax's single-block assumption.
mmax<<<1,N>>>(din,dout);
int rslt;
// Blocking copy doubles as kernel synchronization.
cudaMemcpy(&rslt, dout, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d\n", rslt);
cudaFree(din); cudaFree(dout); free(in);
} |
5,693 | /**
Cで学ぶアルゴリズムとデータ構造
ステップバイステップでN−クイーン問題を最適化
一般社団法人 共同通信社 情報技術局 鈴木 維一郎(suzuki.iichiro@kyodonews.jp)
コンパイル
$ nvcc CUDA01_N-Queen.cu -o CUDA01_N-Queen
実行
$ ./CUDA01_N-Queen
1. ブルートフォース 力任せ探索
全ての可能性のある解の候補を体系的に数え上げ、それぞれの解候補が問題の解とな
るかをチェックする方法
(※)各行に1個の王妃を配置する組み合わせを再帰的に列挙組み合わせを生成するだ
けであって8王妃問題を解いているわけではありません
実行結果
:
:
16777209: 7 7 7 7 7 7 7 0
16777210: 7 7 7 7 7 7 7 1
16777211: 7 7 7 7 7 7 7 2
16777212: 7 7 7 7 7 7 7 3
16777213: 7 7 7 7 7 7 7 4
16777214: 7 7 7 7 7 7 7 5
16777215: 7 7 7 7 7 7 7 6
16777216: 7 7 7 7 7 7 7 7
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
//
long Total=0 ; //合計解
long Unique=0;
int down[2*MAX-1]; //down:flagA 縦 配置フラグ
int left[2*MAX-1]; //left:flagB 斜め配置フラグ
int right[2*MAX-1]; //right:flagC 斜め配置フラグ
int aBoard[MAX];
int aT[MAX];
int aS[MAX];
int bit;
int COUNT2,COUNT4,COUNT8;
//
// GPU kernel: each thread finishes one partial N-Queens placement
// (down/left/right masks precomputed on the host for the first rows) by
// iterative bitmap backtracking over the remaining `mark` rows, then the
// block reduces per-thread solution counts into results[bid].
// NOTE(review): the per-thread stacks are depth 10, so this assumes
// mark <= 10 (host caps it via mark = n>11 ? n-10 : 2) — confirm for n>21.
// NOTE(review): the tail of the reduction (tid<32 and below) omits
// __syncthreads()/__syncwarp() and relies on implicit warp synchrony,
// which is not guaranteed on Volta+ independent thread scheduling.
__global__ void solve_nqueen_cuda_kernel_bt_bm(
int n,int mark,
unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,
unsigned int* results,int totalCond){
const int tid=threadIdx.x,bid=blockIdx.x,idx=bid*blockDim.x+tid;
__shared__ unsigned int down[THREAD_NUM][10],left[THREAD_NUM][10],right[THREAD_NUM][10],
bitmap[THREAD_NUM][10],sum[THREAD_NUM];
const unsigned int mask=(1<<n)-1;int total=0,i=0;unsigned int bit;
if(idx<totalCond){
// Seed level 0 of this thread's stack from its assigned partial state.
down[tid][i]=totalDown[idx];
left[tid][i]=totalLeft[idx];
right[tid][i]=totalRight[idx];
bitmap[tid][i]=down[tid][i]|left[tid][i]|right[tid][i];
while(i>=0){
if((bitmap[tid][i]&mask)==mask){i--;}
else{
// Pick the lowest still-free column and mark it tried.
bit=(bitmap[tid][i]+1)&~bitmap[tid][i];
bitmap[tid][i]|=bit;
if((bit&mask)!=0){
if(i+1==mark){total++;i--;}
else{
down[tid][i+1]=down[tid][i]|bit;
left[tid][i+1]=(left[tid][i]|bit)<<1;
right[tid][i+1]=(right[tid][i]|bit)>>1;
bitmap[tid][i+1]=(down[tid][i+1]|left[tid][i+1]|right[tid][i+1]);
i++;
}
}else{i--;}
}
}
sum[tid]=total;
}else{sum[tid]=0;}
// Block-wide sum reduction of per-thread counts.
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){sum[tid]+=sum[tid+64];}
__syncthreads();if(tid<32){sum[tid]+=sum[tid+32];}
__syncthreads();if(tid<16){sum[tid]+=sum[tid+16];}
__syncthreads();if(tid<8){sum[tid]+=sum[tid+8];}
__syncthreads();if(tid<4){sum[tid]+=sum[tid+4];}
__syncthreads();if(tid<2){sum[tid]+=sum[tid+2];}
__syncthreads();if(tid<1){sum[tid]+=sum[tid+1];}
__syncthreads();if(tid==0){results[bid]=sum[0];}
}
//
// Count all N-Queens solutions for board size n using the GPU.
// The host enumerates placements of the first `mark` rows by bitmap
// backtracking, batches up to `steps` partial states, and ships each full
// batch to solve_nqueen_cuda_kernel_bt_bm to finish. Mirror symmetry is
// exploited: only the left half of first-row columns is searched and the
// total doubled; for odd n the center column is handled in a second pass.
// NOTE(review): batches are double-buffered via `computed` — results of the
// previous launch are collected just before the next upload.
long long solve_nqueen_cuda(int n,int steps) {
unsigned int down[32];unsigned int left[32];unsigned int right[32];
unsigned int m[32];unsigned int bit;
if(n<=0||n>32){return 0;}
unsigned int* totalDown=new unsigned int[steps];
unsigned int* totalLeft=new unsigned int[steps];
unsigned int* totalRight=new unsigned int[steps];
unsigned int* results=new unsigned int[steps];
unsigned int* downCuda;unsigned int* leftCuda;unsigned int* rightCuda;
unsigned int* resultsCuda;
cudaMalloc((void**) &downCuda,sizeof(int)*steps);
cudaMalloc((void**) &leftCuda,sizeof(int)*steps);
cudaMalloc((void**) &rightCuda,sizeof(int)*steps);
cudaMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
const unsigned int mask=(1<<n)-1;
// Depth at which host search stops and GPU takes over (kernel stack is 10).
const unsigned int mark=n>11?n-10:2;
long long total=0;int totalCond=0;
int i=0,j;down[0]=0;left[0]=0;right[0]=0;m[0]=0;bool computed=false;
// Pass 1: first-row queen in columns 0 .. n/2-1 (left half).
for(j=0;j<n/2;j++){
bit=(1<<j);m[0]|=bit;
down[1]=bit;left[1]=bit<<1;right[1]=bit>>1;
m[1]=(down[1]|left[1]|right[1]);
i=1;
while(i>0){
if((m[i]&mask)==mask){i--;}
else{
bit=(m[i]+1)&~m[i];m[i]|=bit;
if((bit&mask)!=0){
down[i+1]=down[i]|bit;left[i+1]=(left[i]|bit)<<1;right[i+1]=(right[i]|bit)>>1;
m[i+1]=(down[i+1]|left[i+1]|right[i+1]);
i++;
if(i==mark){
// Record this partial state for the GPU batch.
totalDown[totalCond]=down[i];totalLeft[totalCond]=left[i];totalRight[totalCond]=right[i];
totalCond++;
if(totalCond==steps){
if(computed){
cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
computed=false;
}
cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
computed=true;totalCond=0;
}
i--;
}
}else{i --;}
}
}
}
// Flush the final (partial) batch of pass 1.
if(computed){
cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
computed=false;
}
cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
// Mirror symmetry: each left-half solution has a right-half counterpart.
total*=2;
// Pass 2 (odd n only): first-row queen in the center column.
if(n%2==1){
computed=false;totalCond=0;bit=(1<<(n-1)/2);m[0]|=bit;
down[1]=bit;left[1]=bit<<1;right[1]=bit>>1;
m[1]=(down[1]|left[1]|right[1]);
i=1;
while(i>0){
if((m[i]&mask)==mask){i--;}
else{
bit=(m[i]+1)&~m[i];m[i]|=bit;
if((bit&mask)!=0){
down[i+1]=down[i]|bit;left[i+1]=(left[i]|bit)<<1;right[i+1]=(right[i]|bit)>>1;
m[i+1]=(down[i+1]|left[i+1]|right[i+1]);
i++;
if(i==mark){
totalDown[totalCond]=down[i];totalLeft[totalCond]=left[i];totalRight[totalCond]=right[i];
totalCond++;
if(totalCond==steps){
if(computed){
cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
computed=false;
}
cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
computed=true;totalCond=0;
}
i--;
}
}else{i --;}
}
}
if(computed){
cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
computed=false;
}
cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
/** backTrack+bitmap*/
solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
}
cudaFree(downCuda);cudaFree(leftCuda);cudaFree(rightCuda);cudaFree(resultsCuda);
delete[] totalDown;delete[] totalLeft;delete[] totalRight;delete[] results;
return total;
}
/** CUDA 初期化 **/
/** Select the first device whose properties report compute capability
 *  >= 1.x. Returns false (with a message on stderr) when no device exists
 *  or none is usable. */
bool InitCUDA(){
  int deviceCount;
  cudaGetDeviceCount(&deviceCount);
  if(deviceCount==0){
    fprintf(stderr,"There is no device.\n");
    return false;
  }
  int dev=0;
  while(dev<deviceCount){
    cudaDeviceProp prop;
    // Stop at the first device we can query that reports major >= 1.
    if(cudaGetDeviceProperties(&prop,dev)==cudaSuccess && prop.major>=1){
      break;
    }
    ++dev;
  }
  if(dev==deviceCount){
    fprintf(stderr,"There is no device supporting CUDA 1.x.\n");
    return false;
  }
  cudaSetDevice(dev);
  return true;
}
//main()以外のメソッドはここに一覧表記させます
void TimeFormat(clock_t utime,char *form);
void dtob(int score,int si);
int rh(int a,int sz);
void rotate_bitmap(int bf[],int af[],int si);
void vMirror_bitmap(int bf[],int af[],int si);
int intncmp(int lt[],int rt[],int n);
long getUnique();
long getTotal();
void symmetryOps_bitmap(int si);
void NQueen(int row,int size);
//hh:mm:ss.ms形式に処理時間を出力
// Format an elapsed CPU time (clock ticks) into `form` as a right-aligned
// string, widening from "ss.xx" up to "dddd hh:mm:ss.xx" as needed.
void TimeFormat(clock_t utime,char *form){
  float elapsed=(float)utime/CLOCKS_PER_SEC;
  int totalMinutes=(int)elapsed/60;
  float ss=elapsed-(int)(totalMinutes*60);
  int dd=totalMinutes/(24*60);
  int rem=totalMinutes%(24*60);
  int hh=rem/60;
  int mm=rem%60;
  if(dd)
    sprintf(form,"%4d %02d:%02d:%05.2f",dd,hh,mm,ss);
  else if(hh)
    sprintf(form," %2d:%02d:%05.2f",hh,mm,ss);
  else if(mm)
    sprintf(form," %2d:%05.2f",mm,ss);
  else
    sprintf(form," %5.2f",ss);
}
// Print the low `si` bits of `score` as a binary string, most significant
// bit first, followed by a newline.
void dtob(int score,int si) {
  for (int pos=si-1;pos>=0;pos--){
    putchar(((score>>pos)&1) ? '1' : '0');
  }
  printf("\n");
}
//
// Reflect the position of the LOWEST set bit of `a` within an (sz+1)-bit
// field: bit i maps to bit (sz-i). Returns 0 when no bit in [0, sz] is set.
// Only the lowest set bit is mirrored; callers pass one-hot row masks.
int rh(int a,int sz){
  for(int pos=0;pos<=sz;pos++){
    if(a&(1<<pos)){
      return 1<<(sz-pos);
    }
  }
  return 0;
}
//
// Vertical mirror of an si-row bitboard: each row's queen bit j is
// reflected to bit (si-1-j) via rh().
void vMirror_bitmap(int bf[],int af[],int si){
  for(int row=0;row<si;row++) {
    af[row]=rh(bf[row],si-1);
  }
}
//
// Rotate an si x si bitboard 90 degrees clockwise: bit i of input row j
// becomes bit (si-j-1) of output row i.
void rotate_bitmap(int bf[],int af[],int si){
  for(int row=0;row<si;row++){
    int rotated=0;
    for(int col=0;col<si;col++){
      rotated|=((bf[col]>>row)&1)<<(si-col-1);
    }
    af[row]=rotated;
  }
}
//
// Lexicographic comparison of two int arrays of length n: returns the first
// nonzero difference lt[k]-rt[k], or 0 when all n entries are equal.
int intncmp(int lt[],int rt[],int n){
  for(int k=0;k<n;k++){
    int diff=lt[k]-rt[k];
    if(diff!=0){
      return diff;
    }
  }
  return 0;
}
//
// Number of unique solutions: one per symmetry class counted so far.
long getUnique(){
return COUNT2+COUNT4+COUNT8;
}
//
// Total solutions: each symmetry class expands to 2, 4, or 8 boards.
long getTotal(){
return COUNT2*2+COUNT4*4+COUNT8*8;
}
//
// Classify the current aBoard placement by its symmetry group: return early
// (not canonical) when any rotation/mirror compares smaller, otherwise bump
// COUNT2/COUNT4/COUNT8 according to how many distinct transforms exist.
void symmetryOps_bitmap(int si){
int nEquiv;
// Copy the board into scratch arrays for rotation/mirror checks.
for(int i=0;i<si;i++){ aT[i]=aBoard[i];}
rotate_bitmap(aT,aS,si); // rotate 90 degrees clockwise
int k=intncmp(aBoard,aS,si);
if(k>0)return;
if(k==0){ nEquiv=2;}else{
rotate_bitmap(aS,aT,si); // rotate 180 degrees clockwise
k=intncmp(aBoard,aT,si);
if(k>0)return;
if(k==0){ nEquiv=4;}else{
rotate_bitmap(aT,aS,si); // rotate 270 degrees clockwise
k=intncmp(aBoard,aS,si);
if(k>0){ return;}
nEquiv=8;
}
}
// Re-copy the board, then check the mirrored orbit the same way.
for(int i=0;i<si;i++){ aS[i]=aBoard[i];}
vMirror_bitmap(aS,aT,si); // vertical mirror
k=intncmp(aBoard,aT,si);
if(k>0){ return; }
if(nEquiv>2){ // -90 degree rotation: equivalent to the diagonal mirror
rotate_bitmap(aT,aS,si);
k=intncmp(aBoard,aS,si);
if(k>0){return;}
if(nEquiv>4){ // -180 degree rotation: equivalent to the horizontal mirror
rotate_bitmap(aS,aT,si);
k=intncmp(aBoard,aT,si);
if(k>0){ return;} // -270 degree rotation: equivalent to the anti-diagonal mirror
rotate_bitmap(aT,aS,si);
k=intncmp(aBoard,aS,si);
if(k>0){ return;}
}
}
if(nEquiv==2){COUNT2++;}
if(nEquiv==4){COUNT4++;}
if(nEquiv==8){COUNT8++;}
}
//
// Recursive bitmap backtracking: place one queen per row; down/left/right
// carry the attacked-column masks for the current row. When all `size` rows
// are placed, classify the solution via symmetryOps_bitmap.
// NOTE(review): `bit` is the file-scope global declared above, not a local.
void NQueen(int size,int mask,int row,int left,int down,int right){
int bitmap=mask&~(left|down|right);
if(row==size){
// At the bottom, bitmap==0 iff every column is attacked as expected.
if(!bitmap){
aBoard[row]=bitmap;
symmetryOps_bitmap(size);
}
}else{
while(bitmap){
// bit=(-bitmap&bitmap);
// bitmap=(bitmap^bit);
// Take the lowest free column, record it, and recurse into the next row.
bitmap^=aBoard[row]=bit=(-bitmap&bitmap);
NQueen(size,mask,row+1,(left|bit)<<1,down|bit,(right|bit)>>1);
}
}
}
//メインメソッド
// Entry point: choose CPU (iterative), CPUR (recursive), or GPU execution
// via -c/-r/-g, then time and print Total/Unique counts for a range of N.
int main(int argc,char** argv) {
bool cpu=true,cpur=true,gpu=true;
int argstart=1,steps=24576;
/** Parse command-line options */
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){gpu=false;cpur=false;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpu=false;gpu=false;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){cpu=false;cpur=false;}
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r] n steps\n",argv[0]);
printf("  -c: CPU only\n");
printf("  -r: CPUR only\n");
printf("  -g: GPU only\n");
printf("Default to 8 queen\n");
}
/** Run the selected variants */
/** CPU */
if(cpu){
printf("\n\n7.バックトラック+ビットマップ+対称解除法");
}
/** CPUR (recursive CPU solver) */
if(cpur){
printf("\n\n7.バックトラック+ビットマップ+対称解除法");
clock_t st; // timing
char t[20]; // holds hh:mm:ss.ms
int min=4; // smallest N (starting value)
int mask=0;
int max=17;
printf("%s\n"," N:        Total       Unique        hh:mm:ss.ms");
for(int i=min;i<=max;i++){
COUNT2=COUNT4=COUNT8=0;
mask=(1<<i)-1;
for(int j=0;j<i;j++){ aBoard[j]=j; } // initialize the board
st=clock(); // start timing
NQueen(i,mask,0,0,0,0);
TimeFormat(clock()-st,t); // stop timing
printf("%2d:%13ld%16ld%s\n",i,getTotal(),getUnique(),t); // print
}
return 0;
}
/** GPU */
if(gpu){
if(!InitCUDA()){return 0;}
int min=4;int targetN=18;
struct timeval t0;struct timeval t1;int ss;int ms;int dd;
printf("%s\n"," N:          Total        Unique                 dd:hh:mm:ss.ms");
for(int i=min;i<=targetN;i++){
gettimeofday(&t0,NULL); // start timing
Total=solve_nqueen_cuda(i,steps);
gettimeofday(&t1,NULL); // stop timing
// Split the wall-clock delta into days/seconds/centiseconds, borrowing
// a second when the microsecond field underflows.
if (t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}
int hh=ss/3600;
int mm=(ss-hh*3600)/60;
ss%=60;
// NOTE(review): Unique stays 0 here — the GPU path only computes Total.
printf("%2d:%18ld%18ld%12.2d:%02d:%02d:%02d.%02d\n", i,Total,Unique,dd,hh,mm,ss,ms);
}
}
return 0;
}
|
5,694 | #include <stdio.h>
#include <cuda_runtime.h>
// Print the device count and basic launch limits (threads per block, block
// and grid dimension maxima) of CUDA device 0.
int main(int argc, char ** argv){
    printf("%s running...\n", argv[0]);
    int devCount = 0;
    cudaGetDeviceCount(&devCount);
    printf("number of devices: %d\n", devCount);
    // Bail out early: with no device, cudaGetDeviceProperties fails and
    // devProp would be printed uninitialized.
    if (devCount < 1) {
        fprintf(stderr, "no CUDA device found\n");
        return 1;
    }
    cudaDeviceProp devProp;
    cudaGetDeviceProperties(&devProp, 0);
    printf("maxThreadsPerBlock = %d\n", devProp.maxThreadsPerBlock);
    printf("max block dimension (%d, %d, %d)\n", devProp.maxThreadsDim[0],
        devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
    printf("max grid dimension (%d, %d, %d)\n", devProp.maxGridSize[0],
        devProp.maxGridSize[1], devProp.maxGridSize[2]);
    return 0;
}
|
5,695 | #include <assert.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <math.h>
#include <sys/time.h>
//#include <Windows.h>
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include <device_functions.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA error with its source location; exits the process with the
// error code unless `abort` is false. Used via the gpuErrchk() macro above.
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define GRAPH_SIZE 6144
#define WORK_SIZE 256
#define NTHREADS 1024
#define BLOCKS 16
#define EDGE_COST(graph, GRAPH_SIZE, a, b) graph[a * GRAPH_SIZE + b]
#define D(a, b) EDGE_COST(output, GRAPH_SIZE, a, b)
#define INF 0x1fffffff
//createGraph
// Fill `output` (a GRAPH_SIZE x GRAPH_SIZE matrix accessed via the D macro)
// with a reproducible random graph: 0 on the diagonal; otherwise a cost in
// [0, 20], or INF (no edge) when the draw exceeds 20. Seed is fixed.
void generate_random_graph(int* output) {
    srand(0xdadadada);
    for (int row = 0; row < GRAPH_SIZE; row++) {
        for (int col = 0; col < GRAPH_SIZE; col++) {
            if (row == col) {
                D(row, col) = 0;
                continue;
            }
            int cost = rand() % 40;
            D(row, col) = (cost > 20) ? INF : cost;
        }
    }
}
//sequencial GPU
// Baseline: the full Floyd-Warshall triple loop run by a SINGLE device
// thread (launch with <<<1, 1>>>). Exists only as a sequential reference;
// `output` is the distance matrix accessed through the D macro.
__global__ void calculateSequencialGPU(int* output) {
int i, j, k;
for (k = 0; k < GRAPH_SIZE; k++) {
for (i = 0; i < GRAPH_SIZE; i++) {
for (j = 0; j < GRAPH_SIZE; j++) {
if (D(i, k) + D(k, j) < D(i, j)) {
D(i, j) = D(i, k) + D(k, j);
}
}
}
}
}
// One Floyd-Warshall relaxation step for a fixed intermediate vertex k:
// each thread owns a contiguous run of WORK_SIZE (i, j) cells starting at
// its flattened offset, and advances j (wrapping to the next row) as it
// goes. The host launches this once per k, so the k-dependence between
// steps is serialized by kernel launch boundaries.
// NOTE(review): assumes blocks*threads*WORK_SIZE covers GRAPH_SIZE^2 —
// confirm against the occupancy-derived launch in floyd_warshall_gpu.
__global__ void calcWithoutAtomic1D(int* output, int k) {
int totalID = blockIdx.x * blockDim.x * WORK_SIZE + threadIdx.x * WORK_SIZE;
int i = totalID / GRAPH_SIZE;
int j = totalID % GRAPH_SIZE;
int counter = 0;
while (counter < WORK_SIZE)
{
if (D(i, k) + D(k, j) < D(i, j)) {
D(i, j) = D(i, k) + D(k, j);
}
// Step to the next cell in row-major order.
if ((j + 1) < GRAPH_SIZE) {
j++;
}else {
i++;
j = 0;
}
counter++;
}
}
// Variant that keeps the whole k loop inside one kernel launch and tries to
// synchronize the grid between k steps with a hand-rolled spin barrier on
// `syncGrid`. Currently unused (the call site in floyd_warshall_gpu is
// commented out).
// NOTE(review): this barrier is unsafe — it spins on plain global memory
// with no volatile/atomics or __threadfence(), only thread 0 of each block
// waits (other threads pass straight through to __syncthreads()), and a
// grid-wide barrier is only valid under a cooperative launch. It can hang
// or read stale values; do not re-enable as-is.
__global__ void calcWithAtomic1D(int* output, int* syncGrid) {
int totalID = blockIdx.x * blockDim.x * WORK_SIZE + threadIdx.x * WORK_SIZE;
int i, j, counter, k = 0, avaliador = 0, Dik;  // Dik is unused here
while(k < GRAPH_SIZE){
i = totalID / GRAPH_SIZE;
j = totalID % GRAPH_SIZE;
counter = 0;
syncGrid[blockIdx.x] = 0;
while (counter < WORK_SIZE)
{
if (D(i,k) + D(k, j) < D(i, j)) {
D(i, j) = D(i,k) + D(k, j);
}
if (j + 1 < GRAPH_SIZE) {
j++;
}else {
i++;
j = 0;
}
counter++;
}
k++;
// Attempted grid barrier: block leader flags completion, then spins until
// every block has flagged.
if (threadIdx.x == 0) {
syncGrid[blockIdx.x] = 1;
do{
avaliador = 0;
for(int i = 0; i < gridDim.x; i++){
avaliador += syncGrid[i];
}
}while(avaliador != gridDim.x);
}
__syncthreads();
}
}
// Same structure as calcWithAtomic1D but caches D(i,k) in a register (Dik)
// for the current row, refreshing it on row changes, and clamps i at the
// last row. Also unused by the current host code.
// NOTE(review): inherits the same unsafe spin-barrier caveats as
// calcWithAtomic1D (no volatile/atomics/fences, leader-only wait,
// non-cooperative launch); do not re-enable as-is.
__global__ void calcWithAtomic1DMem(int* output, int* syncGrid) {
int totalID = blockIdx.x * blockDim.x * WORK_SIZE + threadIdx.x * WORK_SIZE;
int i, j, counter, k = 0, avaliador = 0, Dik;
while(k < GRAPH_SIZE){
i = totalID / GRAPH_SIZE;
j = totalID % GRAPH_SIZE;
counter = 0;
Dik = D(i,k);
syncGrid[blockIdx.x] = 0;
while (counter < WORK_SIZE)
{
if (Dik + D(k, j) < D(i, j)) {
D(i, j) = Dik + D(k, j);
}
if (j + 1 < GRAPH_SIZE) {
j++;
}else {
// Advance to the next row (clamped at the last row) and re-cache D(i,k).
i += ((i+1)< GRAPH_SIZE);
j = 0;
Dik = D(i,k);
}
counter++;
}
k++;
if (threadIdx.x == 0) {
syncGrid[blockIdx.x] = 1;
do{
avaliador = 0;
for(int i = 0; i < gridDim.x; i++){
avaliador += syncGrid[i];
}
}while(avaliador != gridDim.x);
}
__syncthreads();
}
}
// GPU Floyd-Warshall driver: upload `graph`, launch calcWithoutAtomic1D
// once per intermediate vertex k (launch boundaries serialize the k steps),
// then download the result into `output`.
void floyd_warshall_gpu(const int* graph, int* output) {
int* dev_a;
cudaMalloc(&dev_a, sizeof(int) * GRAPH_SIZE * GRAPH_SIZE);
cudaMemcpy(dev_a, graph, sizeof(int) * GRAPH_SIZE * GRAPH_SIZE, cudaMemcpyHostToDevice);
//calculateSequencialGPU << <1, 1 >> > (dev_a, GRAPH_SIZE);
int blocks;
int threads;
// Let the runtime pick an occupancy-friendly configuration.
// NOTE(review): the kernel assumes blocks*threads*WORK_SIZE >= GRAPH_SIZE^2;
// workPerThread below is printed but not fed back into the launch.
cudaOccupancyMaxPotentialBlockSize (&blocks, &threads, calcWithoutAtomic1D, 0, GRAPH_SIZE*GRAPH_SIZE);
//blocks = sqrt(blocks);
//threads = sqrt(threads);
int workPerThread = ((GRAPH_SIZE*GRAPH_SIZE) / (threads*blocks));// + 1;
printf("workPerThread= %d, blocks= %d threadsPerBlocks = %d\n", workPerThread, blocks, threads);
int* syncGrid;
cudaMalloc(&syncGrid, sizeof(int) * blocks);
for (int k = 0; k < GRAPH_SIZE; k++) {
calcWithoutAtomic1D <<<blocks, threads>>> (dev_a, k);
}
//calcWithAtomic1D<<<blocks, threads>>> (dev_a, syncGrid);
//calcWithAtomic1DShared<<<blocks, threads>>> (dev_a, syncGrid);
// Blocking copy also synchronizes with the queued kernels.
cudaError_t err = cudaMemcpy(output, dev_a, sizeof(int) * GRAPH_SIZE * GRAPH_SIZE, cudaMemcpyDeviceToHost);
gpuErrchk(err);
cudaFree(dev_a);
cudaFree(syncGrid);//teste
}
// Reference host implementation of Floyd-Warshall: relax every (i, j) pair
// through each intermediate vertex k, in place on `output` via the D macro.
// `graph` is unused here; the caller pre-copies it into `output`.
void floyd_warshall_cpu(const int* graph, int* output) {
    for (int k = 0; k < GRAPH_SIZE; k++) {
        for (int i = 0; i < GRAPH_SIZE; i++) {
            for (int j = 0; j < GRAPH_SIZE; j++) {
                int through_k = D(i, k) + D(k, j);
                if (through_k < D(i, j)) {
                    D(i, j) = through_k;
                }
            }
        }
    }
}
// Benchmark driver: build a random graph, run Floyd-Warshall on CPU and GPU,
// time both with gettimeofday, and compare the results byte-for-byte.
int main(int argc, char** argv) {
/*
LARGE_INTEGER frequency;
LARGE_INTEGER start;
LARGE_INTEGER end;
double interval;*/
#define TIMER_START() gettimeofday(&tv1, NULL)
#define TIMER_STOP() \
gettimeofday(&tv2, NULL); \
timersub(&tv2, &tv1, &tv); \
time_delta = (float)tv.tv_sec + tv.tv_usec / 1000000.0
struct timeval tv1, tv2, tv;
float time_delta;
int* graph, * output_cpu, * output_gpu;
int size;
size = sizeof(int) * GRAPH_SIZE * GRAPH_SIZE;
graph = (int*)malloc(size);
assert(graph);
output_cpu = (int*)malloc(size);
assert(output_cpu);
memset(output_cpu, 0, size);
output_gpu = (int*)malloc(size);
assert(output_gpu);
generate_random_graph(graph);
fprintf(stderr, "running on cpu...\n");
TIMER_START();
// CPU path works in place on a copy of the graph.
memcpy(output_cpu, graph, sizeof(int) * GRAPH_SIZE * GRAPH_SIZE);
//QueryPerformanceFrequency(&frequency);
//QueryPerformanceCounter(&start);
floyd_warshall_cpu(graph, output_cpu);
TIMER_STOP();
fprintf(stderr, "%f seconds\n", time_delta);
//QueryPerformanceCounter(&end);
//interval = (double)(end.QuadPart - start.QuadPart) / frequency.QuadPart;
//fprintf(stderr, "%f seconds\n", interval);
fprintf(stderr, "running on gpu...\n");
TIMER_START();
//QueryPerformanceFrequency(&frequency);
//QueryPerformanceCounter(&start);
// GPU path uploads `graph` itself, so no host-side copy is needed here.
floyd_warshall_gpu(graph, output_gpu);
TIMER_STOP();
fprintf(stderr, "%f seconds\n", time_delta);
//QueryPerformanceCounter(&end);
//interval = (double)(end.QuadPart - start.QuadPart) / frequency.QuadPart;
//fprintf(stderr, "%f seconds\n", interval);
if (memcmp(output_cpu, output_gpu, size) != 0) {
fprintf(stderr, "FAIL!\n");
}
else {
fprintf(stderr, "Verified!\n");
}
return 0;
}
|
5,696 | #pragma once
namespace cuFIXNUM {
namespace internal
{
/*
 * Return floor(log2(x)). In particular, if x = 2^b, return b.
 * Guarded with x <= 1 so that x == 0 — whose log2 is undefined — returns 0
 * instead of recursing without a base case.
 */
__device__
constexpr unsigned
floorlog2(unsigned x) {
    return x <= 1 ? 0 : 1 + floorlog2(x >> 1);
}
/*
* The following function gives a reasonable choice of WINDOW_SIZE in the k-ary
* modular exponentiation method for a fixnum of B = 2^b bytes.
*
* The origin of the table is as follows. The expected number of multiplications
* for the k-ary method with n-bit exponent and d-bit window is given by
*
* T(n, d) = 2^d - 2 + n - d + (n/d - 1)*(1 - 2^-d)
*
* (see Koç, C. K., 1995, "Analysis of Sliding Window Techniques for
* Exponentiation", Equation 1). The following GP script calculates the values
* of n at which the window size should increase (maximum n = 65536):
*
* ? T(n,d) = 2^d - 2 + n - d + (n/d - 1) * (1 - 2^-d);
* ? M = [ vecsort([[n, d, T(n, d)*1.] | d <- [1 .. 16]], 3)[1][2] | n <- [1 .. 65536] ];
* ? maxd = M[65536]
* 10
* ? [[d, vecmin([n | n <- [1 .. 65536], M[n] == d])] | d <- [1 .. maxd]]
* [[1, 1], [2, 7], [3, 35], [4, 122], [5, 369], [6, 1044], [7, 2823], [8, 7371], [9, 18726], [10, 46490]]
*
* Table entry i is the window size for a fixnum of 8*(2^i) bits (e.g. 512 =
* 8*2^6 bits falls between 369 and 1044, so the window size is that of the
* smaller, 369, so 5 is in place i = 6).
*/
// NB: For some reason we're not allowed to put this table in the definition
// of bytes_to_window_size().
// Entry i holds the window size for a fixnum of 2^i bytes; indices 0 and 1
// are -1 sentinels (no fixnum that small).
constexpr int BYTES_TO_K_ARY_WINDOW_SIZE_TABLE[] = {
-1,
-1, //bytes bits
2, // 2^2 32
3, // 2^3 64
4, // 2^4 128
4, // 2^5 256
5, // 2^6 512
5, // 2^7 1024
6, // 2^8 2048
7, // 2^9 4096
8, //2^10 8192
8, //2^11 16384
9, //2^12 32768
10,//2^13 65536
};
// k-ary exponentiation window size for a fixnum of `bytes` bytes.
// NOTE(review): assumes bytes is a power of two in [4, 8192] so the
// floorlog2 index lands on a valid (non-sentinel) entry — confirm callers.
__device__
constexpr int
bytes_to_k_ary_window_size(unsigned bytes) {
return BYTES_TO_K_ARY_WINDOW_SIZE_TABLE[floorlog2(bytes)];
}
/*
* This Table 2 from Koç, C. K., 1995, "Analysis of Sliding Window
* Techniques for Exponentiation".
*
* The resolution of this table is higher than the one above because it's
* used in the fixed exponent modexp code and can benefit from using the
* precise bit length of the exponent, whereas the table above has to
* accommodate multiple different exponents simultaneously.
*/
// Entry i holds the CLNW window size for an exponent of i*128 bits
// (index 0 is a sentinel); indexed below via (bits/8)/16 == bits/128.
__constant__
int BYTES_TO_CLNW_WINDOW_SIZE_TABLE[] = {
-1, // bits
4, // 128
5, // 256
5, // 384
5, // 512
6, // 640
6, // 768
6, // 896
6, // 1024
6, // 1152
6, // 1280
6, // 1408
6, // 1536
6, // 1664
7, // 1792
7, // 1920
7, // 2048
};
// Sliding-window (CLNW) size for an exponent of `bits` bits: small fixed
// answers below 128 bits, capped at 7 above 2048, table lookup otherwise.
// NOTE(review): a constexpr function reading a __constant__ array can only
// be evaluated at device run time, not in constant expressions — confirm
// this is the intended usage at call sites.
__device__
constexpr int
bits_to_clnw_window_size(unsigned bits) {
// The chained ternary condition is forced upon us by the Draconian
// constraints of C++11 constexpr functions.
return
bits < 64 ? 2 :
bits < 128 ? 3 :
bits > 2048 ? 7 :
BYTES_TO_CLNW_WINDOW_SIZE_TABLE[(bits / 8) / 16];
}
} // End namespace internal
} // End namespace cuFIXNUM
|
5,697 | #include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_real_distribution.h>
// nvcc -arch=sm_70 -std=c++14 exemplo5.cu -o exemplo5 && ./exemplo5
// Functor mapping an index i to the i-th draw of the given distribution:
// each call discards i engine states from a private RNG copy so results are
// independent of evaluation order (suitable for thrust::transform).
struct raw_access {
thrust::minstd_rand rng;
thrust::uniform_real_distribution<double> dist;
raw_access (thrust::uniform_real_distribution<double> dist, thrust::minstd_rand rng) : dist(dist), rng(rng) {};
__device__ __host__
double operator()(const int &i) {
// Skip ahead i states so index i always sees the same engine state.
rng.discard(i);
return dist(rng);
}
};
// http://www.cplusplus.com/reference/random/linear_congruential_engine/discard/
// Fill a device vector with uniform doubles in [25, 40) via a counting
// iterator + raw_access functor, then print from a host copy.
int main(){
thrust::minstd_rand rng;
thrust::uniform_real_distribution<double> dist(25, 40);
thrust::device_vector<double> vetor(10, 0);
thrust::counting_iterator<int> iter(0);
raw_access ra(dist, rng);
// Each output element i receives the i-th draw (see raw_access::operator()).
thrust::transform(iter, iter+vetor.size(), vetor.begin(), ra);
// Copy to the host first so element access is cheap.
thrust::host_vector<double> host(vetor);
for (auto i = host.begin(); i != host.end(); i++)
{
std::cout << *i << " "; // este acesso é rápido -- CPU
}
printf("\n");
}
|
5,698 | #include <iostream>
#include <chrono>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <vector>
#include <algorithm>
#define BLOCKSIZE 128
#define LOG_BLOCKSIZE 7
// MUST BE ASSOCIATIVE
// Scan combine operator (addition). The scan below requires f to be
// associative, per the comment above.
__device__ inline int f(int a, int b){
return a + b;
}
/**
* In this variant, one optimization was applied on top of parallel_strided.cu:
* - We change the downsweep and upsweep iterations so that divergence is checked at the start
* of the iteration body. This results in a small performance improvement which makes
* further optimizations much easier to do.
**/
// Work-efficient (Blelloch-style) inclusive block scan of BLOCKSIZE
// elements per block, in place on x, writing each block's total to
// out[blockIdx.x]. Launched with BLOCKSIZE/2 threads per block, two
// elements per thread; the up-/down-sweep loops test divergence first so
// only the first 2^j threads stay active each round.
__global__ void scan(const int n, int *x, int *out){
__shared__ int scan_v[2 * BLOCKSIZE];
int tid = threadIdx.x;
int i = blockIdx.x * 2 * blockDim.x + threadIdx.x * 2;
// Load this thread's even/odd pair and pre-combine the first level.
scan_v[2 * tid] = x[i];
scan_v[(2 * tid) ^ 1] = x[i ^ 1];
scan_v[(2 * tid) ^ 1] = f(scan_v[(2 * tid) ^ 1], scan_v[2 * tid]);
__syncthreads();
// Up-sweep: combine partial sums at stride-doubling positions.
for(int j = LOG_BLOCKSIZE - 2; j >= 0; j--){
if(tid < (1 << j)){
int j_complement = LOG_BLOCKSIZE - j;
int curr_tid = (tid << j_complement) | ((1 << j_complement) - 1);
int oth = curr_tid - (1 << (j_complement - 1));
scan_v[curr_tid] = f(scan_v[curr_tid], scan_v[oth]);
}
__syncthreads();
}
// Down-sweep: propagate prefixes back into the gaps.
for(int j = 0; j < LOG_BLOCKSIZE; j++){
if(tid < (1 << j)){
int j_complement = (LOG_BLOCKSIZE - 1) - j;
int curr_tid = ((tid + 1) << (j_complement + 1)) | ((1 << j_complement) - 1);
int oth = curr_tid - (1 << j_complement);
scan_v[curr_tid] = f(scan_v[curr_tid], scan_v[oth]);
}
__syncthreads();
}
// Write the scanned pair back; the last thread exports the block total.
x[i] = scan_v[2 * tid];
x[i ^ 1] = scan_v[(2 * tid) ^ 1];
if(tid == blockDim.x - 1){
out[blockIdx.x] = scan_v[(2 * tid) ^ 1];
}
}
// Add the scanned block totals (`in`) into the next level's partial scans:
// blockIdx.x is offset by one so block 0 of `out` — which needs no
// correction — is left untouched.
__global__ void propagate(const int n, int *in, int *out){
int bid = blockIdx.x + 1;
int i = bid * blockDim.x + threadIdx.x;
out[i] = f(out[i], in[bid - 1]);
}
// Sizes of each scan level: start at n and repeatedly ceil-divide by
// block_size until reaching 1, which is always appended.
// Example: get_levels(8, 2) -> {8, 4, 2, 1}.
std::vector<int> get_levels(const int n, int block_size){
    std::vector<int> levels;
    for (int remaining = n; remaining > 1;
         remaining = (remaining + block_size - 1) / block_size) {
        levels.push_back(remaining);
    }
    levels.push_back(1);
    return levels;
}
// Multi-level scan driver: scan each level into the next (block totals),
// then propagate totals back down, and verify against the closed form
// (all-ones input => result[i] == i+1).
int main(){
const int n = (1 << 28);
const int block_size = BLOCKSIZE;
assert(n % block_size == 0);
std::vector<int> levels = get_levels(n, block_size);
for(int i : levels){
std::cout << i << ' ';
}
std::cout << std::endl;
int *x = (int *) malloc(n * sizeof(int));
assert(x != NULL);
for(int i = 0; i < n; i++){
x[i] = 1;
}
// One device buffer per level, sized by that level's element count.
int *d_arrays[levels.size()];
for(int i = 0; i < levels.size(); i++){
cudaMalloc(&d_arrays[i], levels[i] * sizeof(int));
assert(d_arrays[i] != NULL);
}
cudaMemcpy(d_arrays[0], x, levels[0] * sizeof(int), cudaMemcpyHostToDevice);
// Upward pass: scan level i-1 in place, block totals feed level i.
// (Kernel uses block_size/2 threads for block_size elements per block.)
for(int i = 1; i < levels.size(); i++){
int block_count = levels[i];
scan<<<block_count, block_size / 2>>>(levels[i - 1], d_arrays[i - 1], d_arrays[i]);
}
// Downward pass: add scanned totals back into each finer level.
for(int i = levels.size() - 2; i >= 1; i--){
int block_count = levels[i];
propagate<<<block_count - 1, block_size>>>(levels[i - 1], d_arrays[i], d_arrays[i - 1]);
}
int *result = (int *) malloc(n * sizeof(int));
// Blocking copy also synchronizes with the queued kernels.
cudaMemcpy(result, d_arrays[0], n * sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < n; i++){
if(result[i] != i + 1){
std::cerr << i << ' ' << i + 1 << ' ' << result[i] << '\n';
return -1;
}
}
std::cout << "memory usage: " << n * sizeof(int) << " bytes" << std::endl;
} |
5,699 | #include "includes.h"
// Transpose the weight matrix d_w (nhid x n_inputs, row stride
// d_n_inputs_cols) into d_wtr (n_inputs x nhid, row stride d_nhid_cols).
// Grid layout: x covers visible units (bounds-checked against d_n_inputs),
// blockIdx.y selects the hidden unit.
// NOTE(review): d_n_inputs, d_nhid_cols, d_n_inputs_cols, d_w, and d_wtr
// are presumably __device__/__constant__ globals defined in includes.h —
// confirm there.
__global__ void device_transpose ()
{
int ivis, ihid ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
ihid = blockIdx.y ;
d_wtr[ivis*d_nhid_cols+ihid] = d_w[ihid*d_n_inputs_cols+ivis] ;
} |
5,700 | #include <stdio.h>
#include <stdlib.h>
// Kernel: each thread prints its own 2-D thread/block indices plus the
// block dimensions, demonstrating how launch coordinates map to threads.
__global__ void print_from_gpu(void) {
printf(
"Hello World from device!\n\
threadIdx.x: %d\n\
threadIdx.y: %d\n\
blockIdx.x: %d\n\
blockIdx.y: %d\n\
blockDim.x: %d\n\
blockDim.y: %d\n",
threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, blockDim.x, blockDim.y);
}
// Demonstrate 2-D grid/block launches and that dim3 members can be
// reassigned between launches.
int main(void) {
    printf("Hello World from host!\n");
    dim3 DimGrid(2, 2);
    dim3 DimBlock(3, 2);
    print_from_gpu<<<DimGrid, DimBlock>>>();
    // Block until the kernel's printf output is flushed; without this the
    // host lines below could appear first.
    cudaDeviceSynchronize();
    printf("Can this be ahead of kernel?\n");
    printf("Dim change\n");
    DimGrid.x = 2; DimGrid.y = 1;
    DimBlock.x = 2; DimBlock.y = 3;
    print_from_gpu<<<DimGrid, DimBlock>>>();
    cudaDeviceSynchronize();
    // sizeof yields size_t; printing it with "%d" was undefined behavior.
    printf("sizeof(double): %zu", sizeof(double));
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.