serial_no
int64 1
24.2k
| cuda_source
stringlengths 11
9.01M
|
|---|---|
101
|
#define THETA_N 4
#define SQRT_2 1.4142135623730951f
#define PI 3.141592653589793f
extern "C" {
/**
* Clears out the Gabor Energies Tensor, setting all of its values to zero.
* The Gabor Energies Tensor is the data structure whose [y, x, theta] value contains the average magnitude response to
* the different complex 'Gabor' filters for an specific 'theta' orientation at 'image' location (y, x).
* This is the first step towards its calculation. Note that the number of rows and columns in the Gabor Energies Tensor
* is (image_rows - (kernel_size >> 1)) X (image_cols - (kernel_size >> 1)) due to the padding lost at convolution.
* @param gabor_energies The Gabor Energies Tensor.
* @param rows The number of rows in the 'image' whose Energies Tensor will be calculated.
* @param cols The number of columns in the 'image' whose Energies Tensor will be calculated.
* @param kernel_size Both the number of rows and columns in the Gabor kernels to apply. Should be an odd number.
*/
__global__ void resetGaborEnergiesTensor(float* gabor_energies, int rows, int cols, int kernel_size)
{
    // Pixel coordinate handled by this thread.
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int pad = kernel_size >> 1;
    // Border pixels have no full convolution support and are not part of
    // the (rows - 2*pad) x (cols - 2*pad) tensor.
    if (y < pad || y + pad >= rows || x < pad || x + pad >= cols)
        return;
    // Zero all THETA_N orientation slots for this tensor cell.
    const int inner_cols = cols - (pad << 1);
    float* cell = gabor_energies + ((y - pad) * inner_cols + (x - pad)) * THETA_N;
    for (int t = 0; t < THETA_N; ++t)
        cell[t] = 0.0f;
}
/**
* Applies a 2D Complex Convolution on a real image given a square kernel and adds its magnitude response to the
* corresponding [y, x, theta] location in the Gabor Energies Tensor.
* This kernel is the second step towards its calculation and should be called once for every frequency to apply.
* @param gabor_energies The Gabor Energies Tensor.
* @param theta_idx The orientation index for the Gabor Energies Tensor specifying the orientation for which to add
* this convolution.
* @param image The image on which to apply the convolution operation.
* @param rows The number of rows in 'image'.
* @param cols The number of columns in 'image'.
* @param real_kernel The real part of the square convolution kernel to apply on 'image'.
* @param imag_kernel The imaginary part of the square convolution kernel to apply on 'image'.
* @param kernel_size Both the number of rows and columns in 'kernel'. Should be an odd number.
*/
__global__ void addGaborFilterMagnitudeResponse(float* gabor_energies, int theta_idx,
                                                unsigned char* image, int rows, int cols,
                                                float* real_kernel, float* imag_kernel, int kernel_size)
{
    // Pixel this thread is responsible for.
    int image_y = blockDim.y * blockIdx.y + threadIdx.y;
    int image_x = blockDim.x * blockIdx.x + threadIdx.x;
    int image_padding = (kernel_size >> 1);
    if (image_y < image_padding || image_y + image_padding >= rows ||
        image_x < image_padding || image_x + image_padding >= cols)
        return; // Part of the padding lost due to lack of border information.
    // Correlate the complex kernel against the window whose top-left corner
    // is (image_y - pad, image_x - pad).
    int image_idx = (image_y - image_padding) * cols + (image_x - image_padding), kernel_idx = 0;
    float real_response = 0.0f, imag_response = 0.0f;
    for (int i = 0; i < kernel_size; i++)
    {
        for (int j = 0; j < kernel_size; j++)
        {
            real_response += image[image_idx] * real_kernel[kernel_idx];
            imag_response += image[image_idx] * imag_kernel[kernel_idx];
            image_idx++;
            kernel_idx++;
        }
        // Jump to the start of the window's next image row.
        image_idx += cols - kernel_size;
    }
    int tensor_offset = ((image_y - image_padding) * (cols - (image_padding << 1)) + (image_x - image_padding)) * THETA_N;
    // BUG FIX: accumulate (+=) rather than overwrite (=). This kernel is
    // documented to be called once per frequency, and divideGaborEnergiesTensor
    // later divides by the frequency count to average; with '=' only the last
    // frequency's magnitude survived, making the reset/average steps meaningless.
    gabor_energies[tensor_offset + theta_idx] += sqrtf(real_response * real_response + imag_response * imag_response);
}
/**
* Divides all of the Gabor Energies Tensor elements by a constant.
* This is the third and last step to calculate the Tensor. This step is used to average out the magnitude responses of
* the different applied Gabor kernels: for a given [y, x, theta], one is applied per frequency.
* @param gabor_energies The Gabor Energies Tensor.
* @param rows The number of rows in the 'image' whose Energies Tensor will be calculated.
* @param cols The number of columns in the 'image' whose Energies Tensor will be calculated.
* @param kernel_size Both the number of rows and columns in the applied Gabor kernels. Should be an odd number.
* @param constant The number by which to divide all of the Gabor Energies Tensor elements. Should be equal to the
* number of applied frequencies.
*/
__global__ void divideGaborEnergiesTensor(float* gabor_energies, int rows, int cols, int kernel_size, int constant)
{
    // Pixel coordinate handled by this thread.
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int pad = kernel_size >> 1;
    // Skip the convolution border, which is not represented in the tensor.
    if (y < pad || y + pad >= rows || x < pad || x + pad >= cols)
        return;
    // Scale every orientation slot of this cell by 1/constant, averaging
    // the accumulated per-frequency magnitude responses.
    const int inner_cols = cols - (pad << 1);
    float* cell = gabor_energies + ((y - pad) * inner_cols + (x - pad)) * THETA_N;
    for (int t = 0; t < THETA_N; ++t)
        cell[t] /= constant;
}
/**
* Combines the Gabor Energies Tensor into a Matrix by joining the magnitude response of the different thetas into a
* single one with a corresponding combined energy and combined phase (angle). This takes into consideration the two
* strongest orientations (thetas) and linearly joining their equivalent plane components. The two weakest components
* are subtracted from the strongest ones since random textures tend to equally respond to different Gabor kernels.
* @param gabor_energies The Gabor Energies Tensor.
* @param rows The number of rows in 'gabor_energies'.
* @param cols The number of columns in 'gabor_energies'.
* @param combined_energies The resulting magnitude response from combining the Gabor energies at different thetas.
* @param combined_phases The resulting phase response from combining the Gabor energies at different thetas.
*/
// Per-pixel combination of the THETA_N = 4 orientation energies into a single
// (energy, phase) pair. The two strongest orientations are combined as 2D
// vector components after subtracting the two weakest (random textures respond
// roughly equally to all orientations, so this suppresses them).
// NOTE(review): 'confidence' is currently unused — its computation is
// commented out below; confirm before removing the parameter.
__global__ void combineGaborEnergies(float* gabor_energies, int rows, int cols,
float* combined_energies, float* combined_phases, float* confidence)
{
int image_y = blockDim.y * blockIdx.y + threadIdx.y;
int image_x = blockDim.x * blockIdx.x + threadIdx.x;
int offset = image_y * cols + image_x;
if (image_y >= rows || image_x >= cols)
return; // Out of image.
// Selection sort of the four energies: descending_energies_arg[i] is the
// index of the i-th strongest orientation. temp_energies entries are
// consumed (set to -1) as they are picked.
int descending_energies_arg[THETA_N];
float temp_energies[THETA_N];
for (int i = 0; i < THETA_N; i++)
temp_energies[i] = gabor_energies[THETA_N * offset + i];
for (int i = 0; i < THETA_N; i++)
{
int max_idx = 0;
float max_energy = temp_energies[0];
for (int j = 1; j < THETA_N; j++)
if (temp_energies[j] > max_energy)
{
max_idx = j;
max_energy = temp_energies[j];
}
descending_energies_arg[i] = max_idx;
temp_energies[max_idx] = -1.0f;
}
//consider only relevant voters, where the confidence is over a 0.5 threshold
/*if((1 - ((gabor_energies[THETA_N * offset + descending_energies_arg[1]] +
gabor_energies[THETA_N * offset + descending_energies_arg[2]] +
gabor_energies[THETA_N * offset + descending_energies_arg[3]] )/
(3*gabor_energies[THETA_N * offset + descending_energies_arg[0]])))<0.50){
combined_energies[offset] =0;
combined_phases[offset]= PI/2;
return; //confidence is below threshold, there is not a well defined orientation
}*/
// s1/s2: strongest and second-strongest energies, each reduced by one of
// the two weakest responses.
float s1 = (gabor_energies[THETA_N * offset + descending_energies_arg[0]] -
gabor_energies[THETA_N * offset + descending_energies_arg[3]]);
float s2 = (gabor_energies[THETA_N * offset + descending_energies_arg[1]] -
gabor_energies[THETA_N * offset + descending_energies_arg[2]]);
int theta_idx1 = descending_energies_arg[0];
int theta_idx2 = descending_energies_arg[1];
// Project s1 and s2 onto plane axes according to which of the 4 discrete
// orientations they belong to (SQRT_2 divisions handle the 45-degree ones).
// Only adjacent orientation pairs are combined; for non-adjacent pairs the
// components stay 0 and the pixel gets zero energy.
float combined_y = 0.0f, combined_x = 0.0f;
switch(theta_idx1)
{
case 0:
if (theta_idx2 == 1)
{
combined_y = s1 + s2 / SQRT_2;
combined_x = s2 / SQRT_2;
}
else if (theta_idx2 == 3)
{
combined_y = s1 + s2 / SQRT_2;
combined_x = -s2 / SQRT_2;
}
break;
case 1:
if (theta_idx2 == 0)
{
combined_y = s1 / SQRT_2 + s2;
combined_x = s1 / SQRT_2;
}
else if (theta_idx2 == 2)
{
combined_y = s1 / SQRT_2;
combined_x = s1 / SQRT_2 + s2;
}
break;
case 2:
if (theta_idx2 == 1)
{
combined_y = s2 / SQRT_2;
combined_x = s1 + s2 / SQRT_2;
}
else if (theta_idx2 == 3)
{
combined_y = s2 / SQRT_2;
combined_x = -s1 - s2 / SQRT_2;
}
break;
case 3:
if (theta_idx2 == 0)
{
combined_y = s1 / SQRT_2 + s2;
combined_x = -s1 / SQRT_2;
}
else if (theta_idx2 == 2)
{
combined_y = s1 / SQRT_2;
combined_x = -s1 / SQRT_2 - s2;
}
break;
}
/*confidence[offset] = (1 - (
(gabor_energies[THETA_N * offset + descending_energies_arg[1]] +
gabor_energies[THETA_N * offset + descending_energies_arg[2]] +
gabor_energies[THETA_N * offset + descending_energies_arg[3]] )
/(3*gabor_energies[THETA_N * offset + descending_energies_arg[0]])));*/
// Magnitude and angle of the combined component vector.
combined_energies[offset] = sqrtf(combined_y * combined_y + combined_x * combined_x);
combined_phases[offset] = atan2f(combined_y, combined_x);
//printf("%f\n", combined_energies[offset]);
}
/**
* Generates votes for all of the Vanishing Point candidates by allowing all of the voting region to assign a voting
* weight for their preferred candidates. The candidate region is assumed to be directly above the voting region
* (combined components) such that concatenated are part of a continuous region of the original image.
* @param combined_energies The resulting magnitude response from combining the Gabor energies at different thetas.
* @param combined_phases The resulting phase response from combining the Gabor energies at different thetas.
* @param candidates The Vanishing Point candidates, being a region directly above the voting region which should also
* correspond to a stripe around the horizon line.
* @param voters_rows The number of rows in both 'combined_energies' and 'combined_phases'.
* @param candidates_rows The number of rows in 'candidates'.
* @param cols The number of columns in all three: 'combined_energies', 'combined_phases', and 'candidates'.
*/
// Each voting pixel casts votes along the ray defined by its Gabor phase,
// accumulating weight into every candidate cell the ray crosses above it.
// The candidate stripe sits directly above the voting region in the image.
__global__ void voteForVanishingPointCandidates(float* combined_energies, float* combined_phases, float* candidates,
int voters_rows, int candidates_rows, int cols)
{
int image_y = blockDim.y * blockIdx.y + threadIdx.y;
int image_x = blockDim.x * blockIdx.x + threadIdx.x;
if (image_y >= voters_rows || image_x >= cols)
return; // Out of image.
int energies_offset = image_y * cols + image_x;
// Row of this voter expressed in candidate-region coordinates (the
// candidate region extends candidates_rows above the voting region).
int candidates_y_offset = (image_y+(candidates_rows-voters_rows));
// NOTE(review): this only skips when the offset is exactly 0; negative
// offsets (image_y < voters_rows - candidates_rows) fall through and are
// never entered by the loop below — confirm this is the intent.
if(!candidates_y_offset) return;
//float energy = combined_energies[energies_offset];
/*if (energy < 0.085)
return; // Filter Noise*/
float phase = combined_phases[energies_offset];
// Cotangent gives the horizontal shift of the ray per row climbed.
// NOTE(review): phase == 0 yields +/-inf; the resulting candidates_x then
// fails the bounds check below, so no vote is cast.
float cot = 1.0f / tanf(phase);
// NOTE(review): 'i' is a dead counter — incremented but never read.
int i=0;
for (int candidates_y = candidates_y_offset ; candidates_y >= 0; candidates_y--)
{
int y_delta = candidates_y_offset-candidates_y; //image_y - candidates_y + candidates_rows;candidates_y_offset-
int candidates_x = image_x + cot * y_delta;
i++;
// Vote weight is sin(2*phase)^2, favoring diagonal orientations over
// horizontal/vertical ones.
if (candidates_x >= 0 && candidates_x < cols )
atomicAdd(&candidates[(candidates_y)*cols + candidates_x], (abs(sinf(phase*2)*abs(sinf(phase*2)))));
//candidates_y_offset --;
}
}
/**
* Generates votes for the posible orientarions of the road's main edges by comparing the orientation of each pixel
* with the angle between itself and the vanishing point estimate. The pixel will emit a vote for the angle between
* the vanishing point estimate and itself; this vote is inversely proportional to the difference between its orientation
* and the angle with the vanishing point estimate. The two most voted orientations will correspond to the two edges
* of the road.
* @param combined_energies The resulting magnitude response from combining the Gabor energies at different thetas.
* @param combined_phases The resulting phase response from combining the Gabor energies at different thetas.
* @param rows The number of rows in 'gabor_energies'.
* @param cols The number of columns in 'gabor_energies'.
* @param vanishing_point_row The y coordinate of the vanishing point estimate.
* @param vanishing_point_col The x coordinate of the vanishing point estimate.
* @param direction_vector The vector of directions from the vanishing point estimate.
*/
// Every textured pixel votes for road-edge orientations: for each vanishing
// point candidate it computes the angle 'alpha' between the candidate and the
// pixel, and emits a vote weighted by how close alpha is to the pixel's own
// Gabor phase (weight decays as exp2(-|alpha - phase| * PI)).
// direction_vector has 180 one-degree bins per candidate; vp_score accumulates
// the total vote mass per candidate.
__global__ void getRoadEdges(int candidate_number, const float * combined_energies, const float * combined_phases, int rows, int cols,
int * vanishing_point_candidates, float * direction_vector, float * vp_score)
{
int image_x = blockDim.x * blockIdx.x + threadIdx.x;
int image_y = blockDim.y * blockIdx.y + threadIdx.y;
int offset = image_y * cols + image_x;
if (image_y >= rows || image_x >= cols)
return; // Out of image.
// Pixels with no combined Gabor energy carry no orientation information.
if(combined_energies[offset]==0)
return;
float alpha;float arg;
// vanishing_point_candidates is packed as [row0, col0, row1, col1, ...].
for(int i=0; i<candidate_number; i++){
int x = (-image_x+vanishing_point_candidates[i*2+1]); //col
int y = (image_y-vanishing_point_candidates[i*2]); //row
// Angle to the candidate, normalized into [0, PI).
if(x!=0)
alpha = atanf((float)y/(float)x);
else alpha = PI/2;
if (alpha<0) alpha +=PI;
arg = exp2f(-abs(alpha-combined_phases[offset])*PI);
// Bin index: alpha scaled to [0, 179] degrees, offset by the candidate's
// 180-bin slice. Many pixels hit the same bin, hence the atomics.
atomicAdd(&direction_vector[(int)(lroundf(alpha*179.0/PI)) + 180*i], arg);
atomicAdd(&vp_score[i], arg);
}
}
__global__ void getSupportingPixels(int row, int col, float * combined_phases, float* combined_energies, int rows, int cols, float * support_pixels){
    // Scores how well each textured pixel supports the vanishing-point
    // estimate at (row, col): the score decays exponentially with the gap
    // between the pixel's Gabor phase and the angle from the pixel to the
    // estimate. Pixels with zero combined energy score 0.
    int image_x = blockDim.x * blockIdx.x + threadIdx.x;
    int image_y = blockDim.y * blockIdx.y + threadIdx.y;
    int offset = image_y * cols + image_x;
    if (image_y >= rows || image_x >= cols)
        return; // Out of image.
    if (combined_energies[offset] != 0) {
        // Angle between this pixel and the estimate, normalized to [0, PI).
        float alpha;
        int x = (-image_x + col); //col
        int y = (image_y - row); //row
        if (x != 0)
            alpha = atanf((float)y / (float)x);
        else
            alpha = PI / 2;
        if (alpha < 0) alpha += PI;
        // BUG FIX: removed the dead thresholded 255/0 store that was
        // unconditionally overwritten by this continuous score on the very
        // next statement — it never affected the output.
        support_pixels[offset] = exp2f(-fabsf(alpha - combined_phases[offset]) * PI);
    }
    else
        support_pixels[offset] = 0;
}
}
|
102
|
#include "includes.h"
__global__ void k0( float* g_dataA, float* g_dataB, int pitch, int width )
{
    // Row/column of the cell this thread updates; shifted by one because
    // the outermost ring of the grid is never processed.
    unsigned int row = blockIdx.y * blockDim.y + threadIdx.y + 1;
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x + 1;
    // Reject threads that land on or past the border.
    if (row >= width - 1 || col >= width - 1 || row < 1 || col < 1)
        return;
    // 9-point weighted stencil (0.2 on the center, 0.1 on each neighbour),
    // scaled by 0.95. The summation order is kept exactly as before so the
    // floating-point result is bit-identical.
    g_dataB[row * pitch + col] = (
        0.2f * g_dataA[row * pitch + col] +          //itself
        0.1f * g_dataA[(row-1) * pitch + col ] +     //N
        0.1f * g_dataA[(row-1) * pitch + (col+1)] +  //NE
        0.1f * g_dataA[ row * pitch + (col+1)] +     //E
        0.1f * g_dataA[(row+1) * pitch + (col+1)] +  //SE
        0.1f * g_dataA[(row+1) * pitch + col ] +     //S
        0.1f * g_dataA[(row+1) * pitch + (col-1)] +  //SW
        0.1f * g_dataA[ row * pitch + (col-1)] +     //W
        0.1f * g_dataA[(row-1) * pitch + (col-1)]    //NW
    ) * 0.95f;
}
|
103
|
#include "includes.h"
/*
Copyright 2014-2015 Dake Feng, Peri LLC, dakefeng@gmail.com
This file is part of TomograPeri.
TomograPeri is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TomograPeri is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TomograPeri. If not, see <http://www.gnu.org/licenses/>.
*/
#define blockx 16
#define blocky 16
// One thread per (projection q, detector pixel m): traces the projection ray
// through the reconstruction grid, computes intersection lengths per grid
// cell, and accumulates the iterative-reconstruction update terms into
// dev_suma and dev_E (atomic, since rays overlap cells).
// NOTE(review): the local arrays require num_grid + 1 <= MAX_NUM_GRID;
// there is no runtime check — confirm callers guarantee this.
__global__ void _kernelpp_cuda(int num_projections, float mov, int num_pixels, int num_grid, int num_slices, float* dev_gridx, float* dev_gridy, float* dev_suma, float * dev_E, float* dev_data, float * dev_recon, float* dev_theta){
uint q = blockIdx.x*blockDim.x + threadIdx.x;
uint m = blockIdx.y*blockDim.y + threadIdx.y;
const double PI = 3.141592653589793238462;
bool quadrant;
float sinq, cosq;
float xi, yi;
float srcx, srcy, detx, dety;
float slope, islope;
int n,i,j,k;
int alen, blen, len;
int i1, i2;
float x1, x2;
int indx, indy;
int io;
float midx, midy, diffx, diffy;
float simdata;
float upd;
float coordx[MAX_NUM_GRID];
float coordy[MAX_NUM_GRID];
float ax[MAX_NUM_GRID];
float ay[MAX_NUM_GRID];
float bx[MAX_NUM_GRID];
float by[MAX_NUM_GRID];
float coorx[MAX_NUM_GRID*2];
float coory[MAX_NUM_GRID*2];
float leng[MAX_NUM_GRID*2];
int indi[MAX_NUM_GRID*2];
// Guard against the grid over-covering the (projection, pixel) domain.
if((m>=num_pixels)||(q>=num_projections))
return;
// Calculate the sin and cos values
// of the projection angle and find
// at which quadrant on the cartesian grid.
sinq = sin(dev_theta[q]);
cosq = cos(dev_theta[q]);
if ((dev_theta[q] >= 0 && dev_theta[q] < PI/2) ||
(dev_theta[q] >= PI && dev_theta[q] < 3*PI/2)) {
quadrant = true;
} else {
quadrant = false;
}
// Find the corresponding source and
// detector locations for a given line
// trajectory of a projection (Projection
// is specified by sinq and cosq).
xi = -1e6;
yi = -(num_pixels-1)/2.+m+mov;
srcx = xi*cosq-yi*sinq;
srcy = xi*sinq+yi*cosq;
detx = -xi*cosq-yi*sinq;
dety = -xi*sinq+yi*cosq;
// Find the intersection points of the
// line connecting the source and the detector
// points with the reconstruction grid. The
// intersection points are then defined as:
// (coordx, gridy) and (gridx, coordy)
slope = (srcy-dety)/(srcx-detx);
islope = 1/slope;
for (n = 0; n <= num_grid; n++) {
coordx[n] = islope*(dev_gridy[n]-srcy)+srcx;
coordy[n] = slope*(dev_gridx[n]-srcx)+srcy;
}
// Merge the (coordx, gridy) and (gridx, coordy)
// on a single array of points (ax, ay) and trim
// the coordinates that are outside the
// reconstruction grid.
alen = 0;
blen = 0;
for (n = 0; n <= num_grid; n++) {
if (coordx[n] > dev_gridx[0]) {
if (coordx[n] < dev_gridx[num_grid]) {
ax[alen] = coordx[n];
ay[alen] = dev_gridy[n];
alen++;
}
}
if (coordy[n] > dev_gridy[0]) {
if (coordy[n] < dev_gridy[num_grid]) {
bx[blen] = dev_gridx[n];
by[blen] = coordy[n];
blen++;
}
}
}
len = alen+blen;
// Sort the array of intersection points (ax, ay).
// The new sorted intersection points are
// stored in (coorx, coory).
// Merge of the two already-ordered point lists; when 'quadrant' is false
// the (ax, ay) list is consumed in reverse so both runs ascend in x.
i = 0;
j = 0;
k = 0;
if (quadrant) {
while (i < alen && j < blen)
{
if (ax[i] < bx[j]) {
coorx[k] = ax[i];
coory[k] = ay[i];
i++;
k++;
} else {
coorx[k] = bx[j];
coory[k] = by[j];
j++;
k++;
}
}
while (i < alen) {
coorx[k] = ax[i];
coory[k] = ay[i];
i++;
k++;
}
while (j < blen) {
coorx[k] = bx[j];
coory[k] = by[j];
j++;
k++;
}
} else {
while (i < alen && j < blen)
{
if (ax[alen-1-i] < bx[j]) {
coorx[k] = ax[alen-1-i];
coory[k] = ay[alen-1-i];
i++;
k++;
} else {
coorx[k] = bx[j];
coory[k] = by[j];
j++;
k++;
}
}
while (i < alen) {
coorx[k] = ax[alen-1-i];
coory[k] = ay[alen-1-i];
i++;
k++;
}
while (j < blen) {
coorx[k] = bx[j];
coory[k] = by[j];
j++;
k++;
}
}
// Calculate the distances (leng) between the
// intersection points (coorx, coory). Find
// the indices of the pixels on the
// reconstruction grid (indi).
for (n = 0; n < len-1; n++) {
diffx = coorx[n+1]-coorx[n];
diffy = coory[n+1]-coory[n];
leng[n] = sqrt(diffx*diffx+diffy*diffy);
midx = (coorx[n+1]+coorx[n])/2;
midy = (coory[n+1]+coory[n])/2;
x1 = midx+num_grid/2.;
x2 = midy+num_grid/2.;
i1 = (int)(midx+num_grid/2.);
i2 = (int)(midy+num_grid/2.);
// The (i1>x1)/(i2>x2) corrections round toward negative infinity for
// negative midpoints (plain int casts truncate toward zero).
indx = i1-(i1>x1);
indy = i2-(i2>x2);
indi[n] = indx+indy*num_grid;
}
// Note: The indices (indi) and the corresponding
// weights (leng) are the same for all slices. So,
// there is no need to calculate them for each slice.
//*******************************************************
// Below is for updating the reconstruction grid.
for (n = 0; n < len-1; n++) {
// suma[indi[n]] += leng[n];
atomicAdd(&(dev_suma[indi[n]]),leng[n]);
}
for (k = 0; k < num_slices; k++) {
i = k*num_grid*num_grid;
io = m + k*num_pixels + q*num_slices*num_pixels;
// Forward-project the current reconstruction along this ray.
simdata = 0;
for (n = 0; n < len-1; n++) {
simdata += dev_recon[indi[n]+i] * leng[n];
}
// NOTE(review): simdata can be 0 (e.g. all-zero recon), making 'upd'
// inf/NaN — confirm the host guarantees a non-zero initial estimate.
upd = dev_data[io]/simdata;
for (n = 0; n < len-1; n++) {
// E[indi[n]+i] -= dev_recon[indi[n]+i]*upd*leng[n];
atomicAdd(&(dev_E[indi[n]+i]),-dev_recon[indi[n]+i]*upd*leng[n]);
}
}
}
|
104
|
#include "includes.h"
// Optimized using shared memory and on chip memory
// Compile source: $- nvcc src/TokamakSimulation.cu -o nBody -lglut -lm -lGLU -lGL
// Run Executable: $- ./nBody
//To stop hit "control c" in the window you launched it from.
//Make movies https://gist.github.com/JPEGtheDev/db078e1b066543ce40580060eee9c1bf
#define NR_NEUTRONS 8
#define NR_ELECTRONS 8
#define NR_PROTONS 8
//atomic mass (u)
#define MASS_PROTON 1.007276
#define MASS_NEUTRON 1.008664
#define MASS_ELECTRON 5.485799e-4
#define BLOCK 256
#define XWindowSize 2500
#define YWindowSize 2500
#define DRAW 10
#define DAMP 1.0
#define DT 0.001
#define STOP_TIME 10.0
#define G 6.67408E-11
#define H 1.0
#define EYE 8.5
#define FAR 80.0
#define SHAPE_CT 24
#define SHAPE_SIZE 256
#define PATH "./objects/Tokamak_256.obj" //256 vertices-shape (for array simplicity)
#define N 16*16*16
//***********************
// TODO:
// Check units velocity calculation mag
// ಠ_ಠ
//***********************
// Globals
// Simulation state pointers.
// NOTE(review): roles inferred from names only — p: positions (+mass in .w),
// v/f: velocities and forces, reactor: reactor geometry, *_GPU0/_GPU1:
// per-device buffers for a two-GPU split. Confirm against the host code.
float4 *p;
float3 *v, *f, *reactor,*r_GPU0, *r_GPU1;
float4 *p_GPU0, *p_GPU1;
__global__ void moveBodies(float4 *g_pos, float4 *d_pos, float3 *vel, float3 * force, int offset){
    // One thread integrates one body.
    int i = threadIdx.x + blockDim.x*blockIdx.x;
    if (i >= N)
        return;
    // Semi-implicit Euler with linear drag: a = (F - DAMP*v) / m, where the
    // mass is stored in d_pos[i].w.
    vel[i].x += ((force[i].x-DAMP*vel[i].x)/d_pos[i].w)*DT;
    vel[i].y += ((force[i].y-DAMP*vel[i].y)/d_pos[i].w)*DT;
    vel[i].z += ((force[i].z-DAMP*vel[i].z)/d_pos[i].w)*DT;
    // Advance the device-local position by one time step.
    d_pos[i].x += vel[i].x*DT;
    d_pos[i].y += vel[i].y*DT;
    d_pos[i].z += vel[i].z*DT;
    // Publish the new position into the global array at this device's slice.
    g_pos[i+offset].x = d_pos[i].x;
    g_pos[i+offset].y = d_pos[i].y;
    g_pos[i+offset].z = d_pos[i].z;
}
|
105
|
#include <cmath>
__global__ void my_copysign(double* v)
{
    // Flip the sign of *v for odd thread indices.
    // std::pow(-1, i) evaluates to exactly +1.0 or -1.0 for non-negative i,
    // so a simple parity test is equivalent and avoids the transcendental
    // pow() call entirely.
    // NOTE(review): as in the original, every thread read-modify-writes the
    // same *v — launching with more than one thread races.
    int i = threadIdx.x;
    const double sign = (i % 2 == 0) ? 1.0 : -1.0;
    *v = sign * (*v);
}
|
106
|
#include <iostream>
#include "../ginkgo/GOrderList.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
using namespace std;
// Smoke test for the device-side gpu_ginkgo::OrderList structure: drives a
// scripted sequence of order sends, book updates, cancels, and one trade,
// printing the structure's state after every phase. Runs as a single-thread
// kernel; all checks are visual (printf), not asserted.
__global__ void test(){
int pos = 0, ppos = 0, pnl = 0;
// Creating an OrderList struct
// (template args <100, 10> and ctor args are project-defined; semantics of
// getTime(t, dt) assumed to advance the structure's clock — confirm.)
gpu_ginkgo::OrderList<100, 10> ggol(true, 1024, 10);
ggol.getTime(1.5, 1.0);
printf("<<< CREATING A NEW ORDER LIST STRUCTURE >>>\n");
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
int q_lim;
// SENDING SELLING ORDERS
printf("<<< SENDING NEW SELLING ORDERS >>>\n");
q_lim = 2;
ggol.sendNewSellingOrders(1029, 1033, q_lim);
ggol.getTime(1.6, 1.0);
q_lim = 2;
ggol.sendNewSellingOrders(1029, 1033, q_lim);
ggol.getTime(1.7, 1.0);
q_lim = 2;
ggol.sendNewSellingOrders(1030, 1034, q_lim);
ggol.getTime(1.8, 1.0);
q_lim = 2;
ggol.sendNewSellingOrders(1031, 1035, q_lim);
ggol.getTime(1.9, 1.0);
q_lim = 37;
ggol.sendNewSellingOrders(1029, 1033, q_lim);
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.updatePositions(pos);
ggol.updatePnl(pnl);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// SENDING NEW BUYING ORDERS
printf("<<< SENDING NEW BUYING ORDERS >>>\n");
q_lim = 2;
ggol.sendNewBuyingOrders(1028, 1032, q_lim);
ggol.getTime(2.0, 1.0);
q_lim = 3;
ggol.sendNewBuyingOrders(1027, 1031, q_lim);
ggol.getTime(2.1, 1.0);
q_lim = 4;
ggol.sendNewBuyingOrders(1026, 1030, q_lim);
ggol.getTime(2.2, 1.0);
q_lim = 17;
ggol.sendNewBuyingOrders(1028, 1032, q_lim);
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.updatePositions(pos);
ggol.updatePnl(pnl);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// 2.9s Update
printf("<<< 2.9s BOOK UPDATES >>>\n");
ggol.getTime(2.9, 0.17);
int bz[10] = {10, 10, 15, 15, 20, 20, 15, 5, 25, 45};
ggol.updatePendingOrders(bz);
ggol.updateCancelOrders(1029, bz);
ggol.updateAckedOrders(1029, bz);
ggol.preCanceling(1030, 1032);
q_lim = 37;
// NOTE(review): '33' looks like a typo for '1033' — every other
// sendNewSellingOrders call uses a 4-digit price pair. Confirm.
ggol.sendNewSellingOrders(1029, 33, q_lim);
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.updatePositions(pos);
ggol.updatePnl(pnl);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// 3.2s Update
printf("<<< 3.2s BOOK UPDATES >>>\n");
ggol.getTime(3.2, 0.17);
for(int i=0;i<10;++i) bz[i] = (i+1)*10;
ggol.updatePendingOrders(bz);
ggol.updateCancelOrders(1029, bz);
ggol.updateAckedOrders(1029, bz);
ggol.preCanceling(1030, 1032);
q_lim = 37;
ggol.sendNewSellingOrders(1029, 1033, q_lim);
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.updatePositions(pos);
ggol.updatePnl(pnl);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// A trade comes with price = 1030, qty = 90;
printf("<<< TRADE: PRICE = 1030, QTY = 90 >>>\n");
ggol.getTime(3.3, 0.17);
// NOTE(review): tv = 75 despite the banner saying QTY = 90 — confirm.
int tv = 75;
ggol.getTradedThrough(tv, 1030, bz);
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.updatePositions(pos);
ggol.updatePnl(pnl);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
for(int i=0;i<10;++i) printf("%d: \t%d\n",1024+i, bz[i]);
printf("\n");
printf("--------------------------------------------------------\n\n");
// Test finished
printf("\n <<< TEST FINISHED !!! >>>\n");
}
int main(){
    // Host-side driver: launches the single-thread device test kernel.
    def_dvec(float) dev_out(1, 0);
    test<<<1, 1>>>();
    // BUG FIX: kernel launches are asynchronous — without a synchronize the
    // process can exit before the kernel runs and before its printf output
    // is flushed to the host.
    cudaDeviceSynchronize();
    return 0;
}
|
107
|
/**
* Copyright 2020 Sajeeb Roy Chowdhury
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software
* is furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <cuda.h>
#ifndef CRC_polynomial_cuda
#define CRC_polynomial_cuda
__device__ __host__
int remainder_is_nonzero(const int& da, bool* A, const int& db, const uint64_t& B)
// Returns true if the remainder of A after division by B is nonzero.
// A holds da+db coefficient bits (message in A[db..da+db-1], low db slots are
// working space); B packs the degree-db divisor with its highest-degree
// coefficient in bit 0. A is modified in place.
{
    // Long division over GF(2): cancel each set message bit, high to low,
    // by XOR-ing the divisor into the db+1 bits below (and including) it.
    for (int bit = da + db - 1; bit >= db; --bit) {
        if (!A[bit])
            continue;
        for (int tap = 0; tap <= db; ++tap)
            A[bit - tap] = A[bit - tap] != ((((B >> tap) & 1)) != 0);
    }
    // Any surviving bit in the low db slots means a nonzero remainder.
    for (int r = 0; r < db; ++r)
        if (A[r])
            return true;
    return false;
}
template<int da, int dc>
__device__ __host__
bool test_all_two_bit_patterns(const uint64_t& C)
// Returns true if division by C leaves a nonzero remainder for all two-bit
// error patterns, i.e. C detects every double-bit error in a da-bit message.
{
// B is the dividend buffer consumed (and clobbered) by remainder_is_nonzero;
// A holds the candidate error pattern being enumerated.
bool B[da + dc];
bool A[da + dc];
memset(A, 0, sizeof(A));
memset(B, 0, sizeof(B));
// Enumerate all (i, j) pairs of set bits, i < j.
for (int i = 0; i < da; i++) {
A[i] = 1;
for (int j = i + 1; j < da; j++) {
A[j] = 1;
// Rebuild B fresh each trial: pattern bits shifted up by dc, zeros below.
for (int k = 0; k < da; k++) B[dc + k] = A[k];
for (int k = 0; k < dc; k++) B[k] = 0;
if (!remainder_is_nonzero (da, B, dc, C)) return false;
// NOTE(review): this barrier sits after a data-dependent 'return false',
// so threads of a block can diverge around it — __syncthreads() in
// divergent control flow is undefined behavior. Confirm intent.
#if __CUDA_ARCH__
__syncthreads();
#endif
A[j] = 0;
}
A[i] = 0;
}
return true;
}
template<int da, int dc>
__device__ __host__
bool test_all_three_bit_patterns(const uint64_t& C)
// Returns true if division by C leaves a nonzero remainder for all three-bit
// error patterns, i.e. C detects every triple-bit error in a da-bit message.
{
// B is the dividend buffer consumed (and clobbered) by remainder_is_nonzero;
// A holds the candidate error pattern being enumerated.
bool B[da + dc];
bool A[da + dc];
memset(A, 0, sizeof(A));
memset(B, 0, sizeof(B));
// Enumerate all (i1, i2, i3) triples of set bits, i1 < i2 < i3.
for (int i1 = 0; i1 < da; i1++) {
A[i1] = 1;
for (int i2 = i1 + 1; i2 < da; i2++) {
A[i2] = 1;
for (int i3 = i2 + 1; i3 < da; i3++) {
A[i3] = 1;
// Rebuild B fresh each trial: pattern bits shifted up by dc, zeros below.
for (int h = 0; h < da; h++) B[dc + h] = A[h];
for (int h = 0; h < dc; h++) B[h] = 0;
if (!remainder_is_nonzero (da, B, dc, C)) return false;
// NOTE(review): barrier after a data-dependent 'return false' — threads
// can diverge around it; __syncthreads() in divergent control flow is
// undefined behavior. Confirm intent.
#if __CUDA_ARCH__
__syncthreads();
#endif
A[i3] = 0;
}
A[i2] = 0;
}
A[i1] = 0;
}
return true;
}
template<int da, int dc>
__global__
void CRC_polynomial_cuda_t2(uint64_t C, uint64_t e, bool* res) {
// Each thread screens one candidate polynomial C + thread_id for two-bit
// error detection; res[thread_id] records the outcome.
// NOTE(review): res must have at least gridDim.x * blockDim.x entries —
// there is no bounds guard here.
uint64_t thread_id = threadIdx.x + blockIdx.x * blockDim.x;
bool ret = false;
// Default to false so early-rejected candidates leave a defined result.
res[thread_id] = ret;
C += thread_id;
// Reject even polynomials (no x^0 term), candidates at/above the exclusive
// bound e, and those wider than dc+1 bits.
if (!(C&1)) return;
if (C >= e) return;
if (C > (1ul<<(dc+1))-1) return;
ret = test_all_two_bit_patterns<da, dc>(C);
res[thread_id] = ret;
}
template<int da, int dc>
__global__
void CRC_polynomial_cuda_t3(uint64_t* data, bool* res, size_t size) {
    // Each thread checks one candidate polynomial from 'data' for three-bit
    // error detection and stores the verdict in 'res'.
    uint64_t thread_id = threadIdx.x + blockIdx.x * blockDim.x;
    // BUG FIX: guard must be >= — valid indices are [0, size), and the old
    // '>' let thread_id == size read/write one element past the end of
    // 'data' and 'res'.
    if (thread_id >= size) return;
    res[thread_id] = test_all_three_bit_patterns<da, dc>(data[thread_id]);
}
#endif
|
108
|
#include "includes.h"
__global__ void squareMatrixMult(float *d_a, float *d_b, float *d_result, int n)
{
    // Tiled n x n matrix multiply: each block computes a BLOCK_SIZE x
    // BLOCK_SIZE tile of d_result = d_a * d_b, staging operand tiles in
    // shared memory. Expects blockDim == (BLOCK_SIZE, BLOCK_SIZE).
    __shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE];
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    float tmp = 0;
    int idx;
    for (int sub = 0; sub < gridDim.x; ++sub) {
        idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
        if(idx >= n*n) {
            // n may not divisible by BLOCK_SIZE
            tile_a[threadIdx.y][threadIdx.x] = 0;
        }
        else {
            tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
        }
        idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
        if(idx >= n*n) {
            tile_b[threadIdx.y][threadIdx.x] = 0;
        }
        else {
            tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
        }
        // All threads must finish loading before any thread consumes the tiles.
        __syncthreads();
        // BUG FIX: the tile dot-product must start at k = 0. Starting at
        // threadIdx.x/n silently dropped the first product(s) for every
        // thread with threadIdx.x >= n, corrupting the result for small n.
        for (int k = 0; k < BLOCK_SIZE; ++k) {
            tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
        }
        // Prevent the next iteration from overwriting tiles still in use.
        __syncthreads();
    }
    if(row < n && col < n) {
        d_result[row * n + col] = tmp;
    }
}
|
109
|
#include "includes.h"
__global__ void rsqrt_kernel_large(float* x, unsigned int len, unsigned int rowsz) {
    // In-place reciprocal square root over a buffer addressed by a 2D grid:
    // blockIdx.y selects a row of rowsz elements. Non-positive inputs map to 0.
    unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * rowsz;
    // Use the single-precision rsqrtf and float literals to avoid promoting
    // the computation to double on a float buffer.
    if (idx < len) x[idx] = x[idx] > 0.0f ? rsqrtf(x[idx]) : 0.0f;
}
|
110
|
#include <cuda_runtime.h>
//#include <cublas_v2.h>
//#include <cublasXt.h>
//#include <cudnn.h>
//#include <nccl.h>
#include <cassert>
#include <chrono>
#include <iostream>
#define CUDA_CHECK(e) (assert(cudaSuccess == (e)))
#define CUBLAS_CHECK(e) (assert(CUBLAS_STATUS_SUCCESS == (e)))
#define CUDNN_CHECK(e) (assert(CUDNN_STATUS_SUCCESS == (e)))
int main(int argc, const char** argv) {
  // Micro-benchmark: times a device-to-host copy of a small buffer over
  // num_trials iterations and reports average wallclock and bandwidth.
  int num_devices = 0;
  CUDA_CHECK(cudaGetDeviceCount(&num_devices));
  std::clog << "num devices: " << num_devices << std::endl;
  num_devices = 1;
  const size_t buf_size = 32UL;
  cudaStream_t stream = NULL;
  float* x_h = NULL;
  float* x = NULL;
  float* y = NULL;
  x_h = (float*)malloc(buf_size * sizeof(float));
  CUDA_CHECK(cudaSetDevice(0));
  CUDA_CHECK(cudaStreamCreate(&stream));
  CUDA_CHECK(cudaMalloc((void**)&x, buf_size * sizeof(float)));
  CUDA_CHECK(cudaMalloc((void**)&y, buf_size * sizeof(float)));
  CUDA_CHECK(cudaStreamSynchronize(stream));
  CUDA_CHECK(cudaDeviceSynchronize());
  const int num_trials = 100;
  double avg_elapsed_ms = 0.0;
  std::clog << "running: reduce" << std::endl;
  for (int t = 0; t < num_trials; ++t) {
    for (size_t i = 0; i < buf_size; ++i) {
      x_h[i] = 42.0f;
    }
    std::clog << "DEBUG: x_h[0]: before: " << x_h[0] << std::endl;
    auto start = std::chrono::steady_clock::now();
    CUDA_CHECK(cudaSetDevice(0));
    //reduce<float, AtomicReduceMap><<<(buf_size+1024-1)/1024, 1024, 0, stream>>>(
    //    buf_size, x, y);
    // NOTE(review): 'x' is never written on the device (the reduce launch is
    // commented out), so this copy reads uninitialized device memory.
    CUDA_CHECK(cudaMemcpyAsync(
        x_h,
        x,
        buf_size * sizeof(float),
        cudaMemcpyDeviceToHost,
        stream));
    CUDA_CHECK(cudaStreamSynchronize(stream));
    std::clog << "DEBUG: x_h[0]: after: " << x_h[0] << std::endl;
    auto lap = std::chrono::steady_clock::now();
    auto diff = lap - start;
    avg_elapsed_ms += std::chrono::duration<double, std::milli>(diff).count();
  }
  avg_elapsed_ms /= num_trials;
  double avg_bandwidth = ((double)(buf_size * sizeof(float)) * 1.0e-9) / (avg_elapsed_ms * 1.0e-3);
  std::clog << "  avg wallclock: " << avg_elapsed_ms << " ms" << std::endl;
  std::clog << "  avg bandwidth: " << avg_bandwidth << " GB/s" << std::endl;
  // BUG FIX: release the stream, device buffers, and host buffer — the
  // original leaked all four resources.
  CUDA_CHECK(cudaStreamDestroy(stream));
  CUDA_CHECK(cudaFree(x));
  CUDA_CHECK(cudaFree(y));
  free(x_h);
  return 0;
}
|
111
|
#include "includes.h"
/*
 * Element-wise squared-hinge (L2-SVM) cost. One block per task; each thread
 * processes a contiguous slice of `per_thread_case` cases. Writes the hinge
 * value into pre_grad and its square into all_cost.
 */
__global__ void kEltwiseL2SVMCost(float* ydata, float* ldata, float* pre_grad, float* all_cost, float a, float b, int numCases, int numTasks, int per_thread_case) {
    const int task = blockIdx.x;
    if (task >= numTasks) {
        return;
    }
    const int first = threadIdx.x * per_thread_case;
    const int last = min(first + per_thread_case, numCases);
    for (int c = first; c < last; ++c) {
        const int idx = task * numCases + c;
        // hinge = max(a - y*(l - b), 0)
        const float hinge = fmaxf(a - ydata[idx] * (ldata[idx] - b), 0);
        pre_grad[idx] = hinge;
        all_cost[idx] = hinge * hinge;
    }
}
|
112
|
/* Kernel that computes the gradient of an image, being the gradient
* the difference between the neighbour pixels and the central pixel
* of a cluster.
*/
/*
 * Computes, for every interior voxel, the sum of differences between each
 * neighbour in a Kx x Ky x Kz window and the central voxel, writing the
 * result to ptrGradientImage at the voxel's own position.
 * Volume is column-wise stored: index = y + x*Ny + z*Nx*Ny.
 */
__global__ void d_Gradient(float *ptrInputImage, float *ptrGradientImage, int Nx, int Ny, int Nz, int Kx, int Ky, int Kz)
{
    int i, j, k, linearIndex, neighbourIndex;
    int Kradius_x, Kradius_y, Kradius_z;
    // BUG FIX: the radii were read uninitialized — the assignment below had
    // been commented out.
    Kradius_x = Kx/2; Kradius_y = Ky/2; Kradius_z = Kz/2;
    float output = 0, voxelValue = 0;
    int x = threadIdx.x + blockDim.x*blockIdx.x;
    int y = threadIdx.y + blockDim.y*blockIdx.y;
    int z = threadIdx.z + blockDim.z*blockIdx.z;
    // Check if inside the image
    if((y>=Ny)||(x>=Nx)||(z>=Nz)||(y<0)||(x<0)||(z<0))
    {
        return;
    }
    linearIndex = y + x*Ny + z*Nx*Ny; // col-wise stored matrix
    // Get the voxel value:
    voxelValue = ptrInputImage[linearIndex];
    // Process only voxels whose full window lies inside the volume.
    // BUG FIX: y bound used (Ny - Ky) while the others used the radius.
    if((y>=Kradius_y)&&(x>=Kradius_x)&&(z>=Kradius_z)&&(y<Ny-Kradius_y)&&(x<Nx-Kradius_x)&&(z<Nz-Kradius_z))
    {
        #pragma unroll
        for(i = 0; i < Kx; i++)
        {
            #pragma unroll
            for(j = 0; j < Ky; j++)
            {
                #pragma unroll
                for(k = 0; k < Kz; k++)
                {
                    // BUG FIX: z stride is Nx*Ny (was Ny*Nz), matching the
                    // indexing above; use a separate index so the write
                    // target below is not clobbered by the loop.
                    neighbourIndex = (y-Kradius_y+j) + (x-Kradius_x+i)*Ny + (z-Kradius_z+k)*Nx*Ny;
                    // Sum of differences
                    output += ptrInputImage[neighbourIndex]-voxelValue;
                }
            }
        }
        ptrGradientImage[linearIndex] = output;
    }
}
|
113
|
/*
* A tutorial program for cuda programming. It implement algorithm of tensordot operation.
*
* by Steven Liu <stevenliucx@gmail.com>
* Nov 26, 2017
*
*/
#include <stdio.h>
#include <cuda.h>
#include <unistd.h>
#include <time.h>
#include <stdarg.h>
#include <math.h>
#include <iostream>
#include <vector>
clock_t time_start;
/*
 * printf-style logger: prints total elapsed time since time_start and the
 * delta since the previous call, then the formatted message, flushing stdout.
 */
void log_info(const char* format, ...)
{
    clock_t now = clock() - time_start;
    static clock_t prev = 0;
    if (prev == 0)
        prev = now;
    printf("------- [%12.3fms][%12.3fms] ", (float)now/(CLOCKS_PER_SEC/1000.0), (float)(now-prev)/(CLOCKS_PER_SEC/1000.0));
    va_list args;
    va_start(args, format);
    vprintf(format, args);
    va_end(args);
    printf("\n");
    fflush(stdout);
    prev = now;
}
/*
* A macro for check cuda function return status. It will call checkCuda_func.
*/
#define checkCuda(ret) checkCuda_func( (cudaError_t)(ret), __FILE__, __LINE__ )
/*
* Check cuda return status. exit program when error occur.
*/
/*
 * Validate a CUDA status code: on failure, print the error string with the
 * call site and pid, then terminate the process; on success, pass it through.
 */
inline cudaError_t checkCuda_func(cudaError_t ret, const char * file, const int line)
{
    if (ret == cudaSuccess)
        return ret;
    fprintf(stderr, "cuda operation returned: %s (code %d), in file: %s(%d), the program (pid: %d) exit.\n",
            cudaGetErrorString(ret), ret, file, line, getpid());
    fflush(stderr);
    exit(-1);
}
/*
* Define a type for shape of tensor.
*/
typedef std::vector<size_t> Shape;
/*
* Get num of elements of shape.
*/
/*
 * Total number of elements described by a shape (product of its extents;
 * 1 for an empty shape).
 */
size_t get_shape_size(Shape shape)
{
    size_t total = 1;
    for (size_t extent : shape)
        total *= extent;
    return total;
}
/*
 * Allocate a host tensor of the given shape and fill every element with
 * default_val. Exits with a diagnostic on allocation failure.
 * Caller owns the returned buffer and must free() it.
 */
float *init_tensor(Shape shape, float default_val)
{
    size_t n_elems = get_shape_size(shape);
    float *p = (float *)malloc(n_elems*sizeof(float));
    // BUG FIX: the allocation was unchecked; a failure would crash later with
    // no indication of the cause.
    if (p == NULL) {
        fprintf(stderr, "init_tensor: failed to allocate %zu elements\n", n_elems);
        exit(-1);
    }
    // BUG FIX: size_t index — the previous `int i` overflows for tensors
    // larger than INT_MAX elements.
    for (size_t i = 0; i < n_elems; i++)
        p[i] = default_val;
    return p;
}
/*
* Kernel function for 3D tensor dot. Just for 3D dot.
* Shape is: A[n0, n1, n2] * B[n1, n2, n3] = C[n0, n3]
*/
/*
 * 3D tensor dot kernel: C[n0, n3] = sum over (j, k) of A[row, j, k] * B[j, k, col].
 * One thread computes one output element of C.
 */
__global__ void kernel_tensor3D_dot(float* d_TA, float* d_TB, float *d_TC,
size_t n0, size_t n1, size_t n2, size_t n3)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard the grid tail against the bounds of TC.
    if (row >= n0 || col >= n3)
        return;
    float acc = 0.0;
    for (int j = 0; j < n1; j++) {
        for (int k = 0; k < n2; k++) {
            acc += d_TA[row*n1*n2 + j*n2 + k] * d_TB[j*n2*n3 + k*n3 + col];
        }
    }
    d_TC[row*n3 + col] = acc;
}
/*
* Cuda version of tensor dot operation.
*/
/*
 * CUDA version of the tensor dot operation: allocates device buffers, copies
 * the operands, launches kernel_tensor3D_dot, and copies the result back.
 * A[n0,n1,n2] * B[n1,n2,n3] = C[n0,n3].
 */
void tensor3D_dot_cu(float *TA, Shape shapeA, float *TB,
Shape shapeB, float *TC, Shape shapeC)
{
    float *d_TA, *d_TB, *d_TC; // corresponding device memory pointer.
    log_info("cudaMalloc");
    checkCuda( cudaMalloc(&d_TA, get_shape_size(shapeA)*sizeof(float)) );
    checkCuda( cudaMalloc(&d_TB, get_shape_size(shapeB)*sizeof(float)) );
    checkCuda( cudaMalloc(&d_TC, get_shape_size(shapeC)*sizeof(float)) );
    log_info("copy data to device");
    checkCuda( cudaMemcpy(d_TA, TA, get_shape_size(shapeA)*sizeof(float),
                          cudaMemcpyHostToDevice) );
    // BUG FIX: %zu for a size_t argument — %d is undefined behavior on
    // 64-bit sizes.
    log_info("size: %zu", get_shape_size(shapeB)*sizeof(float));
    checkCuda( cudaMemcpy(d_TB, TB, get_shape_size(shapeB)*sizeof(float),
                          cudaMemcpyHostToDevice) );
    dim3 dimBlock(32, 32);
    // It is a algorithm trick. For get ceiling(M/N), you can caclulate: (M-1)/N + 1
    // Ref: <https://stackoverflow.com/questions/2745074/fast-ceiling-of-an-integer-division-in-c-c>
    dim3 dimGrid((shapeC[0]-1)/dimBlock.x+1, (shapeC[1]-1)/dimBlock.y+1);
    log_info("launch kernel");
    kernel_tensor3D_dot<<<dimGrid, dimBlock>>>(d_TA, d_TB, d_TC,
                                               shapeA[0], shapeA[1], shapeA[2], shapeB[2]);
    // BUG FIX: check the synchronize status too — asynchronous kernel faults
    // surface here, not at launch.
    checkCuda( cudaDeviceSynchronize() );
    checkCuda( cudaGetLastError() );
    log_info("kernel execution successfully");
    log_info("copy back result in device to host memory");
    checkCuda( cudaMemcpy(TC, d_TC, get_shape_size(shapeC)*sizeof(float),
                          cudaMemcpyDeviceToHost) );
    checkCuda( cudaFree(d_TA) );
    checkCuda( cudaFree(d_TB) );
    checkCuda( cudaFree(d_TC) );
}
/*
* CPU version of tensor dot operation to compare performance with cuda version.
*/
/*
 * CPU reference for the tensor dot: C[i, l] = sum over (j, k) of
 * A[i, j, k] * B[j, k, l]. Used to validate and time against the CUDA path.
 */
void tensor3D_dot_cpu(float *TA, Shape shapeA, float *TB,
Shape shapeB, float *TC, Shape shapeC)
{
    size_t n0 = shapeA[0], n1 = shapeA[1], n2 = shapeA[2], n3 = shapeC[1];
    for (size_t i = 0; i < n0; i++) {
        for (size_t l = 0; l < n3; l++) {
            float acc = 0.0;
            for (size_t j = 0; j < n1; j++)
                for (size_t k = 0; k < n2; k++)
                    acc += TA[i*n1*n2 + j*n2 + k] * TB[j*n2*n3 + k*n3 + l];
            TC[i*n3 + l] = acc;
        }
    }
}
/*
 * Print the first n values (truncated to int) starting at the given offset,
 * for quick visual inspection.
 */
void display_array(float *p, size_t n, size_t offset=0)
{
    std::cout << "**first " << n << "(+" << offset << ") results** : [ ";
    for (size_t i = 0; i < n; i++)
        std::cout << (int)p[offset + i] << " ";
    std::cout << " ] " << std::endl;
}
/*
* get sum of all elements of an array.
* return double precision to prevent overflow.
*/
/*
 * Sum all elements of a float array, accumulating in double precision to
 * limit rounding error / overflow.
 */
double sum_array(float *p, size_t n)
{
    double total = 0.0;
    for (size_t i = 0; i < n; i++)
        total += p[i];
    return total;
}
/*
* MAIN FUNCTION
*/
/*
 * MAIN FUNCTION
 * Builds two constant-filled tensors, runs the CUDA and CPU dot products,
 * and verifies both against the closed-form expected sum.
 */
int main(int argc, char *argv[])
{
    const float init_val_A = 2.0;
    const float init_val_B = 1.5;
    time_start = clock();
    log_info("program start");
    Shape shapeA = {1024,32,128};
    Shape shapeB = {32,128,1024};
    Shape shapeC = {shapeA[0], shapeB[2]};
    log_info("initialize tensor");
    float *TA = init_tensor(shapeA, init_val_A);
    float *TB = init_tensor(shapeB, init_val_B);
    float *TC_cu = init_tensor(shapeC, 0.0);
    float *TC_cpu = init_tensor(shapeC, 0.0);
    log_info("tensor3D_dot_cu");
    tensor3D_dot_cu(TA, shapeA, TB, shapeB, TC_cu, shapeC);
    log_info("tensor3D_dot_cu OK");
    log_info("tensor3D_dot_cpu");
    tensor3D_dot_cpu(TA, shapeA, TB, shapeB, TC_cpu, shapeC);
    log_info("tensor3D_dot_cpu OK");
    // Check correctness of calculation.
    double t1, t2;
    t1 = sum_array(TC_cu, get_shape_size(shapeC));
    // t2 is the correct answer in theory.
    t2 = init_val_A*init_val_B*shapeA[1]*shapeA[2]*get_shape_size(shapeC);
    // BUG FIX: compare with a relative tolerance — the previous absolute
    // threshold of 1e-15 (which the original comment itself flagged as "too
    // small") is far below the rounding error of a ~1e10 float sum, so a
    // correct result could be reported as unequal.
    double tol = 1e-9 * fabs(t2);
    // BUG FIX: "FLASE" typo in the output corrected to "FALSE".
    log_info("sum of TC_cu: %f, correct answer: %f, equal: %s",
             t1, t2, fabs(t1-t2)<tol?"TRUE":"FALSE");
    t1 = sum_array(TC_cpu, get_shape_size(shapeC));
    log_info("sum of TC_cpu: %f, correct answer: %f, equal: %s",
             t1, t2, fabs(t1-t2)<tol?"TRUE":"FALSE");
    // display first n elements for check.
    display_array(TA, 100);
    display_array(TB, 100);
    display_array(TC_cu, 100);
    display_array(TC_cpu, 100);
    free(TA);
    free(TB);
    free(TC_cu);
    free(TC_cpu);
    log_info("program end");
    return 0;
}
|
114
|
#include "includes.h"
/***********************************************************
By Huahua Wang, the University of Minnesota, twin cities
***********************************************************/
/*
 * Element-wise in-place subtraction X[i] -= Y[i] over `size` elements.
 * Grid-stride loop: any launch configuration covers the whole array.
 */
__global__ void matsub( float* X, float* Y, unsigned int size)
{
    const unsigned int start = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = start; i < size; i += step)
        X[i] -= Y[i];
}
|
115
|
/*
* Check grid and block dimensions
*/
#include<stdio.h>
// Debug kernel: each thread prints its own thread/block indices and the
// block/grid dimensions via device-side printf (output order is arbitrary;
// the buffer is flushed at the next synchronizing host call).
__global__ void checkIndex(void)
{
printf("threadIdx : (%d,%d,%d) blockIdx : (%d,%d,%d)) blockDim : (%d,%d,%d) gridDim : (%d,%d,%d)\n ",threadIdx.x,threadIdx.y,threadIdx.z,blockIdx.x,blockIdx.y,blockIdx.z,blockDim.x,blockDim.y,blockDim.z,gridDim.x,gridDim.y,gridDim.z);
}
/*
 * Launches checkIndex over ceil(n / 3) blocks of 3 threads to demonstrate
 * grid/block indexing, then resets the device.
 */
int main(void)
{
    int n=6;
    dim3 block(3);
    dim3 grid((n+block.x-1)/block.x);
    checkIndex<<<grid,block>>>();
    // BUG FIX: surface launch and execution errors, and flush the device
    // printf buffer explicitly before tearing the context down.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("launch failed: %s\n", cudaGetErrorString(err));
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        printf("kernel failed: %s\n", cudaGetErrorString(err));
    cudaDeviceReset();
    return 0;
}
|
116
|
#include "includes.h"
// Extracts a bounding-box region of interest from a planar (channel-plane)
// float image into a planar output of size output_w x output_h, resampling
// with bilinear interpolation. One thread per output pixel (2D launch).
// in_plane_*/out_plane_* are per-channel element offsets into input/output.
// `mean` is currently unused (mean subtraction is commented out below).
// NOTE(review): for r[] values at the right/bottom edge of the bbox, ceil()
// can index column bbox_x+bbox_w / row bbox_y+bbox_h — confirm the caller
// guarantees those samples stay inside the input image.
__global__ void kernel_extract_roi(float* input, float* output, char* mean, const int input_w, const int output_w, const int output_h, const int in_plane_r, const int in_plane_g, const int in_plane_b, const int out_plane_r, const int out_plane_g, const int out_plane_b, const int bbox_x, const int bbox_y, const int bbox_w, const int bbox_h)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
if( x < output_w && y < output_h)
{
// Source coordinate in the input image for this output pixel.
float r[2] = { float(x) * bbox_w / output_w + bbox_x,
float(y) * bbox_h / output_h + bbox_y };
// The four integer neighbours: (floor,floor), (ceil,floor), (floor,ceil), (ceil,ceil).
int pos[4][2] = { { int(floor(r[0])), int(floor(r[1])) },
{ int( ceil(r[0])), int(floor(r[1])) },
{ int(floor(r[0])), int(ceil(r[1])) },
{ int( ceil(r[0])), int(ceil(r[1])) } };
// Fractional offsets and the corresponding bilinear weights.
float u = r[0]-floor(r[0]);
float v = r[1]-floor(r[1]);
float s[4] = { (1-u)*(1-v), u*(1-v), (1-u)*v, u*v };
// Flat input indices of the four neighbours (within one channel plane).
int map[4] = { pos[0][1]*input_w + pos[0][0], pos[1][1]*input_w + pos[1][0],
pos[2][1]*input_w + pos[2][0], pos[3][1]*input_w + pos[3][0]};
int idx = y * output_w + x;
// Weighted blend per channel, rounded to the nearest integer value.
output[idx+out_plane_r] = round( s[0]*input[map[0]+in_plane_r]
+ s[1]*input[map[1]+in_plane_r]
+ s[2]*input[map[2]+in_plane_r]
+ s[3]*input[map[3]+in_plane_r] );// float(mean[idx+out_plane_r]));
output[idx+out_plane_g] = round( s[0]*input[map[0]+in_plane_g]
+ s[1]*input[map[1]+in_plane_g]
+ s[2]*input[map[2]+in_plane_g]
+ s[3]*input[map[3]+in_plane_g] );//float(mean[idx+out_plane_g]));
output[idx+out_plane_b] = round( s[0]*input[map[0]+in_plane_b]
+ s[1]*input[map[1]+in_plane_b]
+ s[2]*input[map[2]+in_plane_b]
+ s[3]*input[map[3]+in_plane_b] );//float(mean[idx+out_plane_b]));
}
}
|
117
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <device_launch_parameters.h>
#define N 32 //allocate space for vars; this will end up being the number of blocks to iterate over (we want this to be multiples of 32)
// Uppercases c[tid] (by subtracting 32 from its ASCII code) wherever the
// flag b[tid] equals 1. Launched with one thread per block, so blockIdx.x
// selects the character.
__global__ void Caps(char *c, int *b)
{
    int tid = blockIdx.x;
    if (tid >= N)
        return;
    if (b[tid] == 1)
    {
        c[tid] = (char)((int)c[tid] - 32);
    }
}
/*
 * Builds a 32-character test string, marks the positions listed in `a`,
 * uppercases those positions on the GPU via Caps, and prints the result.
 */
int main()
{
    int a[] = {1, 4, 6, 8, 11, 30};
    char String[N];
    int *b;
    char *c;
    //geneate 32 character string
    for(int i=0;i<N;i++){
        if (i % 5 == 0) { String[i] = 'a'; }
        if (i % 5 == 1) { String[i] = 'b'; }
        if (i % 5 == 2) { String[i] = 'c'; }
        if (i % 5 == 3) { String[i] = 'd'; }
        if (i % 5 == 4) { String[i] = 'e'; }
    }
    // Flag array: temp[i] == 1 iff position i appears in `a`.
    int temp[sizeof(String)/sizeof(char)];
    for (int i = 0; i < (sizeof(String)/sizeof(char)); i++)
    {
        temp[i] = 0;
    }
    for (int i = 0; i < (sizeof(a)/sizeof(int)); i++)
    {
        temp[a[i]] = 1;
    }
    // BUG FIX: every CUDA call was previously unchecked; failures would
    // silently print garbage.
    if (cudaMalloc((void**)&c, N * sizeof(char)) != cudaSuccess ||
        cudaMalloc((void**)&b, N * sizeof(int)) != cudaSuccess)
    {
        printf("device allocation failed\n");
        return 1;
    }
    if (cudaMemcpy(b, &temp, N * sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(c, &String, N * sizeof(char), cudaMemcpyHostToDevice) != cudaSuccess)
    {
        printf("host-to-device copy failed\n");
        return 1;
    }
    Caps<<<N, 1>>>(c, b);
    // The blocking D2H copy below also synchronizes with the kernel.
    if (cudaGetLastError() != cudaSuccess ||
        cudaMemcpy(&String, c, N * sizeof(char), cudaMemcpyDeviceToHost) != cudaSuccess)
    {
        printf("kernel launch or device-to-host copy failed\n");
        return 1;
    }
    for (int i = 0; i < N; i++)
    {
        printf("%c", String[i]);
    }
    printf("\n");
    cudaFree(b);
    cudaFree(c);
    return 0;
}
|
118
|
#include <cuda_runtime.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h>
#include <math.h>
#include <unistd.h>
__constant__ int d_n_b[128];
__constant__ double d_mu_nu[128];
//Define some hyperparameters for convenience and clarity.
#define p_init_bound 0.5
#define tau0_init 0.005
#define gamma0_init 0
#define gamma1_init -0.3
#define phi_init 5
#define xi_pi 2
#define sigma2_alpha 5
#define sigma2_nu 5
#define sigma2_delta 5
#define sigma2_gamma0 3
#define kappa_gamma1 0.001
#define tau_gamma1 0.01
#define kappa_phi 1
#define tau_phi 0.1
#define a_p 1
#define b_p 3
#define a_tau0 2
#define b_tau0 0.01
#define tau1 100.0
// Device-side Gamma(a, scale) sampler (R-style implementation):
// - a < 1: Ahrens-Dieter GS algorithm (rejection from a mixture).
// - a >= 1: Ahrens-Dieter GD algorithm (normal approximation with
//   squeeze/quotient/hat acceptance steps).
// Works on a private copy of *state and writes the advanced state back
// before every return. Returns 0 when a == 0 or scale == 0.
// NOTE(review): aa/aaa are function-local and reset to 0 on every call, so
// the "recalculation if a has changed" caches (steps 1 and 4) always
// recompute — harmless, but the [FIXME for threading!] comment is stale.
__device__ double rgamma(curandState_t* state, double a, double scale){
/* Constants : */
const double sqrt32 = 5.656854;
const double exp_m1 = 0.36787944117144232159;/* exp(-1) = 1/e */
/* Coefficients q[k] - for q0 = sum(q[k]*a^(-k))
* Coefficients a[k] - for q = q0+(t*t/2)*sum(a[k]*v^k)
* Coefficients e[k] - for exp(q)-1 = sum(e[k]*q^k)
*/
const double q1 = 0.04166669;
const double q2 = 0.02083148;
const double q3 = 0.00801191;
const double q4 = 0.00144121;
const double q5 = -7.388e-5;
const double q6 = 2.4511e-4;
const double q7 = 2.424e-4;
const double a1 = 0.3333333;
const double a2 = -0.250003;
const double a3 = 0.2000062;
const double a4 = -0.1662921;
const double a5 = 0.1423657;
const double a6 = -0.1367177;
const double a7 = 0.1233795;
/* State variables [FIXME for threading!] :*/
double aa = 0.;
double aaa = 0.;
double s, s2, d; /* no. 1 (step 1) */
double q0, b, si, c;/* no. 2 (step 4) */
double e, p, q, r, t, u, v, w, x, ret_val;
if(scale == 0||a == 0) return 0;
// Work on a register copy of the RNG state; written back before returns.
curandState_t thread_state = *state;
if (a < 1.) { /* GS algorithm for parameters a < 1 */
e = 1.0 + exp_m1 * a;
while (1) {
p = e * curand_uniform_double(&thread_state);
if (p >= 1.0) {
x = -log((e - p) / a);
if (-log(curand_uniform_double(&thread_state)) >= (1.0 - a) * log(x))
break;
} else {
x = exp(log(p) / a);
if (-log(curand_uniform_double(&thread_state)) >= x)
break;
}
}
*state = thread_state;
return scale * x;
}
/* --- a >= 1 : GD algorithm --- */
/* Step 1: Recalculations of s2, s, d if a has changed */
if (a != aa) {
aa = a;
s2 = a - 0.5;
s = sqrt(s2);
d = sqrt32 - s * 12.0;
}
/* Step 2: t = standard normal deviate,
x = (s,1/2) -normal deviate. */
/* immediate acceptance (i) */
t = curand_normal_double(&thread_state);
x = s + 0.5 * t;
ret_val = x * x;
if (t >= 0.0){
*state = thread_state;
return scale * ret_val;
}
/* Step 3: u = 0,1 - uniform sample. squeeze acceptance (s) */
u = curand_uniform_double(&thread_state);
if (d * u <= t * t * t){
*state = thread_state;
return scale * ret_val;
}
/* Step 4: recalculations of q0, b, si, c if necessary */
if (a != aaa) {
aaa = a;
r = 1.0 / a;
q0 = ((((((q7 * r + q6) * r + q5) * r + q4) * r + q3) * r
+ q2) * r + q1) * r;
/* Approximation depending on size of parameter a */
/* The constants in the expressions for b, si and c */
/* were established by numerical experiments */
if (a <= 3.686) {
b = 0.463 + s + 0.178 * s2;
si = 1.235;
c = 0.195 / s - 0.079 + 0.16 * s;
} else if (a <= 13.022) {
b = 1.654 + 0.0076 * s2;
si = 1.68 / s + 0.275;
c = 0.062 / s + 0.024;
} else {
b = 1.77;
si = 0.75;
c = 0.1515 / s;
}
}
/* Step 5: no quotient test if x not positive */
if (x > 0.0) {
/* Step 6: calculation of v and quotient q */
v = t / (s + s);
if (fabs(v) <= 0.25)
q = q0 + 0.5 * t * t * ((((((a7 * v + a6) * v + a5) * v + a4) * v
+ a3) * v + a2) * v + a1) * v;
else
q = q0 - s * t + 0.25 * t * t + (s2 + s2) * log(1.0 + v);
/* Step 7: quotient acceptance (q) */
if (log(1.0 - u) <= q){
*state = thread_state;
return scale * ret_val;
}
}
while (1) {
/* Step 8: e = standard exponential deviate
* u = 0,1 -uniform deviate
* t = (b,si)-double exponential (laplace) sample */
e = -log(curand_uniform_double(&thread_state));
u = curand_uniform_double(&thread_state);
u = u + u - 1.0;
if (u < 0.0)
t = b - si * e;
else
t = b + si * e;
/* Step 9: rejection if t < tau(1) = -0.71874483771719 */
if (t >= -0.71874483771719) {
/* Step 10: calculation of v and quotient q */
v = t / (s + s);
if (fabs(v) <= 0.25)
q = q0 + 0.5 * t * t *
((((((a7 * v + a6) * v + a5) * v + a4) * v + a3) * v
+ a2) * v + a1) * v;
else
q = q0 - s * t + 0.25 * t * t + (s2 + s2) * log(1.0 + v);
/* Step 11: hat acceptance (h) */
/* (if q not positive go to step 8) */
if (q > 0.0) {
w = expm1(q);
/* ^^^^^ original code had approximation with rel.err < 2e-7 */
/* if t is rejected sample again at step 8 */
if (c * fabs(u) <= w * exp(e - 0.5 * t * t))
break;
}
}
} /* repeat .. until `t' is accepted */
x = s + 0.5 * t;
*state = thread_state;
return scale * x * x;
}
// Negative-binomial draw with mean mu and dispersion phi via the
// gamma-Poisson mixture: X ~ Poisson(Gamma(phi, mu/phi)).
__device__ int rnbinom(curandState_t* state, double mu, double phi){
    curandState_t local = *state;
    double rate = rgamma(&local, phi, mu / phi);
    int draw = curand_poisson(&local, rate);
    *state = local;
    return (draw);
}
// Beta(alpha, beta) draw as A/(A+B) with A, B independent standard gammas.
__device__ double rbeta(curandState_t* state, double alpha, double beta){
    curandState_t local = *state;
    double A = rgamma(&local, alpha, 1);
    double B = rgamma(&local, beta, 1);
    *state = local;
    return (A/(A+B));
}
// Seed one curand state per thread (sequence number = global thread id) so
// each thread draws an independent stream.
__global__ void initialize_curand(int seed, curandState_t* d_states, int rand_len){
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= rand_len)
        return;
    curand_init(seed, idx, 0, &d_states[idx]);
}
// Final warp-level stage of a block reduction: folds sdata[tid+32..tid+1]
// into sdata[tid] for tid < 32. Caller must guarantee at least 64 readable
// elements in sdata. Relies on `volatile` accesses and pre-Volta implicit
// warp-synchronous execution.
// NOTE(review): on Volta+ (independent thread scheduling) this pattern needs
// __syncwarp() between steps — confirm the target architecture.
template <class dataType>
__device__ void warpReduce(volatile dataType *sdata, int tid) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
// Grid-stride sum reduction into *sum using dynamic shared memory
// (one dataType slot per thread; launch with blockDim.x * sizeof(dataType)
// shared bytes — declared as double[] and cast for alignment).
// NOTE(review): the unconditional tid+512/256/... accesses assume
// blockDim.x == 1024 exactly; smaller blocks read past the shared
// allocation — confirm the launch configuration.
// NOTE(review): *sum is written by thread 0 of EVERY block with no atomics;
// with more than one block the result is whichever block wrote last —
// presumably intended for single-block launches; verify.
template <class dataType>
__global__ void sum_on_gpu(dataType *arr, dataType *sum, int n_elem){
extern __shared__ double smem[];
dataType *block_level_mem = (dataType *)smem;
int tid = threadIdx.x;
int pos = blockIdx.x * blockDim.x + tid;
int stride = blockDim.x * gridDim.x;
// Each thread accumulates a grid-strided partial sum into its shared slot.
block_level_mem[tid] = 0;
while (pos < n_elem){
block_level_mem[tid] += arr[pos];
pos += stride;
}
__syncthreads();
// Tree reduction over shared memory, halving active threads each step.
if (tid < 512)
block_level_mem[tid] += block_level_mem[tid + 512];
__syncthreads();
if (tid < 256)
block_level_mem[tid] += block_level_mem[tid + 256];
__syncthreads();
if (tid < 128)
block_level_mem[tid] += block_level_mem[tid + 128];
__syncthreads();
if (tid < 64)
block_level_mem[tid] += block_level_mem[tid + 64];
__syncthreads();
if (tid < 32)
warpReduce(block_level_mem, tid);
if(tid == 0)
*sum = block_level_mem[0];
}
// Sum-reduce arr (grid-stride) and divide by *count to produce a mean over a
// pre-selected subset (arr is expected to hold zeros for unselected entries
// and *count their number). Writes 0 when *count is 0.
// NOTE(review): same caveats as sum_on_gpu — assumes blockDim.x == 1024 and
// writes *mean without atomics (single-block launch presumed); confirm.
__global__ void selective_mean_on_gpu(double* arr, double* mean, int* count, int n_elem){
extern __shared__ double block_level_mem[];
int tid = threadIdx.x;
int pos = blockIdx.x * blockDim.x + tid;
int stride = blockDim.x * gridDim.x;
// Per-thread grid-strided partial sums in shared memory.
block_level_mem[tid] = 0;
while(pos<n_elem){
block_level_mem[tid] += arr[pos];
pos += stride;
}
__syncthreads();
// Shared-memory tree reduction.
if (tid < 512)
block_level_mem[tid] += block_level_mem[tid + 512];
__syncthreads();
if (tid < 256)
block_level_mem[tid] += block_level_mem[tid + 256];
__syncthreads();
if (tid < 128)
block_level_mem[tid] += block_level_mem[tid + 128];
__syncthreads();
if (tid < 64)
block_level_mem[tid] += block_level_mem[tid + 64];
__syncthreads();
if (tid < 32)
warpReduce<double>(block_level_mem, tid);
if(tid == 0){
// Guard against an empty selection to avoid division by zero.
if (*count == 0)
*mean = 0;
else
*mean = block_level_mem[0]/count[0];
}
}
// Mean of a double array: grid-stride sum reduction followed by division by
// n_elem. Dynamic shared memory: one double per thread.
// NOTE(review): assumes blockDim.x == 1024 (unconditional tid+512.. reads)
// and writes *mean without atomics — single-block launch presumed; confirm.
__global__ void mean_on_gpu(double *arr, double *mean, int n_elem){
extern __shared__ double block_level_mem[];
int tid = threadIdx.x;
int pos = blockIdx.x * blockDim.x + tid;
int stride = blockDim.x * gridDim.x;
// Per-thread grid-strided partial sums.
block_level_mem[tid] = 0;
while(pos<n_elem){
block_level_mem[tid] += arr[pos];
pos += stride;
}
__syncthreads();
// Shared-memory tree reduction.
if (tid < 512)
block_level_mem[tid] += block_level_mem[tid + 512];
__syncthreads();
if (tid < 256)
block_level_mem[tid] += block_level_mem[tid + 256];
__syncthreads();
if (tid < 128)
block_level_mem[tid] += block_level_mem[tid + 128];
__syncthreads();
if (tid < 64)
block_level_mem[tid] += block_level_mem[tid + 64];
__syncthreads();
if (tid < 32)
warpReduce<double>(block_level_mem, tid);
if(tid == 0)
*mean = block_level_mem[0]/n_elem;
}
// Mean of an int array: integer grid-stride sum reduction, then a double
// division by n_elem. Shared memory is declared as double[] and cast to
// int* (only the first blockDim.x ints are used).
// NOTE(review): same caveats as the double overload — blockDim.x == 1024
// assumed, *mean written without atomics; also the int partial sums can
// overflow for large inputs — confirm expected ranges.
__global__ void mean_on_gpu(int *arr, double *mean, int n_elem){
extern __shared__ double smem[];
int* block_level_mem = (int*)smem;
int tid = threadIdx.x;
int pos = blockIdx.x * blockDim.x + tid;
int stride = blockDim.x * gridDim.x;
// Per-thread grid-strided partial sums.
block_level_mem[tid] = 0;
while(pos<n_elem){
block_level_mem[tid] += arr[pos];
pos += stride;
}
__syncthreads();
// Shared-memory tree reduction.
if (tid < 512)
block_level_mem[tid] += block_level_mem[tid + 512];
__syncthreads();
if (tid < 256)
block_level_mem[tid] += block_level_mem[tid + 256];
__syncthreads();
if (tid < 128)
block_level_mem[tid] += block_level_mem[tid + 128];
__syncthreads();
if (tid < 64)
block_level_mem[tid] += block_level_mem[tid + 64];
__syncthreads();
if (tid < 32)
warpReduce<int>(block_level_mem, tid);
if(tid == 0)
*mean = block_level_mem[0]/(double)n_elem;
}
// Map a flat sample index i to its batch id using the per-batch counts held
// in constant memory (d_n_b): returns the first b whose cumulative count
// exceeds i.
__device__ int get_batch(int i){
    int b = 0;
    int cumulative = d_n_b[0];
    while (i >= cumulative){
        b++;
        cumulative += d_n_b[b];
    }
    return (b);
}
// For each of the n_b cells (input pointers pre-offset to the batch), record
// the log-scaled expression of gene g when the cell belongs to cluster k
// (plus a count flag of 1); zero both outputs otherwise.
__global__ void fill_raw_means(int* d_Y, double* d_delta, int* d_W, double* d_temp_double, int* d_temp_int, int k, int g, int G, int n_b){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    //Please note that input pointers are adjusted by the sample offset.
    if (i >= n_b)
        return;
    bool in_cluster = (d_W[i] == k);
    d_temp_double[i] = in_cluster ? log(1 + d_Y[i*G + g] / exp(d_delta[i])) : 0;
    d_temp_int[i] = in_cluster ? 1 : 0;
}
// Initialize the dropout-model coefficients for all B batches: intercepts
// in d_gamma[0..B-1], slopes in d_gamma[B..2B-1].
__global__ void first_gamma(double* d_gamma, int B){
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx >= B)
        return;
    d_gamma[idx] = gamma0_init;
    d_gamma[B + idx] = gamma1_init;
}
// Initialize mixture weights: within each batch's block of K entries, weight
// of component k is (k+1) / (K*(K+1)/2), so weights sum to 1.
__global__ void first_pi(double* d_pi, int K, int bound){
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx >= bound)
        return;
    int k = idx % K; // component index 0..K-1
    d_pi[idx] = (k + 1)/(K*(K+1)/2.0);
}
// Initialize over-dispersion phi to its default everywhere, and the batch
// effect nu to the difference between this batch's and the reference batch's
// raw means, averaged over the K clusters (batch 0 gets nu = 0).
__global__ void first_nu_phi(double* d_raw_means, double* d_nu, double* d_phi, int G, int K, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound)
        return;
    d_phi[pos] = phi_init;
    int b = pos/G;
    if (b == 0){
        d_nu[pos] = 0; // reference batch carries no batch effect
        return;
    }
    int g = pos % G;
    double batch_sum = d_raw_means[b*K*G + g];
    double ref_sum = d_raw_means[g];
    for (int k = 1; k < K; k++){
        batch_sum += d_raw_means[(b*K + k)*G + g];
        ref_sum += d_raw_means[k*G + g];
    }
    d_nu[pos] = (batch_sum - ref_sum)/K;
}
// Draw each cell's initial cluster label W_i from its batch's mixture
// weights (inverse-CDF sampling) and set the size factor delta_i as the
// log ratio of the cell's total count to the batch's first cell.
__global__ void first_delta_W(curandState_t* d_states, double* d_pi, int* d_sum_per_cell, double* d_delta, int* d_W, int N, int K){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i >= N)
        return;
    int b = get_batch(i);
    //W: inverse-CDF sample of the categorical distribution pi[b, .]
    double u = curand_uniform_double(&d_states[i]);
    int k = 0;
    double cdf = d_pi[b*K];
    while (u > cdf){
        k++;
        cdf += d_pi[b*K + k];
    }
    d_W[i] = k;
    //delta: reference is the first cell of batch b
    int ref_pos = 0;
    for (int bb = 0; bb < b; bb++)
        ref_pos += d_n_b[bb];
    d_delta[i] = log((double)d_sum_per_cell[i]) - log((double)d_sum_per_cell[ref_pos]);
}
// Initialize cluster effects beta and spike-and-slab indicators L.
// Cluster 0 (pos < G) is the baseline: zero effect, inactive indicator.
// Other entries get beta from the raw-mean difference and L from a Bernoulli
// draw on the posterior log-odds.
__global__ void first_beta_L(curandState_t* d_states, double* d_raw_means, double log_rat_base,
double* d_beta, int* d_L, int G, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound)
        return;
    if (pos < G){
        d_beta[pos] = 0;
        d_L[pos] = 0;
        return;
    }
    int g = pos % G; // pos encodes k*G + g
    double beta = d_raw_means[pos] - d_raw_means[g];
    d_beta[pos] = beta;
    double log_rat = log_rat_base + pow(beta,2.0)*(1/tau0_init-1/tau1)/2;
    d_L[pos] = (curand_uniform_double(&d_states[pos]) > (1/(1+exp(log_rat))));
}
// Zero-initialize the indicator array d_Z.
__global__ void first_Z(int* d_Z, int bound){
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx >= bound)
        return;
    d_Z[idx] = 0;
}
// Fill the scratch buffer with log(1 + Y / exp(delta)) for every (cell, gene)
// entry; pos encodes i*G + g, with delta indexed by the cell.
__global__ void fill_mu_nu(int* d_Y_special, double* d_delta_special, double* d_temp_double, int G, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound)
        return;
    int cell = pos/G;
    d_temp_double[pos] = log(1+d_Y_special[pos]/exp(d_delta_special[cell]));
}
// Center the per-batch means against batch 0: d_mean[1..bound] -= d_mean[0].
// d_mean[0] itself is never written, so the shared read is race-free.
__global__ void first_mu_nu(double* d_mean, int bound){
    int tid = threadIdx.x;
    if (tid >= bound)
        return;
    d_mean[tid+1] -= d_mean[0];
}
// Gibbs/MH update of the dropout indicator Z and the imputed count X for
// every (cell i, gene g) pair with an observed zero (d_Y == 0):
// - Z is redrawn from its conditional (forced to 1 when the current X > 0).
// - When Z == 1, a candidate X is drawn from the fitted negative binomial
//   and accepted with a Metropolis-Hastings ratio under the logistic
//   dropout model; when Z == 0, X is reset to 0.
// Entries with d_Y > 0 are left untouched. The curand state is copied to a
// local, advanced, and written back exactly once.
__global__ void update_Z_X(curandState_t* d_states, int* d_Y, int* d_W, double* d_alpha, double* d_beta, double* d_nu,
double* d_delta, double* d_gamma, double* d_phi,
int* d_Z, int* d_X, int B, int N, int G){
int pos = threadIdx.x + blockIdx.x*blockDim.x;
if (pos<N*G){ //pos is i*G + g
curandState_t thread_state = d_states[pos];
int i = pos/G;
int b = get_batch(i);
if(d_Y[pos] == 0){
// Z | X = 0: Bernoulli with success prob sigmoid(gamma0_b); X > 0 forces Z = 1.
if(d_X[pos] == 0)
d_Z[pos] = (curand_uniform_double(&thread_state)>1/(1+exp(d_gamma[b])));
else
d_Z[pos] = 1;
if(d_Z[pos] == 1){
int g = pos - i*G;
int k = d_W[i];
// Linear predictor of the NB mean: alpha_g + beta_kg + nu_bg + delta_i.
double log_mu = d_alpha[g] + d_beta[k*G+g] + d_nu[b*G + g] + d_delta[i];
int new_x = rnbinom(&thread_state, exp(log_mu),d_phi[b*G + g]);
double u =curand_uniform_double(&thread_state);
//Potential danger here, though not a lot.
// MH acceptance ratio of the dropout probabilities at old vs new X.
if(u<=(1+exp(-d_gamma[b]-d_gamma[B+b]*d_X[pos]))/(1+exp(-d_gamma[b]-d_gamma[B+b]*new_x)))
d_X[pos] = new_x;
}
else
d_X[pos] = 0;
}
d_states[pos] = thread_state;
}
}
// One thread per batch: Gaussian random-walk proposal (sd 0.1) for the
// intercept gamma0, and a gamma-distributed proposal for the (negative)
// slope gamma1, keeping the proposed slope negative.
__global__ void propose_gamma(curandState_t* d_states, double* d_proposed_gamma, double* d_gamma,int B){
    int b = threadIdx.x + blockIdx.x*blockDim.x;
    if (b >= B)
        return;
    curandState_t local = d_states[b];
    d_proposed_gamma[b] = curand_normal_double(&local)*0.1 + d_gamma[b];
    d_proposed_gamma[B+b] = -rgamma(&local, -10*d_gamma[B+b],0.1);
    d_states[b] = local;
}
// Per-observation log-likelihood difference (proposed vs current gamma0)
// under the logistic dropout model, for batch b. log(1+exp(.)) is split on
// the sign of the linear term for numerical stability.
__global__ void fill_prop_gamma0(double* d_proposed_gamma, double* d_gamma, int* d_Z_special, int* d_X_special,
double* d_temp_double, int b, int B, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;//bound is n_b*G + g.
    if (pos >= bound)
        return;
    double new_g0 = d_proposed_gamma[b];
    double old_g0 = d_gamma[b];
    double gamma1 = d_gamma[B+b];
    int X = d_X_special[pos];
    int Z = d_Z_special[pos];
    double lin_new = new_g0 + gamma1*X;
    double ll_new = (lin_new > 0) ? new_g0*Z - lin_new - log(1+exp(-lin_new))
                                  : new_g0*Z - log(1+exp(lin_new));
    double lin_old = old_g0 + gamma1*X;
    double ll_old = (lin_old > 0) ? old_g0*Z - lin_old - log(1+exp(-lin_old))
                                  : old_g0*Z - log(1+exp(lin_old));
    d_temp_double[pos] = ll_new - ll_old;
}
// Per-observation log-likelihood difference (proposed vs current gamma1)
// under the logistic dropout model, for batch b. log(1+exp(.)) is split on
// the sign of the linear term for numerical stability.
__global__ void fill_prop_gamma1(double* d_proposed_gamma, double* d_gamma, int* d_Z_special, int* d_X_special,
double* d_temp_double, int b, int B, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;//bound is n_b*G + g.
    if (pos >= bound)
        return;
    double gamma0 = d_gamma[b];
    double new_g1 = d_proposed_gamma[B+b];
    double old_g1 = d_gamma[B+b];
    int X = d_X_special[pos];
    int Z = d_Z_special[pos];
    double lin_new = gamma0 + new_g1*X;
    double ll_new = (lin_new > 0) ? new_g1*X*Z - lin_new - log(1+exp(-lin_new))
                                  : new_g1*X*Z - log(1+exp(lin_new));
    double lin_old = gamma0 + old_g1*X;
    double ll_old = (lin_old > 0) ? old_g1*X*Z - lin_old - log(1+exp(-lin_old))
                                  : old_g1*X*Z - log(1+exp(lin_old));
    d_temp_double[pos] = ll_new - ll_old;
}
// Metropolis-Hastings accept/reject for gamma0, one thread per batch:
// Gaussian prior ratio plus the precomputed data log-ratio d_log_rho.
__global__ void update_gamma0(curandState_t* d_states, double* d_proposed_gamma, double* d_log_rho, double* d_gamma, int B){
    int b = threadIdx.x + blockIdx.x*blockDim.x;
    if (b >= B)
        return;
    double prior_ratio = (pow(d_gamma[b], 2.0) - pow(d_proposed_gamma[b],2.0))/(2*sigma2_gamma0);
    if (log(curand_uniform_double(&d_states[b])) <= (prior_ratio + d_log_rho[b]))
        d_gamma[b] = d_proposed_gamma[b];
}
// Metropolis-Hastings accept/reject for the (negative) slope gamma1, one
// thread per batch. logr_gamma1 combines the Gamma prior ratio on -gamma1
// (shape kappa_gamma1, rate tau_gamma1) with the Hastings correction for
// the asymmetric Gamma proposal used in propose_gamma (shape 10x current).
__global__ void update_gamma1(curandState_t* d_states, double* d_proposed_gamma, double* d_log_rho, double* d_gamma, int B){
int b = threadIdx.x + blockIdx.x*blockDim.x;
if(b<B){
// Work with the positive magnitudes; gamma1 itself is stored negative.
double prev_gamma1 = -d_gamma[B+b];
double new_gamma1 = -d_proposed_gamma[B+b];
double logr_gamma1 = (kappa_gamma1 - 1)*(log(new_gamma1) - log(prev_gamma1))
+ tau_gamma1*(prev_gamma1 - new_gamma1)
- lgamma(10*new_gamma1) + (10*new_gamma1 - 1)*log(prev_gamma1) + 10*new_gamma1*(log(10.0)+1)
+ lgamma(10*prev_gamma1) - (10*prev_gamma1 - 1)*log(new_gamma1) - 10*prev_gamma1*(log(10.0)+1);
if (log(curand_uniform_double(&d_states[b])) <= (logr_gamma1 + d_log_rho[b]))
d_gamma[B+b] = -new_gamma1;
}
}
// Gaussian random-walk proposal (sd 0.1) for each gene's baseline alpha_g.
__global__ void propose_alpha(curandState_t* d_states, double* d_proposed_alpha, double* d_alpha, int G){
    int g = threadIdx.x + blockIdx.x*blockDim.x;
    if (g >= G)
        return;
    d_proposed_alpha[g] = curand_normal_double(&d_states[g])*0.1 + d_alpha[g];
}
// Per-cell negative-binomial log-likelihood ratio for the proposed alpha_g
// versus the current one; one thread per cell.
__global__ void fill_prop_alpha(double* d_proposed_alpha, double* d_alpha, double* d_beta, double* d_nu, double* d_delta,
double* d_phi, int* d_W, int* d_X, double* d_temp_double, int g, int B, int N, int G){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i >= N)
        return;
    int k = d_W[i];
    int b = get_batch(i);
    double phi = d_phi[b*G + g];
    // Linear predictor excluding alpha itself.
    double rest = d_beta[k*G + g] + d_nu[b*G + g] + d_delta[i];
    int X = d_X[i*G + g];
    double prev_alpha = d_alpha[g];
    double new_alpha = d_proposed_alpha[g];
    d_temp_double[i] = (log(phi + exp(prev_alpha + rest))
                        - log(phi + exp(new_alpha + rest)))
                       * (phi + X) + (new_alpha - prev_alpha)*X;
}
// Metropolis-Hastings accept/reject for alpha_g, one thread per gene:
// Gaussian prior ratio around mu_alpha plus the data log-ratio d_log_rho.
__global__ void update_alpha(curandState_t* d_states, double* d_proposed_alpha, double* d_log_rho, double* d_mu_alpha, double* d_alpha, int G){
    int g = threadIdx.x + blockIdx.x*blockDim.x;
    if (g >= G)
        return;
    double prior_ratio = (pow(d_alpha[g] - d_mu_alpha[g], 2.0)- pow(d_proposed_alpha[g] - d_mu_alpha[g], 2.0))/(2*sigma2_alpha);
    if(log(curand_uniform_double(&d_states[g])) <= d_log_rho[g] + prior_ratio)
        d_alpha[g] = d_proposed_alpha[g];
}
// Gibbs update of the spike-and-slab indicators L from their conditional:
// a Bernoulli draw with success probability sigmoid(log_odd), where the
// log-odds combine the inclusion prior p and the two prior variances.
__global__ void update_L(curandState_t* d_states, double* d_p, double* d_beta_special, int* d_L_special, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound)
        return;
    double p = d_p[0];    // slab inclusion probability
    double tau0 = d_p[1]; // spike variance
    //Use [pos+G] to skip k=0.
    double log_odd = log(p) - log(1-p)
        - (log(tau1) - log(tau0))/2.0
        + pow(d_beta_special[pos],2.0)*(1/(2*tau0) - 1/(2*tau1));
    d_L_special[pos] = (curand_uniform_double(&d_states[pos]) > 1/(1+exp(log_odd))) ? 1 : 0;
}
// Gibbs update of the slab inclusion probability p (stored in d_p[0]) from
// its Beta full conditional, given *d_count active indicators out of
// G*(K-1) candidates.
// NOTE(review): uses d_states[0] only — presumably launched <<<1,1>>>; confirm.
__global__ void update_p(curandState_t* d_states, int* d_count, double* d_p, int G, int K){
d_p[0] = rbeta(d_states, *d_count + a_p, G*(K-1) - *d_count + b_p);
}
// Scratch fill for the tau0 update: beta^2 for spike entries (L == 0),
// zero for slab entries; pos encodes (k-1)*G + g.
__global__ void fill_I_L_beta_sq(int* d_L_special, double* d_beta_special, double* d_temp_double, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound)
        return;
    d_temp_double[pos] = (d_L_special[pos] == 0) ? pow(d_beta_special[pos], 2.0) : 0;
}
// Gibbs update of the spike variance tau0 (stored in d_p[1]) from its
// inverse-gamma full conditional, drawn as 1/Gamma(a, 1/b).
// NOTE(review): d_p[1] is read as the staged sum of squared spike betas for
// `b` and then overwritten with the new draw — confirm the caller stages
// that sum there first. The trailing "use 0 position" comment appears
// stale: index 1 is what is used. Presumably launched <<<1,1>>>; confirm.
__global__ void update_tau0(curandState_t* d_states, int* d_count, double* d_p, int G, int K){
double a = a_tau0 + (G*(K-1) - d_count[0])/2.0;
double b = b_tau0 + d_p[1]/2.0;
d_p[1] = 1/rgamma(d_states, a, 1/b); //use 0 position
}
// Gaussian random-walk proposal (sd 0.1) centered at the current beta,
// one thread per (cluster, gene) entry.
__global__ void propose_beta(curandState_t* d_states, double* d_proposed_beta, double* d_beta_special, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound)
        return;
    d_proposed_beta[pos] = curand_normal_double(&d_states[pos])*0.1 + d_beta_special[pos];
}
// Per-cell NB log-likelihood ratio for the proposed beta_{kg} versus the
// current one. Cells outside cluster k contribute zero.
__global__ void fill_prop_beta(double* d_proposed_beta, double* d_beta, double* d_alpha, double* d_nu, double* d_delta,
double* d_phi, int* d_W, int* d_X, double* d_temp_double, int g, int k, int N, int G){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i >= N)
        return;
    if (d_W[i] != k){
        d_temp_double[i] = 0;
        return;
    }
    int b = get_batch(i);
    double rest = d_alpha[g] + d_nu[b*G + g] + d_delta[i]; //mu other than beta
    double new_beta = d_proposed_beta[(k-1)*G + g];
    double prev_beta = d_beta[k*G + g];
    double phi = d_phi[b*G + g];
    int X = d_X[i*G + g];
    d_temp_double[i] = (new_beta - prev_beta)*X
        + (phi + X) *(log(phi + exp(rest + prev_beta))
                      - log(phi + exp(rest + new_beta)));
}
// Metropolis-Hastings accept/reject for beta: the Gaussian prior variance is
// the slab tau1 when L == 1, otherwise the sampled spike variance d_p[1];
// pos encodes (k-1)*G + g.
__global__ void update_beta(curandState_t* d_states, double* d_proposed_beta, double* d_log_rho, int* d_L_special, double* d_beta_special, double* d_p, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound)
        return;
    double variance = (d_L_special[pos] == 1) ? tau1 : d_p[1];
    double logr_beta = (pow(d_beta_special[pos],2.0) - pow(d_proposed_beta[pos], 2.0))/(2*variance);
    if (log(curand_uniform_double(&d_states[pos])) <= d_log_rho[pos] + logr_beta)
        d_beta_special[pos] = d_proposed_beta[pos];
}
/**
 * Random-walk MH proposal for nu: N(current, 0.1^2), one thread per entry.
 * d_nu_special arrives pre-offset past the b==0 (reference batch) block.
 */
__global__ void propose_nu(curandState_t* d_states, double* d_proposed_nu, double* d_nu_special, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound)
        return;
    double step = curand_normal_double(&d_states[pos])*0.1;
    d_proposed_nu[pos] = step + d_nu_special[pos];
}
/**
 * Per-cell terms of the MH log-likelihood ratio for proposed nu[b][g],
 * computed over the n_b cells of batch b. d_W_special, d_X_special and
 * d_delta (via the caller) are pre-offset to the batch's first cell.
 * Reduce d_temp_double afterwards to obtain log rho for this (b, g).
 */
__global__ void fill_prop_nu(double* d_proposed_nu, double* d_nu, double* d_alpha, double* d_beta, double* d_delta, double* d_phi,
                             int* d_W_special, int* d_X_special, double* d_temp_double, int b, int g, int G, int n_b){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i >= n_b)
        return;
    double nu_new = d_proposed_nu[(b-1)*G + g]; // proposals skip the b==0 reference block
    double nu_old = d_nu[b*G + g];
    double base_mu = d_alpha[g] + d_beta[d_W_special[i]*G + g] + d_delta[i]; // log-mean without the nu term
    double disp = d_phi[b*G + g];
    int count = d_X_special[i*G + g];
    d_temp_double[i] = (nu_new - nu_old)*count
                       + (disp + count)*(log(disp + exp(base_mu + nu_old))
                                         - log(disp + exp(base_mu + nu_new)));
}
/**
 * MH accept/reject for each (pre-offset) nu entry with a
 * N(d_mu_nu[b-1], sigma2_nu) prior; d_log_rho holds the data ratio.
 */
__global__ void update_nu(curandState_t* d_states, double* d_proposed_nu, double* d_log_rho, double* d_nu_special, int G, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound)
        return;
    // pos indexes (b-1)*G + g, so pos/G recovers the shifted batch index.
    double prior_mean = d_mu_nu[pos/G];
    double logr_nu = (pow(d_nu_special[pos] - prior_mean, 2.0) - pow(d_proposed_nu[pos] - prior_mean, 2.0))/(2*sigma2_nu);
    if (log(curand_uniform_double(&d_states[pos])) <= logr_nu + d_log_rho[pos])
        d_nu_special[pos] = d_proposed_nu[pos];
}
/**
 * Random-walk MH proposal for the per-cell size factors delta:
 * N(current, 0.1^2), one thread per cell.
 */
__global__ void propose_delta(curandState_t* d_states, double* d_proposed_delta, double* d_delta, int N){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i >= N)
        return;
    double step = curand_normal_double(&d_states[i])*0.1;
    d_proposed_delta[i] = step + d_delta[i];
}
/**
 * Per-gene terms of the MH log-likelihood ratio for a proposed delta[i],
 * one thread per gene g. Reduce d_temp_double over G afterwards to obtain
 * log rho for cell i.
 */
__global__ void fill_prop_delta(double* d_proposed_delta, double* d_delta, double* d_alpha, double* d_beta, double* d_nu, double* d_phi,
                                int* d_W, int* d_X, double* d_temp_double, int i, int G){
    int g = threadIdx.x + blockIdx.x*blockDim.x;
    if (g >= G)
        return;
    int b = get_batch(i);
    double base_mu = d_alpha[g] + d_beta[d_W[i]*G + g] + d_nu[b*G + g]; // log-mean without the delta term
    double disp = d_phi[b*G + g];
    double delta_new = d_proposed_delta[i];
    double delta_old = d_delta[i];
    int count = d_X[i*G + g];
    d_temp_double[g] = (delta_new - delta_old)*count
                       + (disp + count)*(log(disp + exp(base_mu + delta_old))
                                         - log(disp + exp(base_mu + delta_new)));
}
/**
 * MH accept/reject for the per-cell size factors delta. The first cell of
 * every batch is kept fixed (detected by i landing exactly on a cumulative
 * batch-size boundary, including i == 0); the prior is N(mu_delta, sigma2_delta).
 */
__global__ void update_delta(curandState_t* d_states, double* d_proposed_delta, double* d_log_rho, double* d_mu_delta, double* d_delta, int N){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i >= N)
        return;
    // Walk the batch sizes: if i is a cumulative-sum boundary, remaining ends at 0.
    int remaining = i;
    int b = 0;
    while (remaining > 0)
        remaining -= d_n_b[b++];
    if (remaining == 0)
        return; // first cell of some batch: delta is pinned for identifiability
    double logr_delta = (pow(d_delta[i] - d_mu_delta[i], 2.0)-pow(d_proposed_delta[i] - d_mu_delta[i], 2.0))/(2*sigma2_delta);
    if (log(curand_uniform_double(&d_states[i])) <= logr_delta + d_log_rho[i])
        d_delta[i] = d_proposed_delta[i];
}
/**
 * MH proposal for the dispersions phi: Gamma(shape = current phi, rate 1),
 * one thread per (b, g) entry. The RNG state is staged through a register
 * copy because rgamma advances it an unpredictable number of times.
 */
__global__ void propose_phi(curandState_t* d_states, double* d_proposed_phi, double* d_phi, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound)
        return; // pos indexes b*G + g
    curandState_t local_state = d_states[pos];
    d_proposed_phi[pos] = rgamma(&local_state, d_phi[pos], 1);
    d_states[pos] = local_state; // persist the advanced state
}
/**
 * Per-cell terms of the MH log-likelihood ratio for a proposed dispersion
 * phi[b][g], one thread per cell of batch b. All *_special pointers arrive
 * PRE-OFFSET by the caller: proposed_phi/phi/nu by b*G+g, alpha/beta by g,
 * W/delta by the batch's first sample index, X by sample_index*G + g.
 * Reduce d_temp_double over n_b[b] afterwards to obtain log rho.
 */
__global__ void fill_prop_phi(double* d_proposed_phi_special, double* d_phi_special, double* d_alpha_special, double* d_beta_special,
                              double* d_nu_special, double* d_delta_special, int* d_W_special, int* d_X_special, double* d_temp_double,
                              int b, int G){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i<d_n_b[b]){
        double new_phi = *d_proposed_phi_special;
        double prev_phi = *d_phi_special;
        int X = d_X_special[i*G];//offset by sample_index*G + g
        //offset alpha by g, beta by g, W by sample_index, nu by bg, delta by sample_index
        double eta = exp(*d_alpha_special + d_beta_special[d_W_special[i]*G] + *d_nu_special + d_delta_special[i]);
        // Negative-binomial log-density ratio for count X under mean eta:
        // log NB(X | eta, new_phi) - log NB(X | eta, prev_phi).
        d_temp_double[i] = lgamma(new_phi + X) + new_phi*log(new_phi)
                - lgamma(new_phi) - (new_phi + X)*log(new_phi + eta)
                + lgamma(prev_phi) + (prev_phi + X)*log(prev_phi + eta)
                - lgamma(prev_phi + X) - prev_phi*log(prev_phi);
    }
}
/**
 * MH accept/reject for each dispersion phi[b][g]. logr_phi combines the
 * Gamma(kappa_phi, tau_phi) prior ratio with the asymmetric Gamma(cur, 1)
 * proposal correction; d_log_rho carries the data likelihood ratio.
 */
__global__ void update_phi(curandState_t* d_states, double* d_proposed_phi, double* d_log_rho, double* d_phi, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound)
        return; // pos indexes b*G + g
    double phi_new = d_proposed_phi[pos];
    double phi_old = d_phi[pos];
    double logr_phi = (kappa_phi - phi_old)*log(phi_new)
                      - (kappa_phi - phi_new)*log(phi_old)
                      + (1 - tau_phi)*(phi_new - phi_old)
                      + lgamma(phi_old) - lgamma(phi_new);
    if (log(curand_uniform_double(&d_states[pos])) <= logr_phi + d_log_rho[pos])
        d_phi[pos] = phi_new;
}
/**
 * Draws a uniform cell-type proposal in {0, ..., K-1} for every cell.
 * curand_uniform_double returns a value in (0, 1], so the scaled value lies
 * in (0, K] and ceil(.)-1 lands in {0, ..., K-1} — matching the original
 * counting loop, including the u == 1.0 edge case.
 */
__global__ void propose_W(curandState_t* d_states, int* d_proposed_W, int N, int K){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i >= N)
        return;
    double scaled = curand_uniform_double(&d_states[i])*K;
    d_proposed_W[i] = (int)ceil(scaled) - 1;
}
/**
 * Per-gene terms of the MH log-likelihood ratio for switching cell i from
 * its current type to the proposed one (only beta changes in the log-mean).
 * Reduce d_temp_double over G afterwards to obtain log rho for cell i.
 */
__global__ void fill_prop_W(int* d_proposed_W, int* d_W, double* d_alpha, double* d_beta, double* d_nu, double* d_delta,
                            double* d_phi, int* d_X, double* d_temp_double, int i, int G){
    int g = threadIdx.x + blockIdx.x*blockDim.x;
    if (g >= G)
        return;
    int bg = get_batch(i)*G + g;
    double beta_new = d_beta[d_proposed_W[i]*G+g];
    double beta_old = d_beta[d_W[i]*G+g];
    double base_mu = d_alpha[g] + d_nu[bg] + d_delta[i]; // log-mean without the beta term
    double disp = d_phi[bg];
    int count = d_X[i*G+g];
    d_temp_double[g] = (beta_new-beta_old)*count
                       + (disp + count)*(log(disp + exp(base_mu + beta_old))
                                         - log(disp + exp(base_mu + beta_new)));
}
/**
 * MH accept/reject of the proposed cell types. The acceptance ratio is the
 * batch-specific mixing-proportion ratio pi[b][new]/pi[b][old] times the
 * data likelihood ratio carried in d_log_rho.
 */
__global__ void update_W(curandState_t* d_states, int* d_proposed_W, double* d_log_rho, double* d_pi, int* d_W, int N, int K){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i >= N)
        return;
    int b = get_batch(i);
    int w_new = d_proposed_W[i];
    double log_prior_ratio = log(d_pi[b*K + w_new]/d_pi[b*K + d_W[i]]);
    if (log(curand_uniform_double(&d_states[i])) <= log_prior_ratio + d_log_rho[i])
        d_W[i] = w_new;
}
/**
 * Writes the indicator I(W == k) for each cell in one batch, so a follow-up
 * reduction yields the per-type cell count. d_W_special is pre-offset to the
 * batch's first cell.
 */
__global__ void fill_I_W(int* d_W_special, int* d_temp_int, int k, int n_b){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos < n_b)
        d_temp_int[pos] = (d_W_special[pos] == k) ? 1 : 0;
}
/**
 * Samples pi_b ~ Dirichlet(counts + xi_pi) for one batch: each of the first
 * K threads draws a Gamma variate via the Marsaglia-Tsang method, then all
 * draws are normalised by their sum (staged in one shared-memory double).
 * Launch with a single block, blockDim.x >= K, and sizeof(double)+ shared mem.
 *
 * Fixes two synchronisation defects in the previous version:
 *  - __syncthreads() sat inside the divergent `if (tid < K)` branch while the
 *    caller launches 1024 threads with K < 1024, which is undefined behavior;
 *  - thread 0 summed d_pi[0..K-1] with no barrier after the other threads'
 *    writes, so it could read stale values.
 */
__global__ void update_pi(curandState_t* d_states, int* d_count, double* d_pi, int K){
    extern __shared__ double smem[];
    int tid = threadIdx.x;
    if (tid < K){
        curandState_t thread_state = d_states[tid];
        double d = d_count[tid] + xi_pi - 1/3.0; // Marsaglia-Tsang shift d = shape - 1/3
        while(1){
            double z = curand_normal_double(&thread_state);
            double v = pow((1 + z/(3*sqrt(d))),3.0);
            // Marsaglia & Tsang (2000) acceptance test.
            if(v > 0 && log(curand_uniform_double(&thread_state)) < 0.5*z*z + d*(1 - v + log(v))){
                d_states[tid] = thread_state; // Send back the latest state.
                d_pi[tid] = d*v;              // unnormalised Gamma draw
                break;
            }
        }
    }
    __syncthreads(); // all Gamma draws written before thread 0 reads them
    if (tid == 0){
        double total = 0;
        for(int i=0; i<K; i++)
            total += d_pi[i];
        smem[0] = total;
    }
    __syncthreads(); // the sum is visible to every thread
    if (tid < K)
        d_pi[tid] /= smem[0];
}
/**
 * Appends the current gamma draws to the preserved-sample matrix, laid out
 * as d_preserved_gamma[param*n_preserved + iter]. bound is 2*B.
 */
__global__ void store_gamma(double* d_gamma, double* d_preserved_gamma, int iter, int n_preserved, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound) // pos indexes 0/1*B + b
        return;
    d_preserved_gamma[pos*n_preserved + iter] = d_gamma[pos];
}
/**
 * Appends the current alpha draws to the preserved-sample matrix, laid out
 * as d_preserved_alpha[g*n_preserved + iter].
 */
__global__ void store_alpha(double* d_alpha, double* d_preserved_alpha, int iter, int n_preserved, int G){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= G) // pos is g
        return;
    d_preserved_alpha[pos*n_preserved + iter] = d_alpha[pos];
}
/**
 * Appends the current L and beta draws to their preserved-sample matrices.
 * Both input arrays arrive pre-offset past the k==0 reference block, so pos
 * indexes (k-1)*G + g.
 */
__global__ void store_L_beta(int* d_L_special, int* d_preserved_L, double* d_beta_special, double* d_preserved_beta, int iter, int n_preserved, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound)
        return;
    int slot = pos*n_preserved + iter;
    d_preserved_L[slot] = d_L_special[pos];
    d_preserved_beta[slot] = d_beta_special[pos];
}
/**
 * Appends the current nu and phi draws to their preserved-sample matrices.
 * pos indexes b*G + g; the nu entries of batch 0 (pos < G) are not stored,
 * so the preserved nu matrix holds (B-1)*G rows.
 */
__global__ void store_nu_phi(double* d_nu, double* d_preserved_nu, double* d_phi, double* d_preserved_phi, int iter, int n_preserved, int G, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound)
        return;
    if(pos>=G)
        d_preserved_nu[(pos-G)*n_preserved + iter] = d_nu[pos];
    d_preserved_phi[pos*n_preserved + iter] = d_phi[pos];
}
/**
 * Appends the current delta and W draws to their preserved-sample matrices,
 * laid out as [i*n_preserved + iter].
 */
__global__ void store_delta_W(double* d_delta, double* d_preserved_delta, int* d_W, int* d_preserved_W, int iter, int n_preserved, int N){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i >= N)
        return;
    int slot = i*n_preserved + iter;
    d_preserved_delta[slot] = d_delta[i];
    d_preserved_W[slot] = d_W[i];
}
/**
 * Appends the current pi draws to the preserved-sample matrix, laid out as
 * d_preserved_pi[(b*K + k)*n_preserved + iter]. bound is B*K.
 */
__global__ void store_pi(double* d_pi, double* d_preserved_pi, int iter, int n_preserved, int bound){
    int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= bound) // pos indexes b*K + k
        return;
    d_preserved_pi[pos*n_preserved + iter] = d_pi[pos];
}
/**
 * Writes the indicator I(preserved W draw == k) for every stored iteration
 * of one cell; a follow-up reduction counts how often the cell was type k.
 */
__global__ void fill_post_W(int* d_preserved_W, int* d_temp_int, int k, int n_preserved){
    int iter = threadIdx.x + blockIdx.x*blockDim.x;
    if (iter >= n_preserved)
        return;
    d_temp_int[iter] = (d_preserved_W[iter] == k) ? 1 : 0;
}
/**
 * Posterior mode per element: for each of `length` positions, picks the type
 * k in [0, types) with the largest count in d_sum (layout [k*length + pos]);
 * ties keep the lowest k.
 */
__global__ void mode_on_gpu(int* d_sum, int* d_post, int length, int types){
    int pos = blockIdx.x*blockDim.x + threadIdx.x;
    if (pos >= length)
        return;
    int best_type = 0;
    int best_count = d_sum[pos];
    for (int k=1; k<types; k++){
        int candidate = d_sum[k*length+pos];
        if(candidate>best_count){
            best_type = k;
            best_count = candidate;
        }
    }
    d_post[pos] = best_type;
}
/**
 * Posterior mode for binary indicators: 1 when the stored count exceeds half
 * of `max` (the number of preserved iterations), 0 otherwise.
 */
__global__ void binary_mode_on_gpu(int* d_sum, int* d_post, int length, int max){
    int pos = blockIdx.x*blockDim.x + threadIdx.x;
    if (pos >= length)
        return;
    d_post[pos] = (d_sum[pos]*2>max) ? 1 : 0;
}
/**
 * Per-gene observed-data log-likelihood for one cell under one cell type,
 * marginalising the dropout indicator. One thread per gene. All *_special
 * pointers arrive pre-offset by the caller (beta/nu/phi to this batch/type,
 * delta/Y to this cell). Reduce d_temp_double over G afterwards.
 */
__global__ void fill_log_likelihood_1(double* d_alpha, double* d_beta_special, double* d_nu_special, double* d_delta_special,
                                      double* d_gamma_0, double* d_gamma_1, double* d_phi_special,
                                      int* d_Y_special, double* d_temp_double, int G){
    int g = threadIdx.x + blockIdx.x*blockDim.x;
    if (g < G){
        double mu = exp(d_alpha[g] + d_beta_special[g] + d_nu_special[g] + *d_delta_special); // NB mean
        double phi = d_phi_special[g];
        int Y = d_Y_special[g];
        double pbgk = mu/(mu + phi); // NB success probability
        double logp, log1mp;
        // Clamp log(p) / log(1-p) at -250 to avoid -inf when p is at either extreme.
        if (pbgk < exp(-250.0)){
            logp = -250;
            log1mp = log(1-pbgk);
        }else if (1-pbgk < exp(-250.0)){
            logp = log(pbgk);
            log1mp = -250;
        }else{
            logp = log(pbgk);
            log1mp = log(1-pbgk);
        }
        if (Y){
            // Observed nonzero count: NB log-density plus the log-probability
            // of NOT being dropped out under the logistic dropout model.
            d_temp_double[g] = - log(1 + exp(*d_gamma_0 + *d_gamma_1*Y))
                               + lgamma(phi + Y) - lgamma((double)1+Y) - lgamma(phi)
                               + Y * logp + phi * log1mp;
        }else{
            // Observed zero: sum over plausible true counts x (truncated at
            // 3*mu) of P(dropout | x) * NB(x), accumulated in log space.
            int x_max = (int)3 * mu; // truncates 3*mu to int — basically a floor function
            double sum_lr0 = phi * log1mp; // x = 0 term: NB(0) with no dropout factor
            for (int x=1; x<x_max; x++){
                double temp = *d_gamma_0 + *d_gamma_1 * x
                              - log(1 + exp(*d_gamma_0 + *d_gamma_1*x))
                              + lgamma(phi + x) - lgamma((double)1+x) - lgamma(phi)
                              + x * logp + phi * log1mp;
                // Streaming log-sum-exp: fold `temp` into the running total
                // anchored at whichever term is larger.
                if(temp > sum_lr0)
                    sum_lr0 = temp + log(1 + exp(sum_lr0 - temp));
                else
                    sum_lr0 += log(1 + exp(temp - sum_lr0));
            }
            d_temp_double[g] = sum_lr0;
        }
    }
}
/**
 * Per-cell marginal log-likelihood: a log-sum-exp over cell types of the
 * per-type partial log-likelihoods (layout [k*N + i]) weighted by the batch's
 * mixing proportions pi[b][k]. One thread per cell.
 */
__global__ void fill_log_likelihood_2(double* d_pi, double* d_log_likelihood_partial, double* d_temp_double, int N, int K){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i >= N)
        return;
    int b = get_batch(i);
    double peak = d_log_likelihood_partial[i]; // k == 0 term
    for (int k=1; k<K; k++){
        if (d_log_likelihood_partial[k*N + i] > peak)
            peak = d_log_likelihood_partial[k*N + i];
    }
    double total = 0;
    for (int k=0; k<K; k++){
        total += d_pi[b*K + k] * exp(d_log_likelihood_partial[k*N + i] - peak);
    }
    d_temp_double[i] = peak + log(total);
}
int main(int argc, char **argv){
//BUSseq_gpu -b B -n n_b[0] n_b[2]...n_b[B-1] -g G -k K -c count_data.txt -iter 1000 -burn 300
int B = 4; //Number of batches
int n_b[200] = {300, 300, 200, 200}; // Sample size
int G = 3000; //Number of genomic locations
int K = 5; //Number of celltypes
int seed;
char count_data[200] = "count_data/demo_count.txt";
int n_iter = 4000; //Number of iterations
int n_burnin = 2000; //Number of burn-in iterations
int n_unchanged = 500;
int print_preserved = 0; //Default: does not print all preserved iterations.
char output_file[200] = "demo_output";
char n_file[200];
int n_file_flag = 0;
int burnin_flag = 1;
int unchanged_flag = 1;
int seed_flag = 1;
int opt;
while ((opt = getopt (argc, argv, "B:N:G:K:s:c:i:b:u:po:")) != -1){
switch(opt){
case 'B':
B = atoi(optarg);
break;
case 'N':
n_file_flag = 1;
strcpy(n_file, optarg);
break;
case 'G':
G = atoi(optarg);
break;
case 'K':
K = atoi(optarg);
break;
case 's':
seed_flag = 0;
seed = atoi(optarg);
break;
case 'c':
strcpy(count_data, optarg);
break;
case 'i':
n_iter = atoi(optarg);
break;
case 'b':
burnin_flag = 0;
n_burnin = atoi(optarg);
break;
case 'u':
unchanged_flag = 0;
n_unchanged = atoi(optarg);
break;
case 'p':
print_preserved = 1;
break;
case 'o':
strcpy(output_file, optarg);
break;
default:
printf("Error Usage: %s [-b batch] [-n sample_size_file] [-g gene] [-k celltype] [-c count_data_file)] [-i iterations] [-b burnin] [-u unchanged_iterations] [-p print_preserved] [-o output_prefix] \n", argv[0]);
exit(1);
}
}
if (argc > 1){
if (n_file_flag){
FILE* batch_file;
batch_file = fopen(n_file, "r");
for (int b=0; b<B; b++)
fscanf(batch_file, "%d", &n_b[b]);
fclose(batch_file);
}
if (burnin_flag){
n_burnin = n_iter/2;
}
if (unchanged_flag){
if (0.3*n_iter > 500)
n_unchanged = 0.3*n_iter;
else
n_unchanged = 500;
}
}
// Get seed
if (seed_flag){
struct timeval currentTime;
gettimeofday(¤tTime, NULL);
seed = (int) currentTime.tv_usec;
}
printf("Seed is %d\n", seed);
//Run checking on user input.
if(n_burnin > n_iter){
printf("Burn-in iterations must be less than total number of iterations.\n");
return (-1);
}
if(B<2){
printf("The batch number must be greater than one.\n");
return (-1);
}
for (int b=0; b<B; b++){
if (n_b[b]<K){
printf("The sample size in any batch must be greater than the assumed cell type number.\n");
return (-1);
}
}
//Define some parameters.
int N = 0;
for (int b=0; b<B; b++) N += n_b[b];
cudaMemcpyToSymbol(d_n_b, n_b, B*sizeof(int));
int n_preserved = n_iter - n_burnin;
int threads_per_block = 1024;
//Read file.
printf("Start reading file.\n");
FILE* myFile;
myFile = fopen(count_data,"r");
//char gene_names[G][32];
//Array designed in the way such that the i-th sample's g-th gene is Y[i*G + g]
int* h_Y = (int *)malloc(N * G * sizeof(int));
int* d_Y; cudaMalloc(&d_Y, N*G*sizeof(int));
//First, read in the gene names, which is the first row of the file.
//for (int g=0; g<G; g++)
// fscanf(myFile, "%s", gene_names[g]);
//Read counts
for (int i=0; i<N; i++)
for (int g=0; g<G; g++)
fscanf(myFile,"%d",&h_Y[i*G + g]);
fclose(myFile);
cudaMemcpy(d_Y, h_Y, N*G*sizeof(int), cudaMemcpyHostToDevice);
free(h_Y);
//1.Initialize
printf("Start initialization.\n");
//Initialize cuRandStates
int rand_len = N*G; //rand_len is the length of the longest random variable necessary.
curandState_t* d_states; cudaMalloc(&d_states, rand_len*sizeof(curandState_t));
initialize_curand <<< (rand_len-1)/threads_per_block + 1, threads_per_block>>>(seed, d_states, rand_len);
//Initialize first iteration.
srand(seed);
double p[2] = {((double)rand()*p_init_bound)/RAND_MAX, tau0_init}; //p and tau0
double* d_p; cudaMalloc(&d_p, 2*sizeof(double));
cudaMemcpy(d_p, p, 2*sizeof(double),cudaMemcpyHostToDevice);
//d_tau is defined as a constant.
double* d_gamma; cudaMalloc(&d_gamma, B*2*sizeof(double)); //[0/1*B + b]
double* d_pi; cudaMalloc(&d_pi, K*B*sizeof(double)); //[b*K + k]
double* d_phi; cudaMalloc(&d_phi, B*G*sizeof(double));//[b*G + g]
int* d_W; cudaMalloc(&d_W, N*sizeof(int));//[i]
int* d_sum_per_cell; cudaMalloc(&d_sum_per_cell, N*sizeof(int));
double* d_delta; cudaMalloc(&d_delta, N*sizeof(double));//[i]
double* d_alpha; cudaMalloc(&d_alpha, G*sizeof(double));//[g]
double* d_beta; cudaMalloc(&d_beta, G*K*sizeof(double));//[k*G + g]
int* d_L; cudaMalloc(&d_L, G*K*sizeof(int));//[k*G + g]
double log_rat_base = log(p[0]/(1-p[0])) + (log(tau0_init)- log(tau1))/2;
double* d_nu; cudaMalloc(&d_nu, B*G*sizeof(double));//[b*G + g]
int* d_Z; cudaMalloc(&d_Z, N*G*sizeof(int));//[i*G + g]
int* d_X; cudaMalloc(&d_X, N*G*sizeof(int));//[i*G + g]
double* d_mu_alpha; cudaMalloc(&d_mu_alpha, G*sizeof(double));
double* d_mu_delta; cudaMalloc(&d_mu_delta, N*sizeof(double));
double* d_raw_means; cudaMalloc(&d_raw_means, B*K*G*sizeof(double));//[b*K*G + k*G + g]
int* d_temp_int;
if(N>n_preserved)
cudaMalloc(&d_temp_int, N*sizeof(int));
else
cudaMalloc(&d_temp_int, n_preserved*sizeof(int));
double* d_temp_double; cudaMalloc(&d_temp_double, N*G*sizeof(double));
int* d_count; cudaMalloc(&d_count, K*N*sizeof(int));
double* d_mean; cudaMalloc(&d_mean, B*sizeof(double));
for(int i=0; i<N; i++)
sum_on_gpu <int> <<<1, threads_per_block, threads_per_block*sizeof(int)>>> (&d_Y[i*G], &d_sum_per_cell[i], G);
first_pi <<<(B*K-1)/ threads_per_block+1, threads_per_block>>> (d_pi, K, B*K);
first_delta_W <<<(N-1)/threads_per_block+1, threads_per_block>>> (d_states, d_pi, d_sum_per_cell, d_delta, d_W, N, K);
int sample_index = 0;
for (int b=0; b<B; b++){
if (b>0)
sample_index += n_b[b-1];
for (int k=0; k<K; k++){
for (int g=0; g<G; g++){
fill_raw_means <<<(n_b[b]-1)/threads_per_block + 1, threads_per_block>>> (&d_Y[sample_index*G], &d_delta[sample_index], &d_W[sample_index], d_temp_double, d_temp_int, k, g, G, n_b[b]);
sum_on_gpu <int> <<<1, threads_per_block, threads_per_block*sizeof(int)>>> (d_temp_int, d_count, n_b[b]);
selective_mean_on_gpu <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, &d_raw_means[b*K*G + k*G + g], d_count, n_b[b]);
}
}
}
first_gamma <<<(B-1)/threads_per_block+1, threads_per_block>>> (d_gamma, B);
first_nu_phi <<<(B*G-1)/threads_per_block+1, threads_per_block>>> (d_raw_means, d_nu, d_phi, G, K, B*G);
first_beta_L <<<(G*K-1)/threads_per_block+1, threads_per_block>>> (d_states, d_raw_means, log_rat_base, d_beta, d_L, G, G*K);
first_Z <<<(N*G-1)/threads_per_block+1, threads_per_block>>> (d_Z, N*G);
cudaMemcpy(d_alpha, d_raw_means, G*sizeof(double), cudaMemcpyDeviceToDevice);
cudaMemcpy(d_X, d_Y, N*G*sizeof(int), cudaMemcpyDeviceToDevice);//Because d_X is initialized to be the same as observed
cudaMemcpy(d_mu_alpha, d_raw_means, G*sizeof(double), cudaMemcpyDeviceToDevice);
sample_index = 0;
for (int b=0; b<B; b++){
fill_mu_nu <<<(n_b[b]*G-1)/threads_per_block + 1, threads_per_block>>> (&d_Y[sample_index*G], &d_delta[sample_index], d_temp_double, G, n_b[b]*G);
mean_on_gpu <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, &d_mean[b], n_b[b]*G);
sample_index += n_b[b];
}
first_mu_nu <<<1, threads_per_block>>> (d_mean, B-1);
double* h_mu_nu = (double*)malloc((B-1)*sizeof(double));
cudaMemcpy(h_mu_nu, &d_mean[1], (B-1)*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpyToSymbol(d_mu_nu, h_mu_nu, (B-1)*sizeof(double));
free(h_mu_nu);
cudaMemcpy(d_mu_delta, d_delta, N*sizeof(double), cudaMemcpyDeviceToDevice);
//2.MCMC
printf("Start MCMC.\n");
//Declare some arrays before going into MCMC.
int log_rho_len;
if (B>K)
log_rho_len = G*B;
else
log_rho_len = G*K;
if (N>G)
log_rho_len = log_rho_len/G * N;
double* d_log_rho; cudaMalloc(&d_log_rho, log_rho_len*sizeof(double)); //To be used for all MH steps.
//I defined it to be G*K(or B). This is not a mistake, rest assured.
double* d_proposed_gamma; cudaMalloc(&d_proposed_gamma, B*2*sizeof(double));
double* d_proposed_alpha; cudaMalloc(&d_proposed_alpha, G*sizeof(double));
double* d_proposed_beta; cudaMalloc(&d_proposed_beta, G*(K-1)*sizeof(double));
double* d_proposed_nu; cudaMalloc(&d_proposed_nu, G*(B-1)*sizeof(double));
double* d_proposed_delta; cudaMalloc(&d_proposed_delta, N*sizeof(double));
double* d_proposed_phi; cudaMalloc(&d_proposed_phi, B*G*sizeof(double));
int* d_proposed_W; cudaMalloc(&d_proposed_W, N*sizeof(int));
for (int iter=0; iter<n_burnin; iter++){
//Z and X
update_Z_X <<<(N*G-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_Y, d_W, d_alpha, d_beta, d_nu,
d_delta, d_gamma, d_phi,
d_Z, d_X, B, N, G);//If there are not enough threads, may have to separate Z and X.
// gamma
propose_gamma <<<(B-1)/threads_per_block + 1, threads_per_block>>>(d_states, d_proposed_gamma, d_gamma,B);
//Fill gamma0
sample_index = 0;
for (int b=0; b<B; b++){
if (b>0)
sample_index += n_b[b-1];
fill_prop_gamma0 <<<(n_b[b]*G-1)/threads_per_block+1, threads_per_block>>> (d_proposed_gamma, d_gamma, &d_Z[sample_index*G],
&d_X[sample_index*G], d_temp_double, b, B, n_b[b]*G);
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, &d_log_rho[b], n_b[b]*G);
}
update_gamma0 <<< (B-1)/threads_per_block + 1, threads_per_block >>> (d_states, d_proposed_gamma, d_log_rho, d_gamma, B);
//Fill gamma1
sample_index = 0;
for (int b=0; b<B; b++){
if (b>0)
sample_index += n_b[b-1];
fill_prop_gamma1 <<<(n_b[b]*G-1)/threads_per_block+1, threads_per_block>>> (d_proposed_gamma, d_gamma, &d_Z[sample_index*G],
&d_X[sample_index*G], d_temp_double, b, B, n_b[b]*G);
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, &d_log_rho[b], n_b[b]*G);
}
update_gamma1 <<< (B-1)/threads_per_block + 1, threads_per_block >>> (d_states, d_proposed_gamma, d_log_rho, d_gamma, B);
//alpha
propose_alpha <<<(G-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_alpha, d_alpha, G);
for (int g=0; g<G; g++){
fill_prop_alpha <<< (N-1)/threads_per_block + 1, threads_per_block >>>(d_proposed_alpha, d_alpha, d_beta, d_nu, d_delta, d_phi,
d_W, d_X, d_temp_double, g, B, N, G);
sum_on_gpu <double> <<< 1, threads_per_block, threads_per_block*sizeof(double) >>>(d_temp_double, &d_log_rho[g], N);
}
update_alpha <<< (G-1)/threads_per_block + 1, threads_per_block >>>(d_states, d_proposed_alpha, d_log_rho, d_mu_alpha, d_alpha, G);
//L
update_L <<<(G*(K-1)-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_p, &d_beta[G], &d_L[G], G*(K-1));
//p
if(iter >= n_unchanged){
//Fill d_count[0] with sum of d_L and d_p[1] with sum of I(d_L==0)*d_beta^2
//I used d_p[1] to carry the second sum. Its value will be replaced anyway.
sum_on_gpu <int> <<<1, threads_per_block, threads_per_block*sizeof(int)>>> (&d_L[G], &d_count[0], G*(K-1));
fill_I_L_beta_sq <<<(G*(K-1)-1)/threads_per_block + 1, threads_per_block>>> (&d_L[G], &d_beta[G], d_temp_double, G*(K-1));
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, &d_p[1], G*(K-1));
update_p <<<1,1>>> (d_states, d_count, d_p, G, K);
update_tau0 <<<1,1>>> (d_states, d_count, d_p, G, K);
}
//beta
propose_beta <<<(G*(K-1)-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_beta, &d_beta[G], G*(K-1));
for (int k=1; k<K; k++){
for (int g=0; g<G; g++){
fill_prop_beta <<<(N-1)/threads_per_block + 1, threads_per_block>>> (d_proposed_beta, d_beta, d_alpha, d_nu, d_delta,
d_phi, d_W, d_X, d_temp_double, g, k, N, G);
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>>(d_temp_double, &d_log_rho[(k-1)*G + g], N);
}
}
update_beta <<<(G*(K-1)-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_beta, d_log_rho, &d_L[G], &d_beta[G], d_p, G*(K-1));
//nu
propose_nu <<<(G*(B-1)-1)/threads_per_block + 1, threads_per_block>>>(d_states, d_proposed_nu, &d_nu[G], G*(B-1));
sample_index = 0;
for (int b=1; b<B; b++){
sample_index += n_b[b-1];
for (int g=0; g<G; g++){
fill_prop_nu <<<(n_b[b]-1)/threads_per_block + 1, threads_per_block>>>(d_proposed_nu, d_nu, d_alpha, d_beta, &d_delta[sample_index], d_phi,
&d_W[sample_index], &d_X[sample_index*G], d_temp_double, b, g, G, n_b[b]);
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, &d_log_rho[(b-1)*G + g], n_b[b]);
}
}
update_nu <<<(G*(B-1)-1)/threads_per_block+1, threads_per_block>>> (d_states, d_proposed_nu, d_log_rho, &d_nu[G], G, G*(B-1));
//delta
propose_delta <<<(N-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_delta, d_delta, N);
sample_index = 0;
for (int b=0; b<B; b++){
for (int i=1; i<n_b[b]; i++){
fill_prop_delta <<<(G-1)/threads_per_block + 1, threads_per_block>>> (d_proposed_delta, d_delta, d_alpha, d_beta, d_nu, d_phi,
d_W, d_X, d_temp_double, sample_index+i, G);
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, &d_log_rho[sample_index+i], G);
}
sample_index += n_b[b];
}
update_delta <<<(N-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_delta, d_log_rho, d_mu_delta, d_delta, N);
//phi
propose_phi <<<(B*G-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_phi, d_phi, B*G);
sample_index = 0;
int bg = 0;
for (int b=0; b<B; b++){
if(b>0)
sample_index += n_b[b-1];
for (int g=0; g<G; g++){
fill_prop_phi <<<(n_b[b]-1)/threads_per_block + 1, threads_per_block>>> (&d_proposed_phi[bg], &d_phi[bg], &d_alpha[g], &d_beta[g], &d_nu[bg],
&d_delta[sample_index], &d_W[sample_index], &d_X[sample_index*G + g], d_temp_double, b, G);
sum_on_gpu <double> <<<1, threads_per_block , threads_per_block*sizeof(double)>>> (d_temp_double, &d_log_rho[bg], n_b[b]);
bg++;
}
}
update_phi <<<(B*G-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_phi, d_log_rho, d_phi, B*G);
//w
propose_W <<<(N-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_W, N, K);
for (int i=0; i<N; i++){
fill_prop_W <<<(G-1)/threads_per_block + 1, threads_per_block>>> (d_proposed_W, d_W, d_alpha, d_beta, d_nu, d_delta,
d_phi, d_X, d_temp_double, i, G);
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, &d_log_rho[i], G);
}
update_W <<<(N-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_W, d_log_rho, d_pi, d_W, N, K);
//pi
sample_index = 0;
for (int b=0; b<B; b++){
if(b>0)
sample_index += n_b[b-1];
for (int k=0; k<K; k++){
fill_I_W <<<(n_b[b]-1)/threads_per_block + 1, threads_per_block>>> (&d_W[sample_index], d_temp_int, k, n_b[b]);
sum_on_gpu <int> <<<1, threads_per_block, threads_per_block*sizeof(int)>>> (d_temp_int, &d_count[k], n_b[b]);
}
//Assuming that K is less than 1024.
update_pi <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_states, d_count, &d_pi[b*K], K);
}
}
printf("Burnin completed. Start recording iterations.\n");
//Declare stored iterations.
double* d_preserved_gamma; cudaMalloc(&d_preserved_gamma, B*2*n_preserved*sizeof(double));
double* d_preserved_alpha; cudaMalloc(&d_preserved_alpha, G*n_preserved*sizeof(double));
int* d_preserved_L; cudaMalloc(&d_preserved_L, G*(K-1)*n_preserved*sizeof(int));
double* d_preserved_p; cudaMalloc(&d_preserved_p, 2*n_preserved*sizeof(double));
double* d_preserved_beta; cudaMalloc(&d_preserved_beta, G*(K-1)*n_preserved*sizeof(double));
double* d_preserved_nu; cudaMalloc(&d_preserved_nu, G*(B-1)*n_preserved*sizeof(double));
double* d_preserved_delta; cudaMalloc(&d_preserved_delta, N*n_preserved*sizeof(double));
double* d_preserved_phi; cudaMalloc(&d_preserved_phi, B*G*n_preserved*sizeof(double));
int* d_preserved_W; cudaMalloc(&d_preserved_W, N*n_preserved*sizeof(int));
double* d_preserved_pi; cudaMalloc(&d_preserved_pi, B*K*n_preserved*sizeof(double));
for(int iter=0; iter<n_preserved; iter++){
//Z and X
update_Z_X <<<(N*G-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_Y, d_W, d_alpha, d_beta, d_nu,
d_delta, d_gamma, d_phi,
d_Z, d_X, B, N, G);//If there are not enough threads, may have to separate Z and X.
// gamma
propose_gamma <<<(B-1)/threads_per_block + 1, threads_per_block>>>(d_states, d_proposed_gamma, d_gamma,B);
//Fill gamma0
sample_index = 0;
for (int b=0; b<B; b++){
if (b>0)
sample_index += n_b[b-1];
fill_prop_gamma0 <<<(n_b[b]*G-1)/threads_per_block+1, threads_per_block>>> (d_proposed_gamma, d_gamma, &d_Z[sample_index*G],
&d_X[sample_index*G], d_temp_double, b, B, n_b[b]*G);
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, &d_log_rho[b], n_b[b]*G);
}
update_gamma0 <<< (B-1)/threads_per_block + 1, threads_per_block >>> (d_states, d_proposed_gamma, d_log_rho, d_gamma, B);
//Fill gamma1
sample_index = 0;
for (int b=0; b<B; b++){
if (b>0)
sample_index += n_b[b-1];
fill_prop_gamma1 <<<(n_b[b]*G-1)/threads_per_block+1, threads_per_block>>> (d_proposed_gamma, d_gamma, &d_Z[sample_index*G],
&d_X[sample_index*G], d_temp_double, b, B, n_b[b]*G);
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, &d_log_rho[b], n_b[b]*G);
}
update_gamma1 <<< (B-1)/threads_per_block + 1, threads_per_block >>> (d_states, d_proposed_gamma, d_log_rho, d_gamma, B);
//alpha
propose_alpha <<<(G-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_alpha, d_alpha, G);
for (int g=0; g<G; g++){
fill_prop_alpha <<< (N-1)/threads_per_block + 1, threads_per_block >>>(d_proposed_alpha, d_alpha, d_beta, d_nu, d_delta, d_phi,
d_W, d_X, d_temp_double, g, B, N, G);
sum_on_gpu <double> <<< 1, threads_per_block, threads_per_block*sizeof(double) >>>(d_temp_double, &d_log_rho[g], N);
}
update_alpha <<< (G-1)/threads_per_block + 1, threads_per_block >>>(d_states, d_proposed_alpha, d_log_rho, d_mu_alpha, d_alpha, G);
//L
update_L <<<(G*(K-1)-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_p, &d_beta[G], &d_L[G], G*(K-1));
//p
if(iter >= n_unchanged - n_burnin){
//Fill d_count[0] with sum of d_L and d_p[1] with sum of I(d_L==0)*d_beta^2
//I used d_p[1] to carry the second sum. Its value will be replaced anyway.
sum_on_gpu <int> <<<1, threads_per_block, threads_per_block*sizeof(int)>>> (&d_L[G], &d_count[0], G*(K-1));
fill_I_L_beta_sq <<<(G*(K-1)-1)/threads_per_block + 1, threads_per_block>>> (&d_L[G], &d_beta[G], d_temp_double, G*(K-1));
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, &d_p[1], G*(K-1));
update_p <<<1,1>>> (d_states, d_count, d_p, G, K);
update_tau0 <<<1,1>>> (d_states, d_count, d_p, G, K);
}
//beta
propose_beta <<<(G*(K-1)-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_beta, &d_beta[G], G*(K-1));
for (int k=1; k<K; k++){
for (int g=0; g<G; g++){
fill_prop_beta <<<(N-1)/threads_per_block + 1, threads_per_block>>> (d_proposed_beta, d_beta, d_alpha, d_nu, d_delta,
d_phi, d_W, d_X, d_temp_double, g, k, N, G);
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>>(d_temp_double, &d_log_rho[(k-1)*G + g], N);
}
}
update_beta <<<(G*(K-1)-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_beta, d_log_rho, &d_L[G], &d_beta[G], d_p, G*(K-1));
//nu
propose_nu <<<(G*(B-1)-1)/threads_per_block + 1, threads_per_block>>>(d_states, d_proposed_nu, &d_nu[G], G*(B-1));
sample_index = 0;
for (int b=1; b<B; b++){
sample_index += n_b[b-1];
for (int g=0; g<G; g++){
fill_prop_nu <<<(n_b[b]-1)/threads_per_block + 1, threads_per_block>>>(d_proposed_nu, d_nu, d_alpha, d_beta, &d_delta[sample_index], d_phi, &d_W[sample_index],
&d_X[sample_index*G], d_temp_double, b, g, G, n_b[b]);
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, &d_log_rho[(b-1)*G + g], n_b[b]);
}
}
update_nu <<<(G*(B-1)-1)/threads_per_block+1, threads_per_block>>> (d_states, d_proposed_nu, d_log_rho, &d_nu[G], G, G*(B-1));
//delta
propose_delta <<<(N-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_delta, d_delta, N);
sample_index = 0;
for (int b=0; b<B; b++){
for (int i=1; i<n_b[b]; i++){
fill_prop_delta <<<(G-1)/threads_per_block + 1, threads_per_block>>> (d_proposed_delta, d_delta, d_alpha, d_beta, d_nu, d_phi,
d_W, d_X, d_temp_double, sample_index+i, G);
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, &d_log_rho[sample_index+i], G);
}
sample_index += n_b[b];
}
update_delta <<<(N-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_delta, d_log_rho, d_mu_delta, d_delta, N);
//phi
propose_phi <<<(B*G-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_phi, d_phi, B*G);
sample_index = 0;
int bg = 0;
for (int b=0; b<B; b++){
if(b>0)
sample_index += n_b[b-1];
for (int g=0; g<G; g++){
fill_prop_phi <<<(n_b[b]-1)/threads_per_block + 1, threads_per_block>>> (&d_proposed_phi[bg], &d_phi[bg], &d_alpha[g], &d_beta[g], &d_nu[bg],
&d_delta[sample_index], &d_W[sample_index], &d_X[sample_index*G + g], d_temp_double, b, G);
sum_on_gpu <double> <<<1, threads_per_block , threads_per_block*sizeof(double)>>> (d_temp_double, &d_log_rho[bg], n_b[b]);
bg++;
}
}
update_phi <<<(B*G-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_phi, d_log_rho, d_phi, B*G);
//w
propose_W <<<(N-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_W, N, K);
for (int i=0; i<N; i++){
fill_prop_W <<<(G-1)/threads_per_block + 1, threads_per_block>>> (d_proposed_W, d_W, d_alpha, d_beta, d_nu, d_delta,
d_phi, d_X, d_temp_double, i, G);
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, &d_log_rho[i], G);
}
update_W <<<(N-1)/threads_per_block + 1, threads_per_block>>> (d_states, d_proposed_W, d_log_rho, d_pi, d_W, N, K);
//pi
sample_index = 0;
for (int b=0; b<B; b++){
if(b>0)
sample_index += n_b[b-1];
for (int k=0; k<K; k++){
fill_I_W <<<(n_b[b]-1)/threads_per_block + 1, threads_per_block>>> (&d_W[sample_index], d_temp_int, k, n_b[b]);
sum_on_gpu <int> <<<1, threads_per_block, threads_per_block*sizeof(int)>>> (d_temp_int, &d_count[k], n_b[b]);
}
//Assuming that K is less than 1024.
update_pi <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_states, d_count, &d_pi[b*K], K);
}
//Store iteration.
store_gamma <<<(B*2-1)/threads_per_block + 1, threads_per_block>>> (d_gamma, d_preserved_gamma, iter, n_preserved, B*2);
store_alpha <<<(G-1)/threads_per_block + 1, threads_per_block>>> (d_alpha, d_preserved_alpha, iter, n_preserved, G);
store_L_beta <<<(G*(K-1)-1)/threads_per_block + 1, threads_per_block>>>(&d_L[G], d_preserved_L, &d_beta[G], d_preserved_beta, iter, n_preserved, G*(K-1));
cudaMemcpy(&d_preserved_p[iter], &d_p[0], sizeof(double), cudaMemcpyDeviceToDevice);
cudaMemcpy(&d_preserved_p[n_preserved + iter], &d_p[1], sizeof(double), cudaMemcpyDeviceToDevice);
store_nu_phi <<<(B*G-1)/threads_per_block + 1, threads_per_block>>> (d_nu, d_preserved_nu, d_phi, d_preserved_phi, iter, n_preserved, G, B*G);
store_delta_W <<<(N-1)/threads_per_block + 1, threads_per_block>>> (d_delta, d_preserved_delta, d_W, d_preserved_W, iter, n_preserved, N);
store_pi <<<(B*K-1)/threads_per_block + 1, threads_per_block>>> (d_pi, d_preserved_pi, iter, n_preserved, B*K);
}
//3.Posterior Inference
printf("Start posterior inference.\n");
//reuse d_{sth} as d_post_{sth}
//Z
// Not necessary
//X
// Not necessary either
//gamma
for (int b=0; b<2*B; b++)
mean_on_gpu <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (&d_preserved_gamma[b*n_preserved], &d_gamma[b], n_preserved);
//alpha
for (int g=0; g<G; g++)
mean_on_gpu <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (&d_preserved_alpha[g*n_preserved], &d_alpha[g], n_preserved);
//L
double* d_post_prob; cudaMalloc(&d_post_prob, G*(K-1)*sizeof(double));
for (int pos=0; pos<G*(K-1); pos++)
mean_on_gpu <<<1, threads_per_block, threads_per_block*sizeof(int)>>> (&d_preserved_L[pos*n_preserved], &d_post_prob[pos], n_preserved);
//p and tau0
for (int pos=0; pos<2; pos++)
mean_on_gpu <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (&d_preserved_p[pos*n_preserved], &d_p[pos], n_preserved);
//beta
for (int pos=G; pos<G*K; pos++)//starts with G
mean_on_gpu <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (&d_preserved_beta[(pos-G)*n_preserved], &d_beta[pos], n_preserved);
//nu
for (int pos=G; pos<B*G; pos++)//starts with G
mean_on_gpu <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (&d_preserved_nu[(pos-G)*n_preserved], &d_nu[pos], n_preserved);
//delta
for (int i=0; i<N; i++)
mean_on_gpu <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (&d_preserved_delta[i*n_preserved], &d_delta[i], n_preserved);
//phi
for (int pos=0; pos<B*G; pos++)
mean_on_gpu <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (&d_preserved_phi[pos*n_preserved], &d_phi[pos], n_preserved);
//W
for (int k=0; k<K; k++){
for (int i=0; i<N; i++){
fill_post_W <<<(n_preserved-1)/threads_per_block + 1, threads_per_block>>> (&d_preserved_W[i*n_preserved], d_temp_int, k, n_preserved);
sum_on_gpu <int> <<<1, threads_per_block, threads_per_block*sizeof(int)>>> (d_temp_int, &d_count[k*N + i], n_preserved);
}
}
mode_on_gpu <<<(N-1)/threads_per_block + 1, threads_per_block>>> (d_count, d_W, N, K);
//pi
for (int pos=0; pos<B*K; pos++)
mean_on_gpu <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (&d_preserved_pi[pos*n_preserved], &d_pi[pos], n_preserved);
//4.Write output
printf("Start writing output files.\n");
//Z
int* h_post_Z = (int*)malloc(N*G*sizeof(int));
cudaMemcpy(h_post_Z, d_Z, N*G*sizeof(int), cudaMemcpyDeviceToHost);
char Z_output_filename[200];
strcpy(Z_output_filename, output_file);
strcat(Z_output_filename, "_post_Z.txt");
FILE *Z_output_file;
Z_output_file = fopen(Z_output_filename,"w");
for (int i=0; i<N; i++){
for (int g=0; g<G-1; g++)
fprintf(Z_output_file, "%d\t", h_post_Z[i*G + g]);
fprintf(Z_output_file, "%d\n", h_post_Z[i*G + (G-1)]);
}
fclose(Z_output_file);
free(h_post_Z);
//X
int* h_post_X = (int*)malloc(N*G*sizeof(int));
cudaMemcpy(h_post_X, d_X, N*G*sizeof(int), cudaMemcpyDeviceToHost);
char X_output_filename[200];
strcpy(X_output_filename, output_file);
strcat(X_output_filename, "_post_X.txt");
FILE *X_output_file;
X_output_file = fopen(X_output_filename,"w");
for (int i=0; i<N; i++){
for (int g=0; g<G-1; g++)
fprintf(X_output_file, "%d\t", h_post_X[i*G + g]);
fprintf(X_output_file, "%d\n", h_post_X[i*G + (G-1)]);
}
fclose(X_output_file);
free(h_post_X);
//gamma
double* h_post_gamma = (double*)malloc(2*B*sizeof(double));
cudaMemcpy(h_post_gamma, d_gamma, 2*B*sizeof(double), cudaMemcpyDeviceToHost);
char gamma_output_filename[200];
strcpy(gamma_output_filename, output_file);
strcat(gamma_output_filename, "_post_gamma.txt");
FILE *gamma_output_file;
gamma_output_file = fopen(gamma_output_filename,"w");
for (int i=0; i<2; i++){
for (int b=0; b<B-1; b++)
fprintf(gamma_output_file, "%lf\t", h_post_gamma[i*B + b]);
fprintf(gamma_output_file, "%lf\n", h_post_gamma[i*B + (B-1)]);
}
fclose(gamma_output_file);
free(h_post_gamma);
//alpha
double* h_post_alpha = (double*)malloc(G*sizeof(double));
cudaMemcpy(h_post_alpha, d_alpha, G*sizeof(double), cudaMemcpyDeviceToHost);
char alpha_output_filename[200];
strcpy(alpha_output_filename, output_file);
strcat(alpha_output_filename, "_post_alpha.txt");
FILE *alpha_output_file;
alpha_output_file = fopen(alpha_output_filename,"w");
for (int g=0; g<G; g++){
fprintf(alpha_output_file, "%lf\n", h_post_alpha[g]);
}
fclose(alpha_output_file);
free(h_post_alpha);
//L ('s post prob)
double* h_post_L = (double*)malloc(G*(K-1)*sizeof(double));
cudaMemcpy(h_post_L, d_post_prob, G*(K-1)*sizeof(double), cudaMemcpyDeviceToHost);
char L_output_filename[200];
strcpy(L_output_filename, output_file);
strcat(L_output_filename, "_post_L.txt");
FILE *L_output_file;
L_output_file = fopen(L_output_filename,"w");
for (int k=1; k<K; k++){
for (int g=0; g<G-1; g++)
fprintf(L_output_file, "%lf\t", h_post_L[(k-1)*G + g]);
fprintf(L_output_file, "%lf\n", h_post_L[(k-1)*G + (G-1)]);
}
fclose(L_output_file);
free(h_post_L);
//beta
double* h_post_beta = (double*)malloc(G*(K-1)*sizeof(double));
cudaMemcpy(h_post_beta, &d_beta[G], G*(K-1)*sizeof(double), cudaMemcpyDeviceToHost);
char beta_output_filename[200];
strcpy(beta_output_filename, output_file);
strcat(beta_output_filename, "_post_beta.txt");
FILE *beta_output_file;
beta_output_file = fopen(beta_output_filename,"w");
for (int g=0; g<G-1; g++)
fprintf(beta_output_file, "0.000000\t");//Directly print 0 to save time
fprintf(beta_output_file, "0.000000\n");
for (int k=1; k<K; k++){
for (int g=0; g<G-1; g++)
fprintf(beta_output_file, "%lf\t", h_post_beta[(k-1)*G + g]);
fprintf(beta_output_file, "%lf\n", h_post_beta[(k-1)*G + (G-1)]);
}
fclose(beta_output_file);
free(h_post_beta);
//p and tau0
double* h_post_p = (double*)malloc(2*sizeof(double));
cudaMemcpy(h_post_p, d_p, 2*sizeof(double), cudaMemcpyDeviceToHost);
char p_output_filename[200];
strcpy(p_output_filename, output_file);
strcat(p_output_filename, "_post_p.txt");
FILE *p_output_file;
p_output_file = fopen(p_output_filename,"w");
for (int i=0; i<2; i++){
fprintf(p_output_file, "%lf\t", h_post_p[i]);
}
fclose(p_output_file);
free(h_post_p);
//nu
double* h_post_nu = (double*)malloc(G*(B-1)*sizeof(double));
cudaMemcpy(h_post_nu, &d_nu[G], G*(B-1)*sizeof(double), cudaMemcpyDeviceToHost);
char nu_output_filename[200];
strcpy(nu_output_filename, output_file);
strcat(nu_output_filename, "_post_nu.txt");
FILE *nu_output_file;
nu_output_file = fopen(nu_output_filename,"w");
for (int g=0; g<G-1; g++)
fprintf(nu_output_file, "0.000000\t");//Directly print 0 to save time
fprintf(nu_output_file, "0.000000\n");
for (int b=1; b<B; b++){
for (int g=0; g<G-1; g++)
fprintf(nu_output_file, "%lf\t", h_post_nu[(b-1)*G + g]);
fprintf(nu_output_file, "%lf\n", h_post_nu[(b-1)*G + (G-1)]);
}
fclose(nu_output_file);
free(h_post_nu);
//delta
double* h_post_delta = (double*)malloc(N*sizeof(double));
cudaMemcpy(h_post_delta, d_delta, N*sizeof(double), cudaMemcpyDeviceToHost);
char delta_output_filename[200];
strcpy(delta_output_filename, output_file);
strcat(delta_output_filename, "_post_delta.txt");
FILE *delta_output_file;
delta_output_file = fopen(delta_output_filename,"w");
for (int i=0; i<N; i++){
fprintf(delta_output_file, "%lf\n", h_post_delta[i]);
}
fclose(delta_output_file);
free(h_post_delta);
//phi
double* h_post_phi = (double*)malloc(G*B*sizeof(double));
cudaMemcpy(h_post_phi, d_phi, G*B*sizeof(double), cudaMemcpyDeviceToHost);
char phi_output_filename[200];
strcpy(phi_output_filename, output_file);
strcat(phi_output_filename, "_post_phi.txt");
FILE *phi_output_file;
phi_output_file = fopen(phi_output_filename,"w");
for (int b=0; b<B; b++){
for (int g=0; g<G-1; g++)
fprintf(phi_output_file, "%lf\t", h_post_phi[b*G + g]);
fprintf(phi_output_file, "%lf\n", h_post_phi[b*G + (G-1)]);
}
fclose(phi_output_file);
free(h_post_phi);
//W
int* h_post_W = (int*)malloc(N*sizeof(int));
cudaMemcpy(h_post_W, d_W, N*sizeof(int), cudaMemcpyDeviceToHost);
char W_output_filename[200];
strcpy(W_output_filename, output_file);
strcat(W_output_filename, "_post_W.txt");
FILE *W_output_file;
W_output_file = fopen(W_output_filename,"w");
for (int i=0; i<N; i++){
fprintf(W_output_file, "%d\n", h_post_W[i]);
}
fclose(W_output_file);
free(h_post_W);
//pi
double* h_post_pi = (double*)malloc(B*K*sizeof(double));
cudaMemcpy(h_post_pi, d_pi, B*K*sizeof(double), cudaMemcpyDeviceToHost);
char pi_output_filename[200];
strcpy(pi_output_filename, output_file);
strcat(pi_output_filename, "_post_pi.txt");
FILE *pi_output_file;
pi_output_file = fopen(pi_output_filename,"w");
for (int b=0; b<B; b++){
for (int k=0; k<K-1; k++)
fprintf(pi_output_file, "%lf\t", h_post_pi[b*K + k]);
fprintf(pi_output_file, "%lf\n", h_post_pi[b*K + (K-1)]);
}
fclose(pi_output_file);
free(h_post_pi);
//BIC
double* d_log_likelihood_partial; cudaMalloc(&d_log_likelihood_partial, N*K*sizeof(double));//k*N + i
sample_index = 0;
for (int b=0; b<B; b++){
for (int i=0; i<n_b[b]; i++){
for (int k=0; k<K; k++){
fill_log_likelihood_1<<<(G-1)/threads_per_block + 1, threads_per_block>>>(d_alpha, &d_beta[k*G], &d_nu[b*G], &d_delta[sample_index+i],
&d_gamma[b], &d_gamma[B+b], &d_phi[b*G], &d_Y[(sample_index+i)*G], d_temp_double, G);
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>>(d_temp_double, &d_log_likelihood_partial[k*N + sample_index + i], G);
}
}
sample_index += n_b[b];
}
fill_log_likelihood_2 <<<(N-1)/threads_per_block + 1, threads_per_block>>> (d_pi, d_log_likelihood_partial, d_temp_double, N, K);
sum_on_gpu <double> <<<1, threads_per_block, threads_per_block*sizeof(double)>>> (d_temp_double, d_temp_double, N);
double log_likelihood;
cudaMemcpy(&log_likelihood, d_temp_double, sizeof(double), cudaMemcpyDeviceToHost);
double BIC = -2*log_likelihood + log(G*N)*((B+G)*K + 2*B + G*(B*2 - 1) + N - B);
char BIC_output_filename[200];
strcpy(BIC_output_filename, output_file);
strcat(BIC_output_filename, "_BIC.txt");
FILE *BIC_output_file;
BIC_output_file = fopen(BIC_output_filename,"w");
fprintf(BIC_output_file, "%lf\n", BIC);
fclose(BIC_output_file);
//(Optional) Print out all preserved iterations.
if (print_preserved){
printf("Start writing preserved iterations.\n");
//gamma
double* h_preserved_gamma = (double*)malloc(2*B*n_preserved*sizeof(double));
cudaMemcpy(h_preserved_gamma, d_preserved_gamma, 2*B*n_preserved*sizeof(double), cudaMemcpyDeviceToHost);
char gamma_preserved_filename[200];
strcpy(gamma_preserved_filename, output_file);
strcat(gamma_preserved_filename, "_preserved_gamma.txt");
FILE *gamma_preserved_file;
gamma_preserved_file = fopen(gamma_preserved_filename,"w");
for (int pos=0; pos<2*B; pos++){
for (int iter=0; iter<n_preserved-1; iter++)
fprintf(gamma_preserved_file, "%lf\t", h_preserved_gamma[pos*n_preserved + iter]);
fprintf(gamma_preserved_file, "%lf\n", h_preserved_gamma[pos*n_preserved + (n_preserved-1)]);
}
fclose(gamma_preserved_file);
free(h_preserved_gamma);
//alpha
double* h_preserved_alpha = (double*)malloc(G*n_preserved*sizeof(double));
cudaMemcpy(h_preserved_alpha, d_preserved_alpha, G*n_preserved*sizeof(double), cudaMemcpyDeviceToHost);
char alpha_preserved_filename[200];
strcpy(alpha_preserved_filename, output_file);
strcat(alpha_preserved_filename, "_preserved_alpha.txt");
FILE *alpha_preserved_file;
alpha_preserved_file = fopen(alpha_preserved_filename,"w");
for (int g=0; g<G; g++){
for (int iter=0; iter<n_preserved-1; iter++)
fprintf(alpha_preserved_file, "%lf\t", h_preserved_alpha[g*n_preserved + iter]);
fprintf(alpha_preserved_file, "%lf\n", h_preserved_alpha[g*n_preserved + (n_preserved-1)]);
}
fclose(alpha_preserved_file);
free(h_preserved_alpha);
//L
int* h_preserved_L = (int*)malloc(G*(K-1)*n_preserved*sizeof(int));
cudaMemcpy(h_preserved_L, d_preserved_L, G*(K-1)*n_preserved*sizeof(int), cudaMemcpyDeviceToHost);
char L_preserved_filename[200];
strcpy(L_preserved_filename, output_file);
strcat(L_preserved_filename, "_preserved_L.txt");
FILE *L_preserved_file;
L_preserved_file = fopen(L_preserved_filename,"w");
for (int pos=0; pos<(K-1)*G; pos++){
for (int iter=0; iter<n_preserved-1; iter++)
fprintf(L_preserved_file, "%d\t", h_preserved_L[pos*n_preserved + iter]);
fprintf(L_preserved_file, "%d\n", h_preserved_L[pos*n_preserved + (n_preserved-1)]);
}
fclose(L_preserved_file);
free(h_preserved_L);
//beta
double* h_preserved_beta = (double*)malloc(G*(K-1)*n_preserved*sizeof(double));
cudaMemcpy(h_preserved_beta, d_preserved_beta, G*(K-1)*n_preserved*sizeof(double), cudaMemcpyDeviceToHost);
char beta_preserved_filename[200];
strcpy(beta_preserved_filename, output_file);
strcat(beta_preserved_filename, "_preserved_beta.txt");
FILE *beta_preserved_file;
beta_preserved_file = fopen(beta_preserved_filename,"w");
for (int pos=0; pos<G*(K-1); pos++){
for (int iter=0; iter<n_preserved-1; iter++)
fprintf(beta_preserved_file, "%lf\t", h_preserved_beta[pos*n_preserved + iter]);
fprintf(beta_preserved_file, "%lf\n", h_preserved_beta[pos*n_preserved + (n_preserved-1)]);
}
fclose(beta_preserved_file);
free(h_preserved_beta);
//p and tau0
double* h_preserved_p = (double*)malloc(2*n_preserved*sizeof(double));
cudaMemcpy(h_preserved_p, d_preserved_p, 2*n_preserved*sizeof(double), cudaMemcpyDeviceToHost);
char p_preserved_filename[200];
strcpy(p_preserved_filename, output_file);
strcat(p_preserved_filename, "_preserved_p.txt");
FILE *p_preserved_file;
p_preserved_file = fopen(p_preserved_filename,"w");
for (int i=0; i<2; i++){
for (int iter=0; iter<n_preserved-1; iter++)
fprintf(p_preserved_file, "%lf\t", h_preserved_p[i*n_preserved + iter]);
fprintf(p_preserved_file, "%lf\n", h_preserved_p[i*n_preserved + (n_preserved-1)]);
}
fclose(p_preserved_file);
free(h_preserved_p);
//nu
double* h_preserved_nu = (double*)malloc(G*(B-1)*n_preserved*sizeof(double));
cudaMemcpy(h_preserved_nu, d_preserved_nu, G*(B-1)*n_preserved*sizeof(double), cudaMemcpyDeviceToHost);
char nu_preserved_filename[200];
strcpy(nu_preserved_filename, output_file);
strcat(nu_preserved_filename, "_preserved_nu.txt");
FILE *nu_preserved_file;
nu_preserved_file = fopen(nu_preserved_filename,"w");
for (int pos=0; pos<(B-1)*G; pos++){
for (int iter=0; iter<n_preserved-1; iter++)
fprintf(nu_preserved_file, "%lf\t", h_preserved_nu[pos*n_preserved + iter]);
fprintf(nu_preserved_file, "%lf\n", h_preserved_nu[pos*n_preserved + (n_preserved-1)]);
}
fclose(nu_preserved_file);
free(h_preserved_nu);
//delta
double* h_preserved_delta = (double*)malloc(N*n_preserved*sizeof(double));
cudaMemcpy(h_preserved_delta, d_preserved_delta, N*n_preserved*sizeof(double), cudaMemcpyDeviceToHost);
char delta_preserved_filename[200];
strcpy(delta_preserved_filename, output_file);
strcat(delta_preserved_filename, "_preserved_delta.txt");
FILE *delta_preserved_file;
delta_preserved_file = fopen(delta_preserved_filename,"w");
for (int i=0; i<N; i++){
for (int iter=0; iter<n_preserved-1; iter++)
fprintf(delta_preserved_file, "%lf\t", h_preserved_delta[i*n_preserved + iter]);
fprintf(delta_preserved_file, "%lf\n", h_preserved_delta[i*n_preserved + (n_preserved-1)]);
}
fclose(delta_preserved_file);
free(h_preserved_delta);
//phi
double* h_preserved_phi = (double*)malloc(G*B*n_preserved*sizeof(double));
cudaMemcpy(h_preserved_phi, d_preserved_phi, G*B*n_preserved*sizeof(double), cudaMemcpyDeviceToHost);
char phi_preserved_filename[200];
strcpy(phi_preserved_filename, output_file);
strcat(phi_preserved_filename, "_preserved_phi.txt");
FILE *phi_preserved_file;
phi_preserved_file = fopen(phi_preserved_filename,"w");
for (int pos=0; pos<B*G; pos++){
for (int iter=0; iter<n_preserved-1; iter++)
fprintf(phi_preserved_file, "%lf\t", h_preserved_phi[pos*n_preserved + iter]);
fprintf(phi_preserved_file, "%lf\n", h_preserved_phi[pos*n_preserved + (n_preserved-1)]);
}
fclose(phi_preserved_file);
free(h_preserved_phi);
//W
int* h_preserved_W = (int*)malloc(N*n_preserved*sizeof(int));
cudaMemcpy(h_preserved_W, d_preserved_W, N*n_preserved*sizeof(int), cudaMemcpyDeviceToHost);
char W_preserved_filename[200];
strcpy(W_preserved_filename, output_file);
strcat(W_preserved_filename, "_preserved_W.txt");
FILE *W_preserved_file;
W_preserved_file = fopen(W_preserved_filename,"w");
for (int i=0; i<N; i++){
for (int iter=0; iter<n_preserved-1; iter++)
fprintf(W_preserved_file, "%d\t", h_preserved_W[i*n_preserved + iter]);
fprintf(W_preserved_file, "%d\n", h_preserved_W[i*n_preserved + (n_preserved-1)]);
}
fclose(W_preserved_file);
free(h_preserved_W);
//pi
double* h_preserved_pi = (double*)malloc(B*K*n_preserved*sizeof(double));
cudaMemcpy(h_preserved_pi, d_preserved_pi, B*K*n_preserved*sizeof(double), cudaMemcpyDeviceToHost);
char pi_preserved_filename[200];
strcpy(pi_preserved_filename, output_file);
strcat(pi_preserved_filename, "_preserved_pi.txt");
FILE *pi_preserved_file;
pi_preserved_file = fopen(pi_preserved_filename,"w");
for (int pos=0; pos<B*K; pos++){
for (int iter=0; iter<n_preserved-1; iter++)
fprintf(pi_preserved_file, "%lf\t", h_preserved_pi[pos*n_preserved + iter]);
fprintf(pi_preserved_file, "%lf\n", h_preserved_pi[pos*n_preserved + (n_preserved-1)]);
}
fclose(pi_preserved_file);
free(h_preserved_pi);
}
return (0);
}
|
119
|
#include <stdio.h>
#include <stdlib.h>
#define N 1000
#define T 256
/*
 * Adds one to every element of A, writing the results to newA.
 *
 * Intended for a single-block launch of T threads (vecInc<<<1, T>>>): each
 * thread walks the indices threadIdx.x, threadIdx.x + T, threadIdx.x + 2T, ...
 * until it passes N, so the T threads jointly cover all N elements.
 */
__global__ void vecInc(int *A, int *newA) {
	for (int idx = threadIdx.x; idx < N; idx += T)
		newA[idx] = A[idx] + 1;
}
/*
 * Demo driver for vecInc: fills an N-element host array with pseudo-random
 * values in [0, 100), increments every element on the GPU, and prints both
 * the original and the incremented array.
 *
 * Fix: CUDA API and kernel-launch errors were previously ignored, so any
 * failure (no device, out of memory, bad launch) silently printed an
 * uninitialized result array. Every call is now checked and the program
 * exits with status 1 on error.
 */
int main (int argc, char *argv[]){
	int i;
	int size = N * sizeof(int);
	int a[N], new_a[N], *devA, *dev_newA;
	cudaError_t err;

	printf("Original A array\n");
	for (i = 0; i < N; i++){
		a[i] = rand() % 100;  /* rand() is unseeded, so the sequence is deterministic */
		printf("%d ", a[i]);
	}

	/* Allocate device buffers and upload the input, checking each step. */
	if ((err = cudaMalloc((void**)&devA, size)) != cudaSuccess ||
	    (err = cudaMalloc((void**)&dev_newA, size)) != cudaSuccess ||
	    (err = cudaMemcpy(devA, a, size, cudaMemcpyHostToDevice)) != cudaSuccess){
		fprintf(stderr, "CUDA setup failed: %s\n", cudaGetErrorString(err));
		return 1;
	}

	vecInc<<<1, T>>>(devA, dev_newA);
	/* Launch-configuration errors only surface via cudaGetLastError(). */
	if ((err = cudaGetLastError()) != cudaSuccess){
		fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(err));
		return 1;
	}

	/* This blocking copy synchronizes with the kernel and surfaces any
	 * asynchronous execution error. */
	if ((err = cudaMemcpy(new_a, dev_newA, size, cudaMemcpyDeviceToHost)) != cudaSuccess){
		fprintf(stderr, "Result copy failed: %s\n", cudaGetErrorString(err));
		return 1;
	}

	cudaFree(devA);
	cudaFree(dev_newA);

	printf("\nNew A array \n");
	for (i = 0; i < N; i++){
		printf("%d ", new_a[i]);
	}
	printf("\n");
	return 0;
}
|
120
|
#include <stdio.h>
int N;
/*
 * Naive dense matrix multiply C = A * B for N x N row-major matrices.
 *
 * Launch layout: 1-D grid; each thread computes one full COLUMN of C (every
 * row for its column index), so gridDim.x * blockDim.x must be >= N.
 *
 * Fixes:
 *  - The accumulator was declared `int`, truncating every float product
 *    before accumulation; it is now a float.
 *  - Threads with col >= N previously wrote out of bounds when the launch
 *    was over-provisioned; they now return early.
 */
__global__ void matrixMultGPU(float *a, float *b, float *c, int N) {
	int col = threadIdx.x + blockDim.x * blockIdx.x;
	if (col >= N) return;  // guard tail threads of an over-sized launch
	for (int fil = 0; fil < N; fil++) {
		float sum = 0.0f;  // was int: silently truncated float products
		for (int k = 0; k < N; k++) {
			sum += a[fil * N + k] * b[k * N + col];
		}
		c[fil * N + col] = sum;
	}
}
/*
 * Driver for matrixMultGPU.
 * Usage: prog N gridSize blockSize [print]
 *   N         - matrix dimension (matrices are N x N)
 *   gridSize  - number of blocks for the 1-D launch
 *   blockSize - threads per block
 *   print     - if any 4th argument is given, matrices A, B and C are printed
 *
 * Fixes:
 *  - argc is validated before argv[1..3] are read (previously a missing
 *    argument dereferenced NULL).
 *  - device buffer size was computed with sizeof(int) for float data; it only
 *    worked because int and float happen to be 4 bytes. Now sizeof(float).
 *  - host buffers are freed before exit (they were leaked).
 *  - kernel launch errors are reported instead of being silently ignored.
 */
int main(int argc, char const *argv[]) {
	if (argc < 4) {
		fprintf(stderr, "usage: %s N gridSize blockSize [print]\n", argv[0]);
		return 1;
	}
	N = atoi(argv[1]);
	int gridSize = atoi(argv[2]);
	int blockSize = atoi(argv[3]);

	float *c = (float *)malloc(N * N * sizeof(float));
	float *a = (float *)malloc(N * N * sizeof(float));
	float *b = (float *)malloc(N * N * sizeof(float));
	float *dev_a, *dev_b, *dev_c;

	/* Initialize inputs with small pseudo-random values (rand() unseeded,
	 * so runs are reproducible). */
	for (int i = 0; i < N; i++) {
		for (int j = 0; j < N; j++) {
			a[i * N + j] = (rand() % 10);
			b[i * N + j] = (rand() % 10);
		}
	}

	/* Buffers hold float, so size them with sizeof(float). */
	int size = N * N * sizeof(float);
	cudaMalloc((void **) &dev_a, size);
	cudaMalloc((void **) &dev_b, size);
	cudaMalloc((void **) &dev_c, size);
	cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);

	matrixMultGPU<<<gridSize, blockSize>>>(dev_a, dev_b, dev_c, N);
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess)
		fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));

	/* Blocking copy: synchronizes with the kernel before reading results. */
	cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_c);

	/* Optional printout of all three matrices. */
	if (argv[4] != NULL) {
		printf("Matrix A --------------------\n");
		for (int y = 0; y < N; y++) {
			for (int x = 0; x < N; x++) {
				printf("%f ", a[y * N + x]);
			}
			printf("\n");
		}
		printf("Matrix B --------------------\n");
		for (int y = 0; y < N; y++) {
			for (int x = 0; x < N; x++) {
				printf("%f ", b[y * N + x]);
			}
			printf("\n");
		}
		printf("Matrix C --------------------\n");
		for (int y = 0; y < N; y++) {
			for (int x = 0; x < N; x++) {
				printf("%f ", c[y * N + x]);
			}
			printf("\n");
		}
	}
	free(a);
	free(b);
	free(c);
	return 0;
}
|
121
|
#include "hor.cuh"
/*
 * Boyer-Moore-Horspool search over a text partitioned among threads.
 *
 * Launch layout: 1-D grid; thread t scans window starts in
 * [t * stride_length, (t+1) * stride_length) and records each full match by
 * setting match[start] = 1. The bad-character shift table hbc must be built
 * by pre_horspool.
 *
 * Fixes:
 *  - text_size - pattern_size is an unsigned subtraction; when the pattern is
 *    longer than the text it underflowed to a huge value and every thread
 *    scanned far out of bounds. Guarded by an early return.
 *  - hbc was indexed with a plain char, which may be signed: input bytes
 *    >= 0x80 produced a negative index. The byte is now cast to
 *    unsigned char (matching the table built by pre_horspool).
 */
__global__ void horspool(char *text, unsigned long text_size, char *pattern,
	int pattern_size, unsigned char hbc[], int stride_length, int *match) {
	if (pattern_size <= 0 || (unsigned long)pattern_size > text_size)
		return;  // avoid unsigned underflow in text_size - pattern_size below
	unsigned long thread_id = blockDim.x * blockIdx.x + threadIdx.x;
	unsigned long start_inx = thread_id * stride_length;
	unsigned long boundary = start_inx + stride_length;
	unsigned long j = start_inx;
	while (j < boundary && j <= text_size - pattern_size) {
		int i = 0;
		while (i < pattern_size && pattern[i] == text[j + i]) i++;
		if (i == pattern_size) match[j] = 1;
		// Shift by the bad-character rule; cast keeps the index in [0, 255].
		j += hbc[(unsigned char)text[j + pattern_size - 1]];
	}
}
/*
 * Builds the Horspool bad-character shift table for the given pattern.
 * Characters absent from the pattern (or only at its last position) get the
 * full pattern length; any other character gets its distance from the end.
 * hbc must have room for SIGMA entries (alphabet size, from hor.cuh).
 *
 * Fix: the table was indexed with a plain char, which may be signed; pattern
 * bytes >= 0x80 produced a negative index and wrote out of bounds. The byte
 * is now cast to unsigned char.
 */
void pre_horspool(char *pattern, int pattern_size, unsigned char hbc[]){
	int i;
	for(i = 0; i < SIGMA; i++) hbc[i] = pattern_size;
	for(i = 0; i < pattern_size - 1; i++)
		hbc[(unsigned char)pattern[i]] = pattern_size - i - 1;
}
|
122
|
/*
* UpdaterEy1D.cpp
*
 *  Created on: Feb 1, 2016
 *      Author: aleksandr
*/
#include "UpdaterEy1D.h"
__device__
// 1-D FDTD update of the electric field component Ey at grid index indx:
// the previous Ey sample is scaled by the self-term coefficient Ceye, and the
// backward difference of the magnetic field (Hz[indx] - Hz[indx-1]) is
// subtracted after scaling by the curl coefficient Ceyh.
// NOTE(review): Ey, Hz, Ceye and Ceyh are members declared in UpdaterEy1D.h
// (not visible here); presumably per-cell field/coefficient arrays. indx must
// be >= 1 or Hz[indx-1] reads out of bounds — confirm against the caller.
void UpdaterEy1D::operator() (const int indx) {
	Ey[indx] = Ceye[indx] * Ey[indx] - Ceyh[indx]*(Hz[indx] - Hz[indx-1]);
}
|
123
|
#include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
/*
 * Queries the CUDA runtime for the most recent error; if one occurred,
 * reports it together with the caller-supplied context message and
 * terminates the process with a non-zero status. No-op on success.
 */
void check_error (const char* message) {
	cudaError_t status = cudaGetLastError ();
	if (status == cudaSuccess)
		return;
	printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
	exit(-1);
}
/*
 * sw4_1: accumulates the first component (uacc_0) of the SW4 elastic-wave
 * stencil update. Expects a 2-D launch covering the (i, j) plane; each thread
 * marches the k dimension in steps of 2, computing two k-planes per trip
 * (the "a_" set for k, the "b_" set for k+1). All field arrays are assumed
 * to be 304 x 304 x 304 doubles (hard-coded in the casts below); strx/stry/
 * strz are per-axis stretching factors; N is the logical extent, with a
 * 2-cell halo excluded on every side. __launch_bounds__(128,2) caps the
 * block at 128 threads with at least 2 resident blocks per SM.
 */
__global__ void __launch_bounds__ (128,2) sw4_1 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
  //Determing the block's indices
  int blockdim_i= (int)(blockDim.x);
  int i0 = (int)(blockIdx.x)*(blockdim_i);
  int i = max (i0, 0) + (int)(threadIdx.x);
  int blockdim_j= (int)(blockDim.y);
  int j0 = (int)(blockIdx.y)*(blockdim_j);
  int j = max (j0, 0) + (int)(threadIdx.y);
  // Assumptions
  int a1 = 1;
  double h = 3.7;                 // grid spacing (hard-coded assumption)
  double cof = 1e0 / ( h * h);    // 1/h^2 scaling of the stencil result
  // Reinterpret the flat input pointers as 304^3 3-D arrays.
  double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
  double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
  double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
  double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
  double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
  double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
  double (*mu)[304][304] = (double (*)[304][304])mu_in;
  double (*la)[304][304] = (double (*)[304][304])la_in;
  double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
  double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
  double a_r1, b_r1;
  // Interior-only guard (2-cell halo); bitwise & is intentional here but
  // behaves like && since every operand is a 0/1 comparison result.
  if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 3
    for (int k=2; k<=N-3; k+=2) {
      // ---- plane k: averaged mu coefficients along x, y, z (4 per axis) ----
      a_mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
      a_mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
      a_mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
      a_mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
      a_muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
      a_muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
      a_muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
      a_muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
      a_muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
      a_muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
      a_muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
      a_muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
      // Second-derivative terms of u_0 along all three axes (5-point, 4th order).
      a_r1 = 1e0 / 6 * (strx[i] * ((2 * a_mux1 + la[k][j][i-1] * strx[i-1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i-2] * strx[i-2]) * (u_0[k][j][i-2] - u_0[k][j][i]) +
        (2 * a_mux2 + la[k][j][i-2] * strx[i-2] + la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i] + 3 * la[k][j][i-1] * strx[i-1]) * (u_0[k][j][i-1] - u_0[k][j][i]) +
        (2 * a_mux3 + la[k][j][i-1] * strx[i-1] + la[k][j][i+2] * strx[i+2] + 3 * la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i]) * (u_0[k][j][i+1] - u_0[k][j][i]) +
        (2 * a_mux4 + la[k][j][i+1] * strx[i+1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i+2] * strx[i+2]) * (u_0[k][j][i+2] - u_0[k][j][i]))
        + stry[j] * (a_muy1 * (u_0[k][j-2][i] - u_0[k][j][i]) + a_muy2 * (u_0[k][j-1][i] - u_0[k][j][i]) + a_muy3 * (u_0[k][j+1][i] - u_0[k][j][i]) + a_muy4 * (u_0[k][j+2][i] - u_0[k][j][i])) + strz[k] * (a_muz1 * (u_0[k-2][j][i] - u_0[k][j][i]) + a_muz2 * (u_0[k-1][j][i] - u_0[k][j][i]) + a_muz3 * (u_0[k+1][j][i] - u_0[k][j][i]) + a_muz4 * (u_0[k+2][j][i] - u_0[k][j][i])));
      // Mixed-derivative cross terms (d/dx d/dy and d/dx d/dz of u_1, u_2).
      a_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k][j][i-2] * (u_1[k][j-2][i-2] - u_1[k][j+2][i-2] + 8 * (-u_1[k][j-1][i-2] + u_1[k][j+1][i-2])) - 8 * (la[k][j][i-1] * (u_1[k][j-2][i-1] - u_1[k][j+2][i-1] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j+1][i-1]))) + 8 * (la[k][j][i+1] * (u_1[k][j-2][i+1] - u_1[k][j+2][i+1] + 8 * (-u_1[k][j-1][i+1] + u_1[k][j+1][i+1]))) - (la[k][j][i+2] * (u_1[k][j-2][i+2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j-1][i+2] + u_1[k][j+1][i+2]))));
      a_r1 += strx[i] * strz[k] * (1e0 / 144) * (la[k][j][i-2] * (u_2[k-2][j][i-2] - u_2[k+2][j][i-2] + 8 * (-u_2[k-1][j][i-2] + u_2[k+1][j][i-2])) - 8 * (la[k][j][i-1] * (u_2[k-2][j][i-1] - u_2[k+2][j][i-1] + 8 * (-u_2[k-1][j][i-1] + u_2[k+1][j][i-1]))) + 8 * (la[k][j][i+1] * (u_2[k-2][j][i+1] - u_2[k+2][j][i+1] + 8 * (-u_2[k-1][j][i+1] + u_2[k+1][j][i+1]))) - (la[k][j][i+2] * (u_2[k-2][j][i+2] - u_2[k+2][j][i+2] + 8 * (-u_2[k-1][j][i+2] + u_2[k+1][j][i+2]))));
      a_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k][j-2][i-2] - u_1[k][j-2][i+2] + 8 * (-u_1[k][j-2][i-1] + u_1[k][j-2][i+1])) - 8 * (mu[k][j-1][i] * (u_1[k][j-1][i-2] - u_1[k][j-1][i+2] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j-1][i+1]))) + 8 * (mu[k][j+1][i] * (u_1[k][j+1][i-2] - u_1[k][j+1][i+2] + 8 * (-u_1[k][j+1][i-1] + u_1[k][j+1][i+1]))) - (mu[k][j+2][i] * (u_1[k][j+2][i-2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j+2][i-1] + u_1[k][j+2][i+1]))));
      a_r1 += strx[i] * strz[k] * (1e0 / 144) * (mu[k-2][j][i] * (u_2[k-2][j][i-2] - u_2[k-2][j][i+2] + 8 * (-u_2[k-2][j][i-1] + u_2[k-2][j][i+1])) - 8 * (mu[k-1][j][i] * (u_2[k-1][j][i-2] - u_2[k-1][j][i+2] + 8 * (-u_2[k-1][j][i-1] + u_2[k-1][j][i+1]))) + 8 * (mu[k+1][j][i] * (u_2[k+1][j][i-2] - u_2[k+1][j][i+2] + 8 * (-u_2[k+1][j][i-1] + u_2[k+1][j][i+1]))) - (mu[k+2][j][i] * (u_2[k+2][j][i-2] - u_2[k+2][j][i+2] + 8 * (-u_2[k+2][j][i-1] + u_2[k+2][j][i+1]))));
      // Accumulate into the output (a1 == 1, so this adds cof * a_r1).
      uacc_0[k][j][i] = a1 * uacc_0[k][j][i] + cof * a_r1;
      // ---- plane k+1: identical computation shifted one k-plane up ----
      b_mux1 = mu[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i-2] * strx[i-2];
      b_mux2 = mu[k+1][j][i-2] * strx[i-2] + mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i] + 3.0 * mu[k+1][j][i-1] * strx[i-1];
      b_mux3 = mu[k+1][j][i-1] * strx[i-1] + mu[k+1][j][i+2] * strx[i+2] + 3.0 * mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i];
      b_mux4 = mu[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i+2] * strx[i+2];
      b_muy1 = mu[k+1][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] -3e0 / 4 * mu[k+1][j-2][i] * stry[j-2];
      b_muy2 = mu[k+1][j-2][i] * stry[j-2] + mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j] + 3.0 * mu[k+1][j-1][i] * stry[j-1];
      b_muy3 = mu[k+1][j-1][i] * stry[j-1] + mu[k+1][j+2][i] * stry[j+2] + 3.0 * mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j];
      b_muy4 = mu[k+1][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] - 3e0 / 4 * mu[k+1][j+2][i] * stry[j+2];
      b_muz1 = mu[k+1-1][j][i] * strz[k+1-1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k+1-2][j][i] * strz[k+1-2];
      b_muz2 = mu[k+1-2][j][i] * strz[k+1-2] + mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k+1-1][j][i] * strz[k+1-1];
      b_muz3 = mu[k+1-1][j][i] * strz[k+1-1] + mu[k+1+2][j][i] * strz[k+1+2] + 3.0 * mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1];
      b_muz4 = mu[k+1+1][j][i] * strz[k+1+1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 /4 * mu[k+1+2][j][i] * strz[k+1+2];
      b_r1 = 1e0 / 6 * (strx[i] * ((2 * b_mux1 + la[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i-2] * strx[i-2]) * (u_0[k+1][j][i-2] - u_0[k+1][j][i]) +
        (2 * b_mux2 + la[k+1][j][i-2] * strx[i-2] + la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i] + 3 * la[k+1][j][i-1] * strx[i-1]) * (u_0[k+1][j][i-1] - u_0[k+1][j][i]) +
        (2 * b_mux3 + la[k+1][j][i-1] * strx[i-1] + la[k+1][j][i+2] * strx[i+2] + 3 * la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i]) * (u_0[k+1][j][i+1] - u_0[k+1][j][i]) +
        (2 * b_mux4 + la[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i+2] * strx[i+2]) * (u_0[k+1][j][i+2] - u_0[k+1][j][i]))
        + stry[j] * (b_muy1 * (u_0[k+1][j-2][i] - u_0[k+1][j][i]) + b_muy2 * (u_0[k+1][j-1][i] - u_0[k+1][j][i]) + b_muy3 * (u_0[k+1][j+1][i] - u_0[k+1][j][i]) + b_muy4 * (u_0[k+1][j+2][i] - u_0[k+1][j][i])) + strz[k+1] * (b_muz1 * (u_0[k+1-2][j][i] - u_0[k+1][j][i]) + b_muz2 * (u_0[k+1-1][j][i] - u_0[k+1][j][i]) + b_muz3 * (u_0[k+1+1][j][i] - u_0[k+1][j][i]) + b_muz4 * (u_0[k+1+2][j][i] - u_0[k+1][j][i])));
      b_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k+1][j][i-2] * (u_1[k+1][j-2][i-2] - u_1[k+1][j+2][i-2] + 8 * (-u_1[k+1][j-1][i-2] + u_1[k+1][j+1][i-2])) - 8 * (la[k+1][j][i-1] * (u_1[k+1][j-2][i-1] - u_1[k+1][j+2][i-1] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j+1][i-1]))) + 8 * (la[k+1][j][i+1] * (u_1[k+1][j-2][i+1] - u_1[k+1][j+2][i+1] + 8 * (-u_1[k+1][j-1][i+1] + u_1[k+1][j+1][i+1]))) - (la[k+1][j][i+2] * (u_1[k+1][j-2][i+2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j-1][i+2] + u_1[k+1][j+1][i+2]))));
      b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (la[k+1][j][i-2] * (u_2[k+1-2][j][i-2] - u_2[k+1+2][j][i-2] + 8 * (-u_2[k+1-1][j][i-2] + u_2[k+1+1][j][i-2])) - 8 * (la[k+1][j][i-1] * (u_2[k+1-2][j][i-1] - u_2[k+1+2][j][i-1] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1+1][j][i-1]))) + 8 * (la[k+1][j][i+1] * (u_2[k+1-2][j][i+1] - u_2[k+1+2][j][i+1] + 8 * (-u_2[k+1-1][j][i+1] + u_2[k+1+1][j][i+1]))) - (la[k+1][j][i+2] * (u_2[k+1-2][j][i+2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1-1][j][i+2] + u_2[k+1+1][j][i+2]))));
      b_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k+1][j-2][i] * (u_1[k+1][j-2][i-2] - u_1[k+1][j-2][i+2] + 8 * (-u_1[k+1][j-2][i-1] + u_1[k+1][j-2][i+1])) - 8 * (mu[k+1][j-1][i] * (u_1[k+1][j-1][i-2] - u_1[k+1][j-1][i+2] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j-1][i+1]))) + 8 * (mu[k+1][j+1][i] * (u_1[k+1][j+1][i-2] - u_1[k+1][j+1][i+2] + 8 * (-u_1[k+1][j+1][i-1] + u_1[k+1][j+1][i+1]))) - (mu[k+1][j+2][i] * (u_1[k+1][j+2][i-2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j+2][i-1] + u_1[k+1][j+2][i+1]))));
      b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (mu[k+1-2][j][i] * (u_2[k+1-2][j][i-2] - u_2[k+1-2][j][i+2] + 8 * (-u_2[k+1-2][j][i-1] + u_2[k+1-2][j][i+1])) - 8 * (mu[k+1-1][j][i] * (u_2[k+1-1][j][i-2] - u_2[k+1-1][j][i+2] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1-1][j][i+1]))) + 8 * (mu[k+1+1][j][i] * (u_2[k+1+1][j][i-2] - u_2[k+1+1][j][i+2] + 8 * (-u_2[k+1+1][j][i-1] + u_2[k+1+1][j][i+1]))) - (mu[k+1+2][j][i] * (u_2[k+1+2][j][i-2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1+2][j][i-1] + u_2[k+1+2][j][i+1]))));
      uacc_0[k+1][j][i] = a1 * uacc_0[k+1][j][i] + cof * b_r1;
    }
  }
}
// Computes the second displacement component's acceleration (uacc_1) of the
// SW4 variable-coefficient 4th-order elastic-wave stencil.  Each thread owns
// one (i, j) column and marches over k two planes at a time (k and k+1),
// recomputing the stencil weights for each plane.  All 3-D arrays are indexed
// through a fixed 304x304x304 layout (see the pointer casts below), so N is
// expected to match that layout — TODO confirm against the caller.
// Statement order is hand-scheduled for register reuse; do not reorder.
__global__ void __launch_bounds__ (128,2) sw4_2 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
  //Determining the block's indices: each thread handles one (i, j) column.
  int blockdim_i= (int)(blockDim.x);
  int i0 = (int)(blockIdx.x)*(blockdim_i);
  int i = max (i0, 0) + (int)(threadIdx.x);
  int blockdim_j= (int)(blockDim.y);
  int j0 = (int)(blockIdx.y)*(blockdim_j);
  int j = max (j0, 0) + (int)(threadIdx.y);
  // Assumptions
  int a1 = 1;
  double h = 3.7;   // grid spacing; cof = 1/h^2 scales the stencil result
  double cof = 1e0 / ( h * h);
  // Reinterpret the flat device pointers as 304^3 arrays for 3-D indexing.
  double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
  double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
  double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
  double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
  double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
  double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
  double (*mu)[304][304] = (double (*)[304][304])mu_in;
  double (*la)[304][304] = (double (*)[304][304])la_in;
  // a_* weights belong to plane k, b_* weights to plane k+1.
  double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
  double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
  double a_r2, b_r2;
  // NOTE(review): bitwise & on comparison results — behaves like && here
  // since each operand is 0/1, but without short-circuiting.
  if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
    #pragma unroll 3
    for (int k=2; k<=N-3; k+=2) {
      // Variable-coefficient stencil weights for plane k: x-direction.
      a_mux1 = mu[k][j][i-1] * strx[i-1];
      a_mux1 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
      a_mux1 -= 3.0 / 4.0 * mu[k][j][i-2] * strx[i-2];
      a_mux2 = mu[k][j][i-2] * strx[i-2];
      a_mux2 += mu[k][j][i+1] * strx[i+1];
      a_mux2 += 3.0 * mu[k][j][i] * strx[i];
      a_mux2 += 3.0 * mu[k][j][i-1] * strx[i-1];
      a_mux3 = mu[k][j][i-1] * strx[i-1];
      a_mux3 += mu[k][j][i+2] * strx[i+2];
      a_mux3 += 3.0 * mu[k][j][i+1] * strx[i+1];
      a_mux3 += 3.0 * mu[k][j][i] * strx[i];
      a_mux4 = mu[k][j][i+1] * strx[i+1];
      a_mux4 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
      a_mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2];
      // y-direction weights for plane k.
      a_muy1 = mu[k][j-1][i] * stry[j-1];
      a_muy1 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
      a_muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2];
      a_muy2 = mu[k][j-2][i] * stry[j-2];
      a_muy2 += mu[k][j+1][i] * stry[j+1];
      a_muy2 += 3.0 * mu[k][j][i] * stry[j];
      a_muy2 += 3.0 * mu[k][j-1][i] * stry[j-1];
      a_muy3 = mu[k][j-1][i] * stry[j-1];
      a_muy3 += mu[k][j+2][i] * stry[j+2];
      a_muy3 += 3.0 * mu[k][j+1][i] * stry[j+1];
      a_muy3 += 3.0 * mu[k][j][i] * stry[j];
      a_muy4 = mu[k][j+1][i] * stry[j+1];
      a_muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2];
      a_muy4 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
      // z-direction weights for plane k.
      a_muz1 = mu[k-1][j][i] * strz[k-1];
      a_muz1 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
      a_muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2];
      a_muz2 = mu[k-2][j][i] * strz[k-2];
      a_muz2 += mu[k+1][j][i] * strz[k+1];
      a_muz2 += 3.0 * mu[k][j][i] * strz[k];
      a_muz2 += 3.0 * mu[k-1][j][i] * strz[k-1];
      a_muz3 = mu[k-1][j][i] * strz[k-1];
      a_muz3 += mu[k+2][j][i] * strz[k+2];
      a_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
      a_muz3 += 3.0 * mu[k][j][i] * strz[k];
      a_muz4 = mu[k+1][j][i] * strz[k+1];
      a_muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
      a_muz4 -= 3.0 / 4.0 * mu[k+2][j][i] * strz[k+2];
      // a_r2: second-derivative ("diagonal") terms for plane k, x-direction.
      double _t_2_ = u_1[k][j][i-2];
      _t_2_ -= u_1[k][j][i];
      double _t_1_ = a_mux1 * _t_2_;
      double _t_3_ = u_1[k][j][i-1];
      _t_3_ -= u_1[k][j][i];
      _t_1_ += a_mux2 * _t_3_;
      double _t_4_ = u_1[k][j][i+1];
      _t_4_ -= u_1[k][j][i];
      _t_1_ += a_mux3 * _t_4_;
      double _t_5_ = u_1[k][j][i+2];
      _t_5_ -= u_1[k][j][i];
      _t_1_ += a_mux4 * _t_5_;
      double _t_0_ = strx[i] * _t_1_;
      // y-direction diagonal terms use (2*mu + la) combinations.
      double _t_8_ = u_1[k][j-2][i];
      _t_8_ -= u_1[k][j][i];
      double _t_7_ = 2.0 * a_muy1;
      double _v_23_ = la[k][j-1][i] * stry[j-1];
      _t_7_ += _v_23_;
      _t_7_ -= 3.0 / 4.0 * la[k][j][i] * stry[j];
      double _t_9_ = 3.0 * la[k][j][i] * stry[j];
      _t_9_ += 3.0 * la[k][j-1][i] * stry[j-1];
      _t_9_ += 2.0 * a_muy2;
      double _t_11_ = 3.0 * la[k][j][i] * stry[j];
      _t_11_ += 2.0 * a_muy3;
      double _t_13_ = -(3.0 / 4.0 * la[k][j][i] * stry[j]);
      _t_13_ += 2.0 * a_muy4;
      _t_7_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2];
      double _t_6_ = _t_7_ * _t_8_;
      _t_9_ += la[k][j-2][i] * stry[j-2];
      _t_9_ += la[k][j+1][i] * stry[j+1];
      double _t_10_ = u_1[k][j-1][i];
      _t_10_ -= u_1[k][j][i];
      _t_6_ += _t_9_ * _t_10_;
      _t_11_ += _v_23_;
      _t_11_ += la[k][j+2][i] * stry[j+2];
      _t_11_ += 3.0 * la[k][j+1][i] * stry[j+1];
      double _t_12_ = u_1[k][j+1][i];
      _t_12_ -= u_1[k][j][i];
      _t_6_ += _t_11_ * _t_12_;
      _t_13_ += la[k][j+1][i] * stry[j+1];
      _t_13_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2];
      double _t_14_ = u_1[k][j+2][i];
      _t_14_ -= u_1[k][j][i];
      _t_6_ += _t_13_ * _t_14_;
      _t_0_ += stry[j] * _t_6_;
      // z-direction diagonal terms for plane k.
      double _t_16_ = u_1[k-2][j][i];
      _t_16_ -= u_1[k][j][i];
      double _t_15_ = a_muz1 * _t_16_;
      double _t_17_ = -(u_1[k][j][i]);
      _t_17_ += u_1[k-1][j][i];
      _t_15_ += a_muz2 * _t_17_;
      double _t_18_ = -(u_1[k][j][i]);
      _t_18_ += u_1[k+1][j][i];
      _t_15_ += a_muz3 * _t_18_;
      double _t_19_ = -(u_1[k][j][i]);
      _t_19_ += u_1[k+2][j][i];
      _t_15_ += a_muz4 * _t_19_;
      _t_0_ += strz[k] * _t_15_;
      a_r2 = 1.0 / 6.0 * _t_0_;
      // Mixed-derivative cross terms for plane k (d/dx d/dy of u_0).
      double _t_25_ = -u_0[k][j-1][i-2];
      _t_25_ += u_0[k][j+1][i-2];
      double _t_40_ = u_0[k][j-1][i-2];
      _t_40_ -= u_0[k][j-1][i+2];
      double _t_43_ = u_0[k][j+1][i-2];
      _t_43_ -= u_0[k][j+1][i+2];
      double _t_33_ = -u_0[k][j-1][i+2];
      _t_33_ += u_0[k][j+1][i+2];
      double _t_24_ = 8.0 * _t_25_;
      _t_24_ += u_0[k][j-2][i-2];
      _t_24_ -= u_0[k][j+2][i-2];
      double _t_37_ = u_0[k][j-2][i-2];
      _t_37_ -= u_0[k][j-2][i+2];
      double _t_45_ = u_0[k][j+2][i-2];
      _t_45_ -= u_0[k][j+2][i+2];
      double _t_32_ = u_0[k][j-2][i+2];
      _t_32_ -= u_0[k][j+2][i+2];
      _t_32_ += 8.0 * _t_33_;
      double _t_22_ = mu[k][j][i-2] * _t_24_;
      double _t_28_ = -u_0[k][j-1][i-1];
      _t_28_ += u_0[k][j+1][i-1];
      double _t_41_ = -u_0[k][j-1][i-1];
      _t_41_ += u_0[k][j-1][i+1];
      double _t_44_ = -u_0[k][j+1][i-1];
      _t_44_ += u_0[k][j+1][i+1];
      double _t_31_ = -u_0[k][j-1][i+1];
      _t_31_ += u_0[k][j+1][i+1];
      double _t_27_ = 8.0 * _t_28_;
      _t_27_ += u_0[k][j-2][i-1];
      _t_27_ -= u_0[k][j+2][i-1];
      double _t_38_ = -u_0[k][j-2][i-1];
      _t_38_ += u_0[k][j-2][i+1];
      double _t_46_ = -u_0[k][j+2][i-1];
      _t_46_ += u_0[k][j+2][i+1];
      double _t_30_ = u_0[k][j-2][i+1];
      _t_30_ -= u_0[k][j+2][i+1];
      _t_30_ += 8.0 * _t_31_;
      _t_22_ -= mu[k][j][i-1] * _t_27_;
      _t_22_ += mu[k][j][i+1] * _t_30_;
      _t_22_ -= mu[k][j][i+2] * _t_32_;
      double _t_21_ = strx[i] * stry[j];
      double _t_20_ = _t_21_ * _t_22_;
      _t_37_ += 8.0 * _t_38_;
      double _t_35_ = la[k][j-2][i] * _t_37_;
      _t_40_ += 8.0 * _t_41_;
      _t_35_ -= la[k][j-1][i] * _t_40_;
      _t_43_ += 8.0 * _t_44_;
      _t_35_ += la[k][j+1][i] * _t_43_;
      _t_45_ += 8.0 * _t_46_;
      _t_35_ -= la[k][j+2][i] * _t_45_;
      double _t_34_ = strx[i] * stry[j];
      _t_20_ += _t_34_ * _t_35_;
      // Cross terms coupling y and z derivatives of u_2 (plane k).
      double _t_51_ = -u_2[k-1][j-2][i];
      _t_51_ += u_2[k+1][j-2][i];
      double _t_50_ = 8.0 * _t_51_;
      _t_50_ += u_2[k-2][j-2][i];
      _t_50_ -= u_2[k+2][j-2][i];
      double _t_48_ = la[k][j-2][i] * _t_50_;
      double _t_54_ = -u_2[k-1][j-1][i];
      _t_54_ += u_2[k+1][j-1][i];
      double _t_53_ = 8.0 * _t_54_;
      _t_53_ += u_2[k-2][j-1][i];
      _t_53_ -= u_2[k+2][j-1][i];
      _t_48_ -= la[k][j-1][i] * _t_53_;
      double _t_57_ = -u_2[k-1][j+1][i];
      _t_57_ += u_2[k+1][j+1][i];
      double _t_70_ = u_2[k+1][j+1][i];
      _t_70_ += -u_2[k+1][j-1][i];
      double _t_56_ = 8.0 * _t_57_;
      _t_56_ += u_2[k-2][j+1][i];
      _t_56_ -= u_2[k+2][j+1][i];
      _t_48_ += la[k][j+1][i] * _t_56_;
      double _t_59_ = -u_2[k-1][j+2][i];
      _t_59_ += u_2[k+1][j+2][i];
      double _t_69_ = -(u_2[k+1][j+2][i]);
      _t_69_ += 8.0 * _t_70_;
      _t_69_ += u_2[k+1][j-2][i];
      double _t_66_ = -(u_2[k-1][j+2][i]);
      _t_66_ += u_2[k-1][j-2][i];
      double _t_58_ = 8.0 * _t_59_;
      _t_58_ += u_2[k-2][j+2][i];
      _t_58_ -= u_2[k+2][j+2][i];
      _t_48_ -= la[k][j+2][i] * _t_58_;
      double _t_47_ = stry[j] * strz[k];
      _t_20_ += _t_47_ * _t_48_;
      double _t_64_ = -u_2[k-2][j-1][i];
      _t_64_ += u_2[k-2][j+1][i];
      double _t_63_ = 8.0 * _t_64_;
      _t_63_ += u_2[k-2][j-2][i];
      _t_63_ -= u_2[k-2][j+2][i];
      double _t_61_ = mu[k-2][j][i] * _t_63_;
      _t_61_ += mu[k+1][j][i] * _t_69_;
      double _t_67_ = -u_2[k-1][j-1][i];
      _t_67_ += u_2[k-1][j+1][i];
      _t_66_ += 8.0 * _t_67_;
      _t_61_ -= mu[k-1][j][i] * _t_66_;
      double _t_72_ = -u_2[k+2][j-1][i];
      _t_72_ += u_2[k+2][j+1][i];
      double _t_71_ = 8.0 * _t_72_;
      _t_71_ += u_2[k+2][j-2][i];
      _t_71_ -= u_2[k+2][j+2][i];
      _t_61_ -= mu[k+2][j][i] * _t_71_;
      double _t_60_ = stry[j] * strz[k];
      _t_20_ += _t_60_ * _t_61_;
      a_r2 += _t_20_;
      // Plane-k result; stored later together with the k+1 result.
      double uacc_1kc0jc0ic0 = cof * a_r2;
      uacc_1kc0jc0ic0 += a1 * uacc_1[k][j][i];
      // Stencil weights again, shifted to plane k+1.
      b_mux1 = mu[k+1][j][i-1] * strx[i-1];
      b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i] * strx[i];
      b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i-2] * strx[i-2];
      b_mux2 = mu[k+1][j][i-2] * strx[i-2];
      b_mux2 += mu[k+1][j][i+1] * strx[i+1];
      b_mux2 += 3.0 * mu[k+1][j][i] * strx[i];
      b_mux2 += 3.0 * mu[k+1][j][i-1] * strx[i-1];
      b_mux3 = mu[k+1][j][i-1] * strx[i-1];
      b_mux3 += mu[k+1][j][i+2] * strx[i+2];
      b_mux3 += 3.0 * mu[k+1][j][i+1] * strx[i+1];
      b_mux3 += 3.0 * mu[k+1][j][i] * strx[i];
      b_mux4 = mu[k+1][j][i+1] * strx[i+1];
      b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i+2] * strx[i+2];
      b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i] * strx[i];
      b_muy1 = mu[k+1][j-1][i] * stry[j-1];
      b_muy1 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j];
      b_muy1 -= 3.0 / 4.0 * mu[k+1][j-2][i] * stry[j-2];
      b_muy2 = mu[k+1][j-2][i] * stry[j-2];
      b_muy2 += mu[k+1][j+1][i] * stry[j+1];
      b_muy2 += 3.0 * mu[k+1][j][i] * stry[j];
      b_muy2 += 3.0 * mu[k+1][j-1][i] * stry[j-1];
      b_muy3 = mu[k+1][j-1][i] * stry[j-1];
      b_muy3 += mu[k+1][j+2][i] * stry[j+2];
      b_muy3 += 3.0 * mu[k+1][j+1][i] * stry[j+1];
      b_muy3 += 3.0 * mu[k+1][j][i] * stry[j];
      b_muy4 = mu[k+1][j+1][i] * stry[j+1];
      b_muy4 -= 3.0 / 4.0 * mu[k+1][j+2][i] * stry[j+2];
      b_muy4 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j];
      b_muz1 = mu[k][j][i] * strz[k];
      b_muz1 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1];
      b_muz1 -= 3.0 / 4.0 * mu[k-1][j][i] * strz[k-1];
      b_muz2 = mu[k-1][j][i] * strz[k-1];
      b_muz2 += mu[k+2][j][i] * strz[k+2];
      b_muz2 += 3.0 * mu[k+1][j][i] * strz[k+1];
      b_muz2 += 3.0 * mu[k][j][i] * strz[k];
      b_muz3 = mu[k][j][i] * strz[k];
      b_muz3 += mu[k+3][j][i] * strz[k+3];
      b_muz3 += 3.0 * mu[k+2][j][i] * strz[k+2];
      b_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
      b_muz4 = mu[k+2][j][i] * strz[k+2];
      b_muz4 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1];
      b_muz4 -= 3.0 / 4.0 * mu[k+3][j][i] * strz[k+3];
      // b_r2: diagonal terms for plane k+1 (z first, then x, then y).
      double _t_89_ = u_1[k-1][j][i];
      _t_89_ -= u_1[k+1][j][i];
      double _t_88_ = b_muz1 * _t_89_;
      double _t_90_ = u_1[k][j][i];
      _t_90_ -= u_1[k+1][j][i];
      _t_88_ += b_muz2 * _t_90_;
      double _t_91_ = u_1[k+2][j][i];
      _t_91_ -= u_1[k+1][j][i];
      _t_88_ += b_muz3 * _t_91_;
      double _t_92_ = u_1[k+3][j][i];
      _t_92_ -= u_1[k+1][j][i];
      _t_88_ += b_muz4 * _t_92_;
      double _t_73_ = strz[k+1] * _t_88_;
      double _t_75_ = u_1[k+1][j][i-2];
      _t_75_ -= u_1[k+1][j][i];
      double _t_74_ = b_mux1 * _t_75_;
      double _t_76_ = u_1[k+1][j][i-1];
      _t_76_ -= u_1[k+1][j][i];
      _t_74_ += b_mux2 * _t_76_;
      double _t_77_ = u_1[k+1][j][i+1];
      _t_77_ -= u_1[k+1][j][i];
      _t_74_ += b_mux3 * _t_77_;
      double _t_78_ = u_1[k+1][j][i+2];
      _t_78_ -= u_1[k+1][j][i];
      _t_74_ += b_mux4 * _t_78_;
      _t_73_ += strx[i] * _t_74_;
      double _t_81_ = u_1[k+1][j-2][i];
      _t_81_ -= u_1[k+1][j][i];
      double _t_80_ = 2.0 * b_muy1;
      double _v_76_ = la[k+1][j-1][i] * stry[j-1];
      _t_80_ += _v_76_;
      _t_80_ -= 3.0 / 4.0 * la[k+1][j][i] * stry[j];
      double _t_82_ = 3.0 * la[k+1][j][i] * stry[j];
      _t_82_ += 3.0 * la[k+1][j-1][i] * stry[j-1];
      _t_82_ += 2.0 * b_muy2;
      double _t_84_ = 3.0 * la[k+1][j][i] * stry[j];
      _t_84_ += 2.0 * b_muy3;
      double _t_86_ = -(3.0 / 4.0 * la[k+1][j][i] * stry[j]);
      _t_86_ += 2.0 * b_muy4;
      _t_80_ -= 3.0 / 4.0 * la[k+1][j-2][i] * stry[j-2];
      double _t_79_ = _t_80_ * _t_81_;
      _t_82_ += la[k+1][j-2][i] * stry[j-2];
      double _v_79_ = la[k+1][j+1][i] * stry[j+1];
      _t_84_ += 3.0 * la[k+1][j+1][i] * stry[j+1];
      _t_82_ += _v_79_;
      double _t_83_ = u_1[k+1][j-1][i];
      _t_83_ -= u_1[k+1][j][i];
      _t_79_ += _t_82_ * _t_83_;
      _t_84_ += _v_76_;
      _t_84_ += la[k+1][j+2][i] * stry[j+2];
      _t_86_ -= 3.0 / 4.0 * la[k+1][j+2][i] * stry[j+2];
      double _t_85_ = u_1[k+1][j+1][i];
      _t_85_ -= u_1[k+1][j][i];
      _t_79_ += _t_84_ * _t_85_;
      _t_86_ += _v_79_;
      double _t_87_ = -(u_1[k+1][j][i]);
      _t_87_ += u_1[k+1][j+2][i];
      _t_79_ += _t_86_ * _t_87_;
      _t_73_ += stry[j] * _t_79_;
      b_r2 = 1.0 / 6.0 * _t_73_;
      // Mixed-derivative cross terms for plane k+1 (y/z coupling of u_2).
      double _t_137_ = -u_2[k-1][j-1][i];
      _t_137_ += u_2[k-1][j+1][i];
      double _t_126_ = u_2[k-1][j-1][i];
      _t_126_ -= u_2[k+3][j-1][i];
      double _t_129_ = u_2[k-1][j+1][i];
      _t_129_ -= u_2[k+3][j+1][i];
      double _t_145_ = -u_2[k+3][j-1][i];
      _t_145_ += u_2[k+3][j+1][i];
      double _t_136_ = 8.0 * _t_137_;
      _t_136_ += u_2[k-1][j-2][i];
      _t_136_ -= u_2[k-1][j+2][i];
      double _t_123_ = u_2[k-1][j-2][i];
      _t_123_ -= u_2[k+3][j-2][i];
      double _t_131_ = u_2[k-1][j+2][i];
      _t_131_ -= u_2[k+3][j+2][i];
      double _t_144_ = u_2[k+3][j-2][i];
      _t_144_ -= u_2[k+3][j+2][i];
      _t_144_ += 8.0 * _t_145_;
      double _t_134_ = mu[k-1][j][i] * _t_136_;
      double _t_143_ = -u_2[k+2][j-1][i];
      _t_143_ += u_2[k+2][j+1][i];
      double _t_127_ = u_2[k+2][j-1][i];
      _t_127_ += -u_2[k][j-1][i];
      double _t_130_ = u_2[k+2][j+1][i];
      _t_130_ += -u_2[k][j+1][i];
      double _t_140_ = -u_2[k][j-1][i];
      _t_140_ += u_2[k][j+1][i];
      double _t_142_ = 8.0 * _t_143_;
      _t_142_ += u_2[k+2][j-2][i];
      _t_142_ -= u_2[k+2][j+2][i];
      double _t_124_ = u_2[k+2][j-2][i];
      _t_124_ += -u_2[k][j-2][i];
      double _t_132_ = u_2[k+2][j+2][i];
      _t_132_ += -u_2[k][j+2][i];
      double _t_139_ = u_2[k][j-2][i];
      _t_139_ -= u_2[k][j+2][i];
      _t_139_ += 8.0 * _t_140_;
      _t_134_ += mu[k+2][j][i] * _t_142_;
      _t_134_ -= mu[k][j][i] * _t_139_;
      _t_134_ -= mu[k+3][j][i] * _t_144_;
      double _t_135_ = stry[j] * strz[k+1];
      double _t_96_ = strx[i] * stry[j];
      double _t_133_ = _t_135_ * 1.0 / 144.0;
      double _t_93_ = _t_133_ * _t_134_;
      _t_123_ += 8.0 * _t_124_;
      double _t_121_ = la[k+1][j-2][i] * _t_123_;
      _t_126_ += 8.0 * _t_127_;
      _t_121_ -= la[k+1][j-1][i] * _t_126_;
      _t_129_ += 8.0 * _t_130_;
      _t_121_ += la[k+1][j+1][i] * _t_129_;
      _t_131_ += 8.0 * _t_132_;
      _t_121_ -= la[k+1][j+2][i] * _t_131_;
      double _t_120_ = _t_135_;
      _t_93_ += _t_120_ * _t_121_;
      // x/y coupling of u_0 for plane k+1.
      double _t_98_ = -u_0[k+1][j-1][i-2];
      _t_98_ += u_0[k+1][j+1][i-2];
      double _t_113_ = u_0[k+1][j-1][i-2];
      _t_113_ -= u_0[k+1][j-1][i+2];
      double _t_116_ = u_0[k+1][j+1][i-2];
      _t_116_ -= u_0[k+1][j+1][i+2];
      double _t_106_ = -u_0[k+1][j-1][i+2];
      _t_106_ += u_0[k+1][j+1][i+2];
      double _t_97_ = 8.0 * _t_98_;
      _t_97_ += u_0[k+1][j-2][i-2];
      _t_97_ -= u_0[k+1][j+2][i-2];
      double _t_110_ = u_0[k+1][j-2][i-2];
      _t_110_ -= u_0[k+1][j-2][i+2];
      double _t_118_ = u_0[k+1][j+2][i-2];
      _t_118_ -= u_0[k+1][j+2][i+2];
      double _t_105_ = u_0[k+1][j-2][i+2];
      _t_105_ -= u_0[k+1][j+2][i+2];
      _t_105_ += 8.0 * _t_106_;
      double _t_95_ = mu[k+1][j][i-2] * _t_97_;
      double _t_101_ = -u_0[k+1][j-1][i-1];
      _t_101_ += u_0[k+1][j+1][i-1];
      double _t_114_ = -u_0[k+1][j-1][i-1];
      _t_114_ += u_0[k+1][j-1][i+1];
      double _t_117_ = -u_0[k+1][j+1][i-1];
      _t_117_ += u_0[k+1][j+1][i+1];
      double _t_104_ = -u_0[k+1][j-1][i+1];
      _t_104_ += u_0[k+1][j+1][i+1];
      double _t_100_ = 8.0 * _t_101_;
      _t_100_ += u_0[k+1][j-2][i-1];
      _t_100_ -= u_0[k+1][j+2][i-1];
      double _t_111_ = -u_0[k+1][j-2][i-1];
      _t_111_ += u_0[k+1][j-2][i+1];
      double _t_119_ = -u_0[k+1][j+2][i-1];
      _t_119_ += u_0[k+1][j+2][i+1];
      double _t_103_ = u_0[k+1][j-2][i+1];
      _t_103_ -= u_0[k+1][j+2][i+1];
      _t_103_ += 8.0 * _t_104_;
      _t_95_ -= mu[k+1][j][i-1] * _t_100_;
      _t_95_ += mu[k+1][j][i+1] * _t_103_;
      _t_95_ -= mu[k+1][j][i+2] * _t_105_;
      double _t_94_ = _t_96_ * 1.0 / 144.0;
      _t_93_ += _t_94_ * _t_95_;
      _t_110_ += 8.0 * _t_111_;
      double _t_108_ = la[k+1][j-2][i] * _t_110_;
      _t_113_ += 8.0 * _t_114_;
      _t_108_ -= la[k+1][j-1][i] * _t_113_;
      _t_116_ += 8.0 * _t_117_;
      _t_108_ += la[k+1][j+1][i] * _t_116_;
      _t_118_ += 8.0 * _t_119_;
      _t_108_ -= la[k+1][j+2][i] * _t_118_;
      double _t_107_ = _t_96_;
      _t_93_ += _t_107_ * _t_108_;
      b_r2 += _t_93_;
      double _v_105_ = cof * b_r2;
      double uacc_1kp1jc0ic0 = _v_105_;
      uacc_1kp1jc0ic0 += a1 * uacc_1[k+1][j][i];
      // Write both planes' results back to global memory.
      uacc_1[k][j][i] = uacc_1kc0jc0ic0;
      uacc_1[k+1][j][i] = uacc_1kp1jc0ic0;
    }
  }
}
// Computes the third displacement component's acceleration (uacc_2) of the
// SW4 variable-coefficient 4th-order elastic-wave stencil.  One thread per
// (i, j) column; the k loop walks every interior plane.  Arrays are indexed
// through a fixed 304x304x304 layout, so N is expected to match that layout
// — TODO confirm against the caller.
__global__ void __launch_bounds__ (128,2) sw4_3 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
  //Determining the block's indices: each thread handles one (i, j) column.
  int blockdim_i= (int)(blockDim.x);
  int i0 = (int)(blockIdx.x)*(blockdim_i);
  int i = max (i0, 0) + (int)(threadIdx.x);
  int blockdim_j= (int)(blockDim.y);
  int j0 = (int)(blockIdx.y)*(blockdim_j);
  int j = max (j0, 0) + (int)(threadIdx.y);
  // Assumptions
  int a1 = 1;
  double h = 3.7;   // grid spacing; cof = 1/h^2 scales the stencil result
  double cof = 1e0 / ( h * h);
  // Reinterpret the flat device pointers as 304^3 arrays for 3-D indexing.
  double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
  double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
  double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
  double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
  double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
  double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
  double (*mu)[304][304] = (double (*)[304][304])mu_in;
  double (*la)[304][304] = (double (*)[304][304])la_in;
  double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
  double r1, r2, r3;
  // NOTE(review): bitwise & on comparison results — behaves like && here
  // since each operand is 0/1, but without short-circuiting.
  if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
    #pragma unroll 10
    for (int k=2; k<=N-3; k++) {
      // Variable-coefficient stencil weights along x, y and z.
      mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
      mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
      mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
      mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
      muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
      muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
      muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
      muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
      muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
      muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
      muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
      muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
      // Second-derivative (diagonal) terms; the z direction uses (2*mu + la).
      r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k][j][i-2] - u_2[k][j][i]) + mux2 * (u_2[k][j][i-1] - u_2[k][j][i]) + mux3 * (u_2[k][j][i+1] - u_2[k][j][i]) + mux4 * (u_2[k][j][i+2] - u_2[k][j][i])) +
        stry[j] * (muy1 * (u_2[k][j-2][i] - u_2[k][j][i]) + muy2 * (u_2[k][j-1][i] - u_2[k][j][i]) + muy3 * (u_2[k][j+1][i] - u_2[k][j][i]) + muy4 * (u_2[k][j+2][i] - u_2[k][j][i])) +
        strz[k] * ((2 * muz1 + la[k-1][j][i] * strz[k-1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k-2][j][i] * strz[k-2]) * (u_2[k-2][j][i] - u_2[k][j][i]) +
        (2 * muz2 + la[k-2][j][i] * strz[k-2] + la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k] + 3 * la[k-1][j][i] * strz[k-1]) * (u_2[k-1][j][i] - u_2[k][j][i]) +
        (2 * muz3 + la[k-1][j][i] * strz[k-1] + la[k+2][j][i] * strz[k+2] + 3 * la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k]) * (u_2[k+1][j][i] - u_2[k][j][i]) +
        (2 * muz4 + la[k+1][j][i] * strz[k+1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k+2][j][i] * strz[k+2]) * (u_2[k+2][j][i] - u_2[k][j][i])));
      // 4th-order mixed derivatives (cross terms), each scaled by 1/144.
      r3 += strx[i] * strz[k] * (1e0 / 144) * (mu[k][j][i-2] * (u_0[k-2][j][i-2] - u_0[k+2][j][i-2] + 8 * (-u_0[k-1][j][i-2] + u_0[k+1][j][i-2])) - 8 * (mu[k][j][i-1] * (u_0[k-2][j][i-1] - u_0[k+2][j][i-1] + 8 * (-u_0[k-1][j][i-1] + u_0[k+1][j][i-1]))) + 8 * (mu[k][j][i+1] * (u_0[k-2][j][i+1] - u_0[k+2][j][i+1] + 8 * (-u_0[k-1][j][i+1] + u_0[k+1][j][i+1]))) - (mu[k][j][i+2] * (u_0[k-2][j][i+2] - u_0[k+2][j][i+2] + 8 * (-u_0[k-1][j][i+2] + u_0[k+1][j][i+2]))));
      r3 += stry[j] * strz[k] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k-2][j-2][i] - u_1[k+2][j-2][i] + 8 * (-u_1[k-1][j-2][i] + u_1[k+1][j-2][i])) - 8 * (mu[k][j-1][i] * (u_1[k-2][j-1][i] - u_1[k+2][j-1][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k+1][j-1][i]))) + 8 * (mu[k][j+1][i] * (u_1[k-2][j+1][i] - u_1[k+2][j+1][i] + 8 * (-u_1[k-1][j+1][i] + u_1[k+1][j+1][i]))) - (mu[k][j+2][i] * (u_1[k-2][j+2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k-1][j+2][i] + u_1[k+1][j+2][i]))));
      r3 += strx[i] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_0[k-2][j][i-2] - u_0[k-2][j][i+2] + 8 * (-u_0[k-2][j][i-1] + u_0[k-2][j][i+1])) - 8 * (la[k-1][j][i] * (u_0[k-1][j][i-2] - u_0[k-1][j][i+2] + 8 * (-u_0[k-1][j][i-1] + u_0[k-1][j][i+1]))) + 8 * (la[k+1][j][i] * (u_0[k+1][j][i-2] - u_0[k+1][j][i+2] + 8 * (-u_0[k+1][j][i-1] + u_0[k+1][j][i+1]))) - (la[k+2][j][i] * (u_0[k+2][j][i-2] - u_0[k+2][j][i+2] + 8 * (-u_0[k+2][j][i-1] + u_0[k+2][j][i+1]))));
      r3 += stry[j] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_1[k-2][j-2][i] - u_1[k-2][j+2][i] + 8 * (-u_1[k-2][j-1][i] + u_1[k-2][j+1][i])) - 8 * (la[k-1][j][i] * (u_1[k-1][j-2][i] - u_1[k-1][j+2][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k-1][j+1][i]))) + 8 * (la[k+1][j][i] * (u_1[k+1][j-2][i] - u_1[k+1][j+2][i] + 8 * (-u_1[k+1][j-1][i] + u_1[k+1][j+1][i]))) - (la[k+2][j][i] * (u_1[k+2][j-2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k+2][j-1][i] + u_1[k+2][j+1][i]))));
      // Accumulate into uacc_2 (a1 = 1 keeps the previous contents).
      uacc_2[k][j][i] = a1 * uacc_2[k][j][i] + cof * r3;
    }
  }
}
// Host driver for the SW4 stencil: copies all inputs to the device, runs the
// three component kernels (sw4_1/2/3), and copies the accelerations back.
// N is the edge length of the N^3 grids.  NOTE(review): the kernels index
// through a fixed 304x304x304 layout, so N is expected to be 304 — confirm
// against the caller.
// Improvement over the original: error checks after each kernel launch and
// after the device-to-host copies (the file's check_error helper was only
// used after allocations).
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
  double *uacc_0;
  cudaMalloc (&uacc_0, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for uacc_0\n");
  cudaMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *uacc_1;
  cudaMalloc (&uacc_1, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for uacc_1\n");
  cudaMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *uacc_2;
  cudaMalloc (&uacc_2, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for uacc_2\n");
  cudaMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *u_0;
  cudaMalloc (&u_0, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for u_0\n");
  cudaMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *u_1;
  cudaMalloc (&u_1, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for u_1\n");
  cudaMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *u_2;
  cudaMalloc (&u_2, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for u_2\n");
  cudaMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *mu;
  cudaMalloc (&mu, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for mu\n");
  cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *la;
  cudaMalloc (&la, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for la\n");
  cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  // The stretching arrays are 1-D (one entry per grid line).
  double *strx;
  cudaMalloc (&strx, sizeof(double)*N);
  check_error ("Failed to allocate device memory for strx\n");
  cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
  double *stry;
  cudaMalloc (&stry, sizeof(double)*N);
  check_error ("Failed to allocate device memory for stry\n");
  cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
  double *strz;
  cudaMalloc (&strz, sizeof(double)*N);
  check_error ("Failed to allocate device memory for strz\n");
  cudaMemcpy (strz, h_strz, sizeof(double)*N, cudaMemcpyHostToDevice);
  // 2-D launch over the (i, j) plane; each kernel loops over k internally.
  dim3 blockconfig (16, 8);
  dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
  sw4_1 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
  check_error ("Kernel launch failed for sw4_1\n");
  sw4_2 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
  check_error ("Kernel launch failed for sw4_2\n");
  sw4_3 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
  check_error ("Kernel launch failed for sw4_3\n");
  // Blocking copies: these also synchronize with the kernels above.
  cudaMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
  check_error ("Failed to copy results back to host\n");
  cudaFree (uacc_0);
  cudaFree (uacc_1);
  cudaFree (uacc_2);
  cudaFree (u_0);
  cudaFree (u_1);
  cudaFree (u_2);
  cudaFree (mu);
  cudaFree (la);
  cudaFree (strx);
  cudaFree (stry);
  cudaFree (strz);
}
|
124
|
#include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
#define iceil(num, den) (num + den - 1) / den
#define ARRAY_SIZE 20 //must be an even number; this number/2 = number of points //sets random array and constant mem size
//#define BIN 100 //divides the grid into square bins to vote on. perfect square value
#define NUM_LINES 4 //top X voted lines. Picks first X Largest from top left to bottom right of grid space.
/*GRID evaluated for bin voting
* Must always be a square grid with origin at center
*/
#define dimension 5
#define LXBOUND (-1*dimension) //lowest X
#define RXBOUND (dimension) //highest X
#define LYBOUND (-1*dimension) //lowest Y
#define UYBOUND (dimension) //highest Y
////////////////////////////////
#define INCREMENT 1 //precision, length of 1 side of the square(bin)
//The (abs)difference between between two sides is the length of the grid. Length/Increment determines how many bins
#define column (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT)
__constant__ int d_coordarray[ARRAY_SIZE];//Place coordinates in constant memory
//show grid with votes. Becomes unuseful when bins > 20x20
// Dump the accumulator grid to stdout, one tab-separated row per line.
// Only practical while the grid stays small (roughly 20x20 bins or less).
void printVotes(int *h_binarray) {
    const int stride = column;  // bins per column; entries are laid out column-major
    for (int row = 0; row < stride; ++row) {
        // Step across the row by whole columns.
        for (int idx = row; idx < stride * stride; idx += stride)
            std::cout << h_binarray[idx] << "\t";
        std::cout << std::endl;
    }
}
// Convert from array index to representative slope
// Map an accumulator bin index to the slope value at the center of its bin.
// Starting from the middle of the grid, widen a search band by one column
// per step until the index falls inside it; the number of widening steps is
// the bin's horizontal displacement from the center.
float slopeCalculator(int index) {
    const int center = ((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT) / 2;
    int steps = 0;
    int band = column;
    // Grow the band [center - band, center + band] until it contains index.
    while (index < center - band || index > center + band) {
        band += column;
        ++steps;
    }
    // Return the horizontal center of the matched bin.
    return (steps * INCREMENT) + (INCREMENT / 2.0);
}
// Convert from array index to representative intercept
// Map an accumulator bin index to the intercept value at the center of its
// bin.  The index is reduced modulo the column length, then matched against
// a pair of cursors that start at the two middle rows and move outward one
// row per step; the number of steps is the vertical displacement.
float interceptCalculator(int index) {
    const int col = ((((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT));
    const int check = index % col;  // position folded into a single column
    float displacement = 0.0;
    int upper = column / 2;
    int lower = column / 2 - 1;
    // Walk the two cursors outward until one of them lands on check.
    while (check != upper && check != lower) {
        ++displacement;
        ++upper;
        --lower;
    }
    // Return the vertical center of the matched bin.
    return (float)(displacement * INCREMENT) + (INCREMENT / 2.0);
}
// Find n highest indexes in the array
// Sort the bins by vote count (descending, in place) and report the
// NUM_LINES strongest lines, translating each winning bin index back into a
// slope/intercept pair; finally print the average slope and intercept.
// Fix over the original: the temporary index array was leaked (new[] with no
// delete[]).
void highest_index(int *h_binarray) {
    const int size = ((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT);
    const int col = (((RXBOUND - LXBOUND)*(RXBOUND - LXBOUND)) / ((RXBOUND + UYBOUND) * INCREMENT));
    // index[i] tracks each vote's original bin position through the sort.
    int *index = new int[size];
    for (int i = 0; i < size; ++i)
        index[i] = i;
    bool stop = true;
    int temp, temp2;
    // Bubble sort (descending) with early exit when a pass makes no swap.
    for (int i = 1; (i <= size) && stop; ++i) {
        stop = false;
        for (int j = 0; j < (size - 1); ++j) {
            if (h_binarray[j + 1] > h_binarray[j]) {
                temp = h_binarray[j];
                temp2 = index[j];
                h_binarray[j] = h_binarray[j + 1];
                index[j] = index[j + 1];
                h_binarray[j + 1] = temp;
                index[j + 1] = temp2;
                stop = true;
            }
        }
    }
    // Report the top NUM_LINES bins; the sign of slope/intercept is decided
    // by which half of the grid the original bin index fell into.
    float totalslope = 0.0, totalintercept = 0.0;
    for (int i = 0; i < NUM_LINES; ++i) {
        const float slope = slopeCalculator(index[i]);
        const float intercept = interceptCalculator(index[i]);
        std::cout << "[" << i << "]: ";
        if (index[i] < (size / 2)) {
            std::cout << "slope= -" << slope << " and " << std::endl;
            totalslope = totalslope - slope;
        } else {
            std::cout << "slope = " << slope << " and " << std::endl;
            totalslope = totalslope + slope;
        }
        if (index[i] % col < (col / 2)) {
            std::cout << " and intercept = " << intercept << std::endl
                << "From point: " << index[i] << std::endl;
            totalintercept = totalintercept + intercept;
        } else {
            std::cout << " and intercept = -" << intercept << std::endl
                << "From point: " << index[i] << std::endl;
            totalintercept = totalintercept - intercept;
        }
        std::cout << "with value = " << h_binarray[i] << std::endl;
    }
    std::cout << "=============" << std::endl;
    std::cout << "The average of these slopes is: " << totalslope / NUM_LINES << std::endl;
    std::cout << "The average of these intercept is: " << totalintercept / NUM_LINES << std::endl;
    std::cout << std::endl;
    delete[] index;  // fix: was leaked in the original
}
//kernel functions
// Hough-transform voting kernel: one thread per input point.  Each point
// (x, y) maps to the line  v = -x*m + y  in the discretized (slope,
// intercept) parameter space; the thread scans every bin of the parameter
// grid and atomically increments each bin its line passes through.
// Expects a 1-D launch with size/2 total threads (see houghTransform), and
// d_binarray sized to the full bin grid.  Coordinates are read from the
// d_coordarray constant-memory array as interleaved x,y pairs.
__global__ void kernelHough(int size, int* d_binarray) {
/*
take a piece of the array. discretize into y=mx+b format per point. check all points and increment all bins touched
at the end recombine all shared memory to a global bin tally. Take the most significant X numbers as lines.
discretized from point(1,1) ==(m,n)==> (-1,1)
check each bin for count and sum them to a global array in sync
NUM of coordinates will check all bins for their own equation and increment appropriately
*/
// Number from 0 through arraysize / 2 — selects this thread's (x, y) pair.
const int thread = 2 * (blockDim.x * blockIdx.x + threadIdx.x);
// Slope in discretized space = -x
const float slope = -1.0 * d_coordarray[thread];
// Intercept in discretized space = y
const float intercept = d_coordarray[thread + 1];
int counter = 0;//keeps current array index being checked (column-major walk)
//loop through entire parameter grid, one INCREMENT-sized square at a time
for (float x = LXBOUND; x < RXBOUND; x += INCREMENT) {
const float xMin = x;
const float xMax = x + INCREMENT;
for (float y = UYBOUND; y > LYBOUND; y -= INCREMENT) {
const float yMin = y - INCREMENT;
const float yMax = y;
//calculates possible y range associated with the known x range
const float lower_range = slope * xMin + intercept;
const float upper_range = slope * xMax + intercept;
//if the line enters this bin's y range at either x edge, vote for the bin
if ((lower_range <= yMax && lower_range >= yMin) || (upper_range <= yMax && upper_range >= yMin))
atomicAdd(&d_binarray[counter], 1);//increment bin, protected from race condition
counter++;
}
}
}
//prep function
// Host driver for the Hough transform: copies the interleaved (x, y)
// coordinate pairs into constant memory, launches one voting thread per
// point, then prints and analyses the resulting bin grid.
// h_input_array holds size ints as x,y pairs (size/2 points).
// Fixes over the original: the device bin array is zeroed before voting
// (cudaMalloc leaves memory uninitialized, so the atomicAdd votes landed on
// garbage), and both the device and host bin buffers are released.
void houghTransform(int* h_input_array, int size) {
    int *d_binarray;
    int *h_binarray = new int[((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)];
    // Length of the square grid for bins * size of int
    const int binarraysize = (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) * sizeof(int);
    const int coordarraysize = size * sizeof(int);
    // Copy coordinates to Constant Memory
    cudaMemcpyToSymbol(d_coordarray, h_input_array, coordarraysize);
    cudaMalloc((void**)&d_binarray, binarraysize);
    // Start every bin at zero votes; cudaMalloc does not initialize memory.
    cudaMemset(d_binarray, 0, binarraysize);
    // 1-D Block
    dim3 myBlockDim(1, 1, 1);
    // ((size / 2), 1, 1); 1d grid — one single-thread block per point
    dim3 myGridDim((size/2), 1, 1);
    kernelHough <<<myGridDim, myBlockDim>>> (size, d_binarray);
    cudaMemcpy(h_binarray, d_binarray, binarraysize, cudaMemcpyDeviceToHost);
    if(INCREMENT>=0.5)printVotes(h_binarray);
    highest_index(h_binarray);
    // Release device and host scratch buffers (the original leaked both).
    cudaFree(d_binarray);
    delete[] h_binarray;
}
// Entry point: seeds the RNG, builds a fixed test set of (x, y) pairs that
// all lie on the line y = 2x + 1, and runs the Hough transform on it.
// Fix over the original: the random array (allocated but not passed to the
// transform) is now freed instead of leaked.
int main() {
    // Seed RNG
    srand(time(0));
    // Test case array: interleaved x,y pairs on y = 2x + 1
    int test[ARRAY_SIZE] = { 1,3,2,5,3,7,4,9,5,11,6,13,7,15,8,17,9,19,10,21};
    // Random array initializer (currently unused by the transform below)
    int *random = new int[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; ++i)
        random[i] = (rand() % 10) + 1;
    // Begin test function
    houghTransform(test, ARRAY_SIZE);
    delete[] random;  // fix: was leaked in the original
    return 0;
}
|
125
|
#include "elementwise.cuh"
namespace {
// Device-side binary-operation helpers with internal linkage.
// NOTE(review): neither helper is referenced by the kernels in this file;
// the kernels inline + and * directly.  Kept for potential generic use.
template<class T>
__device__ T multiplies(const T &lhs, const T &rhs) { return lhs * rhs; }
template<class T>
__device__ T plus(const T &lhs, const T &rhs) { return lhs + rhs; }
}
namespace kernels {
// dest[i] = a[i] + b[i]; one thread per element, surplus threads exit.
template<class T>
__global__ void vector_add_cuda(const T* const a, const T* const b, T* dest, int size) {
    const auto i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) {
        dest[i] = a[i] + b[i];
    }
}
// dest[i] = a[i] * b[i]; one thread per element, surplus threads exit.
template<class T>
__global__ void vector_mul_cuda(const T* const a, const T* const b, T* dest, int size) {
    const auto i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) {
        dest[i] = a[i] * b[i];
    }
}
// Launch element-wise addition of a and b into dest (equal sizes assumed —
// TODO confirm NDBuffer guarantees this at the call sites).
template<class T>
void vector_add(const NDBuffer<T>& a, const NDBuffer<T>& b, NDBuffer<T>& dest) {
    const int block_size = 256;
    const int size = a.size();
    // Integer ceiling division: exact for every size, avoiding the
    // double-precision std::ceil round-trip of the original.
    const int grid_size = (size + block_size - 1) / block_size;
    vector_add_cuda<<<grid_size, block_size>>>(a.ptr(), b.ptr(), dest.ptr(), size);
}
// Launch element-wise multiplication of a and b into dest.
template<class T>
void vector_multiply(const NDBuffer<T>& a, const NDBuffer<T>& b, NDBuffer<T>& dest) {
    const int block_size = 256;
    const int size = a.size();
    const int grid_size = (size + block_size - 1) / block_size;
    vector_mul_cuda<<<grid_size, block_size>>>(a.ptr(), b.ptr(), dest.ptr(), size);
}
// Explicit instantiations for float buffers.
template void vector_add<float>(const NDBuffer<float>& a, const NDBuffer<float>& b, NDBuffer<float>& dest);
template void vector_multiply<float>(const NDBuffer<float>& a, const NDBuffer<float>& b, NDBuffer<float>& dest);
}
|
126
|
#include "includes.h"
/**
 * RMSProp-style running accumulator update (grid-stride loop):
 *   history[i] = sqrt(factor * history[i]^2 + (1 - factor) * grad[i]^2)
 * @param history in/out accumulator values
 * @param grad    current gradient values
 * @param factor  decay factor in [0, 1]
 * @param len     number of elements
 */
__global__ void kRMSProp(float *history, float *grad, float factor, int len) {
	const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
	const unsigned int numThreads = blockDim.x * gridDim.x;
	for (unsigned int i = idx; i < len; i += numThreads) {
		const float h = history[i];
		const float g = grad[i];
		// sqrtf and the 1.0f literal keep the whole computation in single precision.
		history[i] = sqrtf(factor * h * h + (1.0f - factor) * g * g);
	}
}
|
127
|
//-nvcc -arch=sm_11 -m64 -O3 main.cu -o atomic.bin
#include<iostream>
#include<cstdlib>
#include <cuda_runtime.h>
#include <cassert>
#include <vector>
// Checks a CUDA runtime call. FIX: the call is evaluated exactly once (the
// previous version re-executed `call` inside cudaGetErrorString). Also fixes
// the "ERRO" typo in the reported message.
#define CHECK_ERROR(call) do { \
    cudaError_t err_ = (call); \
    if( cudaSuccess != err_) { \
    std::cerr << std::endl << "CUDA ERROR: " << \
    cudaGetErrorString(err_) << " in file: " << __FILE__ \
    << " in line: " << __LINE__ << std::endl; \
    exit(0); \
    } } while (0)
// Each thread writes its 1-based global index into vet; additionally the
// first thread of every block atomically increments flag[0], so flag[0]
// ends up equal to the number of blocks launched.
__global__
void kernel (int *vet, int *flag){
	const unsigned int gid = blockDim.x * blockIdx.x + threadIdx.x;
	vet[gid] = gid + 1;
	// Only lane 0 of each block bumps the counter; atomicAdd avoids the race
	// between blocks.
	if (threadIdx.x == 0)
		atomicAdd(&flag[0], 1);
}
using namespace std;
int main(int argc, char *argv[]){
	int dominio = 32,    // total number of elements
	threads = 4;         // threads per block
	vector <int> h_vet;
	int *d_Vet = NULL,
	*d_Flag = NULL;
	cout << "\nOperacao atomica\n";
	// Reset the device.
	CHECK_ERROR(cudaDeviceReset());
	// Allocate memory (now checked — these calls were previously unchecked).
	h_vet.resize(dominio);
	CHECK_ERROR(cudaMalloc(reinterpret_cast<void**> (&d_Vet), dominio * sizeof(int)));
	CHECK_ERROR(cudaMalloc(reinterpret_cast<void**> (&d_Flag), 1 * sizeof(int)));
	// Initialize variables. FIX: the vector holds int, not float (the sizes
	// happen to match, but the type was wrong).
	bzero(&(h_vet[0]), dominio * sizeof(int));
	CHECK_ERROR(cudaMemset(d_Vet, 0, dominio * sizeof(int)));
	CHECK_ERROR(cudaMemset(d_Flag, 0, 1 * sizeof(int)));
	int blocos = dominio / threads;
	cout << "Blocos: " << blocos << endl;
	cout << "Threads: " << threads << endl;
	kernel<<<blocos, threads>>> (d_Vet, d_Flag);
	// Catch launch-configuration errors before the blocking sync.
	CHECK_ERROR(cudaGetLastError());
	CHECK_ERROR(cudaDeviceSynchronize());
	CHECK_ERROR(cudaMemcpy(&(h_vet[0]), d_Vet, dominio * sizeof(int), cudaMemcpyDeviceToHost));
	for (int k = 0; k < dominio; k++)
		cout << h_vet[k] << endl;
	cout << endl;
	CHECK_ERROR(cudaMemcpy(&(h_vet[0]), d_Flag, 1 * sizeof(int), cudaMemcpyDeviceToHost));
	cout << "cada thread[0] soma 1: " << h_vet[0] << endl;
	cudaFree(d_Vet);
	cudaFree(d_Flag);
	return EXIT_SUCCESS;
}
|
128
|
#include <stdio.h>
#include <string>
#include <stdlib.h>
#include <cstdio>
#include <sstream>
#include <iostream>
#include <cuda.h>
#include <cmath>
//initialize 2 5 x 5 matrices represented as an array of floats
//each of the entry is equal to its position (i.e. A_00 = 0, A_01 = 1, A_44 = 24)
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
//enumeration for matrix function
#define MATRIX_ADD 0
#define MATRIX_SUB 1
#define MATRIX_MUL 2
const int Width = 5; //matrix dimension
const int TILE_WIDTH = 2; //tile size
const int size = Width*Width*sizeof(float); //memory size of a matrix
float * Md,* Nd,* Pd;
// Reports the last pending CUDA error (prefixed with `msg` and, when line >= 0,
// the source line) and terminates the process; returns silently on success.
void checkCUDAError(const char *msg, int line = -1)
{
	cudaError_t err = cudaGetLastError();
	if (err == cudaSuccess)
		return;
	if (line >= 0)
		fprintf(stderr, "Line %d: ", line);
	fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
	exit(EXIT_FAILURE);
}
// Element-wise matrix addition: Ad = Md + Nd.
// Assumes a single block of Width x Width threads (one thread per element).
__global__ void parallel_matrix_add(float * Md, float * Nd, float * Ad)
{
	const int idx = threadIdx.y * Width + threadIdx.x;
	Ad[idx] = Md[idx] + Nd[idx];
}
// Element-wise matrix subtraction: Ad = Md - Nd.
// Assumes a single block of Width x Width threads (one thread per element).
__global__ void parallel_matrix_sub(float * Md, float * Nd, float * Ad)
{
	const int idx = threadIdx.y * Width + threadIdx.x;
	Ad[idx] = Md[idx] - Nd[idx];
}
// Matrix multiplication: Pd = Md * Nd (Width x Width, row-major).
// Assumes a single block of Width x Width threads; each thread computes one
// output element as a dot product of its row of Md with its column of Nd.
__global__ void parallel_matrix_mul( float* Md, float* Nd, float* Pd)
{
	const int col = threadIdx.x;
	const int row = threadIdx.y;
	float acc = 0.0f;
	for (int k = 0; k < Width; ++k)
		acc += Md[row * Width + k] * Nd[k * Width + col];
	Pd[row * Width + col] = acc;
}
//CUDA Data Transfer & parallel versioin
void MatrixOnDevice(float *M, float *N, float *P, int width, int operation)
{
//load Md and Nd to device memory
cudaMalloc((void**)&Md, size);
checkCUDAErrorWithLine("Kernel failed!");
cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&Nd, size);
checkCUDAErrorWithLine("Kernel failed!");
cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
checkCUDAErrorWithLine("Kernel failed!");
//alocate Pon the device
cudaMalloc((void**)&Pd, size);
checkCUDAErrorWithLine("Kernel failed!");
//kernel invocation
//dim3 dimGrid(Width/ TILE_WIDTH, Width/ TILE_WIDTH);
//dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
dim3 dimGrid(1,1);
dim3 dimBlock(Width, Width);
switch(operation){
case MATRIX_ADD:
parallel_matrix_add<<<dimGrid, dimBlock>>>(Md, Nd, Pd);
break;
case MATRIX_SUB:
parallel_matrix_sub<<<dimGrid, dimBlock>>>(Md, Nd, Pd);
break;
case MATRIX_MUL:
parallel_matrix_mul<<<dimGrid, dimBlock>>>(Md, Nd, Pd);
break;
}
//read P from device
cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("Kernel failed!");
//free memory
cudaFree(Md);
cudaFree(Nd);
cudaFree(Pd);
}
//serial version
void MatrixOnHost(float *M, float *N, float *P, int width, int operation)
{
switch(operation){
case MATRIX_ADD:
for( int i = 0; i< width; i++){
for(int j=0; j<width; j++){
P[i*width+j] = M[i*width+j] + N[i*width+j];
}
}
break;
case MATRIX_SUB:
for( int i = 0; i< width; i++){
for(int j=0; j<width; j++){
P[i*width+j] = M[i*width+j] - N[i*width+j];
}
}
break;
case MATRIX_MUL:
for( int i = 0; i< width; i++){
for(int j=0; j<width; j++){
float sum = 0;
for (int k=0; k<width; k++){
sum += M[i*width+k] *N[k*width+j];
}
P[i*width+j] = sum;
}
}
break;
}
}
//Prints a Width x Width matrix under the given title, one row per line,
//matching the original output format.
static void printMatrix(const char *title, const float *m)
{
	std::cout << title << std::endl;
	for (int i = 0; i < Width; i++) {
		std::stringstream ss;
		for (int j = 0; j < Width; j++) {
			ss << m[i * Width + j] << " ";
		}
		std::cout << ss.str() << std::endl;
	}
}
int main(int argc, char** argv)
{
	//initialize the entry matrices; entry[k] = k, entry1/entry2 are fixed
	//test matrices (FIX: removed the unused entry3 allocation).
	float *entry = new float[Width*Width];
	float *entry1 = new float[Width*Width];
	float *entry2 = new float[Width*Width];
	for(int k=0; k<Width*Width; k++){
		entry[k] = k;
	}
	entry1[0]=2; entry1[1]=1; entry1[2]=2; entry1[3]=1; entry1[4]=2;
	entry1[5]=4; entry1[6]=6; entry1[7]=1; entry1[8]=7; entry1[9]=1;
	entry1[10]=3; entry1[11]=3; entry1[12]=11; entry1[13]=2; entry1[14]=11;
	entry1[15]=18; entry1[16]=10; entry1[17]=9; entry1[18]=8; entry1[19]=12;
	entry1[20]=8; entry1[21]=5; entry1[22]=3; entry1[23]=14; entry1[24]=20;
	entry2[0]=5; entry2[1]=10; entry2[2]=20; entry2[3]=11; entry2[4]=2;
	entry2[5]=14; entry2[6]=3; entry2[7]=12; entry2[8]=17; entry2[9]=7;
	entry2[10]=2; entry2[11]=7; entry2[12]=10; entry2[13]=5; entry2[14]=6;
	entry2[15]=20; entry2[16]=3; entry2[17]=8; entry2[18]=13; entry2[19]=4;
	entry2[20]=18; entry2[21]=15; entry2[22]=9; entry2[23]=8; entry2[24]=2;
	float *result = new float[Width*Width]; //holds each operation's output
	//device add / sub / mul (FIX: "Deivce" typo in the headers)
	MatrixOnDevice(entry, entry, result, Width, MATRIX_ADD);
	printMatrix("************Device Add Result: ************", result);
	MatrixOnDevice(entry, entry, result, Width, MATRIX_SUB);
	printMatrix("************Device Sub Result: ************", result);
	MatrixOnDevice(entry, entry, result, Width, MATRIX_MUL);
	printMatrix("************Device Mul Result: ************", result);
	//host reference add / sub / mul
	MatrixOnHost(entry, entry, result, Width, MATRIX_ADD);
	printMatrix("************Host Add Result: ************", result);
	MatrixOnHost(entry, entry, result, Width, MATRIX_SUB);
	printMatrix("************Host Sub Result: ************", result);
	MatrixOnHost(entry, entry, result, Width, MATRIX_MUL);
	printMatrix("************Host Mul Result: ************", result);
	//FIX: release host memory, previously leaked.
	delete[] entry;
	delete[] entry1;
	delete[] entry2;
	delete[] result;
	return 0;
}
|
129
|
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
* rms error. It is then as a base to generate and evaluate 8 new estimates,
* which are steps in different directions in m-c space. The best estimate is
* then used as the base for another iteration of "generate and evaluate". This
* continues until none of the new estimates are better than the base. This is
* a gradient search for a minimum in mc-space.
*
* To compile:
* nvcc -o lr_reg_cuda_071 lr_reg_cuda_071.cu -lm
*
* To run:
* ./lr_reg_cuda_071
*
*****************************************************************************/
/* One observation for the regression: independent variable `a`, response `y`. */
typedef struct point_t {
  double a;
  double y;
} point_t;
int n_data = 1000;               /* number of points in `data` (host copy) */
__device__ int d_n_data = 1000;  /* device-side copy of the point count */
//actual data
point_t data[] = {
{65.77,103.17},{66.66,115.07},{68.04,113.77},{73.02,110.29},
{82.45,135.91},{73.76,103.04},{90.76,148.34},{65.33,97.44},
{75.33,112.93},{72.32,108.50},{73.09,114.08},{82.21,133.12},
{83.75,131.80},{73.91,106.80},{60.25,88.80},{83.96,153.61},
{92.69,144.22},{32.54,68.18},{71.86,115.82},{56.19,103.37},
{78.68,127.29},{ 0.35,31.54},{28.09,85.61},{ 3.09,20.34},
{18.38,83.70},{29.84,77.58},{76.85,118.94},{63.64,92.79},
{61.50,106.92},{85.08,140.96},{27.72,59.21},{30.82,63.24},
{18.91,69.26},{12.29,47.23},{11.13,47.68},{89.13,145.64},
{14.92,42.34},{97.39,152.14},{61.64,134.55},{78.64,125.56},
{12.30,37.91},{ 1.63,23.71},{84.73,137.51},{71.00,107.53},
{41.16,73.61},{30.61,79.11},{66.18,106.89},{ 0.41,16.82},
{45.86,69.48},{27.65,65.35},{72.68,137.84},{14.27,37.45},
{44.51,84.86},{46.35,76.14},{37.69,88.49},{49.26,74.66},
{ 8.18,53.70},{99.85,163.82},{26.59,54.90},{51.94,109.76},
{70.89,118.08},{ 3.18,48.75},{ 7.80,49.29},{83.02,114.45},
{92.82,157.59},{24.80,69.54},{38.36,73.69},{93.60,154.66},
{70.53,122.81},{37.67,61.82},{58.06,102.25},{ 3.65,41.44},
{52.43,96.34},{65.23,121.17},{57.84,102.28},{44.35,64.02},
{85.82,132.38},{50.53,87.74},{88.17,144.48},{97.23,146.11},
{55.53,99.61},{35.08,98.07},{89.75,140.88},{66.73,108.88},
{38.06,80.05},{26.92,37.08},{45.46,94.12},{81.64,134.65},
{75.28,106.90},{16.76,58.23},{59.42,97.67},{20.75,71.57},
{34.08,75.69},{36.98,70.84},{67.55,110.33},{76.72,137.76},
{44.57,83.73},{30.68,69.29},{49.51,66.07},{19.17,48.65},
{61.23,108.20},{ 8.92,54.31},{42.48,95.04},{80.79,140.36},
{64.87,112.87},{ 8.13,37.56},{59.77,97.27},{16.26,65.89},
{38.06,83.18},{71.88,116.56},{16.34,54.18},{17.30,60.03},
{42.92,90.89},{70.44,129.42},{50.64,73.67},{72.85,124.06},
{100.00,154.50},{23.62,63.11},{45.41,85.47},{73.11,127.97},
{97.32,148.97},{85.22,132.78},{99.01,140.74},{61.07,102.76},
{12.47,49.19},{43.05,90.06},{33.18,77.98},{47.45,73.35},
{98.70,131.60},{ 7.81,52.51},{89.99,135.38},{74.26,120.60},
{62.79,98.80},{67.59,102.28},{46.73,102.80},{28.14,61.02},
{83.41,126.10},{48.71,96.02},{69.36,125.89},{89.15,154.47},
{45.02,78.14},{57.09,95.26},{ 4.13,36.98},{74.37,111.26},
{84.96,148.19},{42.52,95.53},{53.26,99.68},{55.32,92.34},
{27.31,74.02},{93.08,139.09},{82.76,113.47},{31.92,83.12},
{15.06,60.05},{71.29,125.77},{51.73,102.28},{19.49,63.28},
{18.44,59.18},{38.28,67.34},{96.08,150.36},{ 4.14,35.67},
{65.91,110.67},{ 1.10,23.11},{42.66,92.22},{76.20,118.66},
{90.15,122.88},{17.56,33.36},{ 8.27,36.60},{70.83,114.31},
{59.74,122.20},{77.04,141.71},{85.19,142.04},{49.98,106.00},
{11.94,51.96},{19.02,54.53},{45.42,85.57},{85.87,133.83},
{75.84,85.03},{66.20,129.78},{60.95,106.32},{42.68,78.93},
{15.55,36.11},{80.36,123.79},{15.48,47.20},{26.98,62.69},
{13.26,53.55},{97.72,150.14},{42.85,86.35},{61.01,110.77},
{10.24,41.83},{55.05,99.23},{47.51,92.32},{90.34,142.59},
{83.50,130.13},{21.99,62.66},{ 9.66,21.35},{83.86,150.40},
{74.14,115.92},{65.81,107.08},{39.73,79.82},{34.41,74.53},
{ 0.16,16.92},{92.50,127.79},{22.29,68.43},{79.27,127.22},
{ 8.54,40.84},{71.21,114.90},{49.00,64.75},{ 1.05,34.94},
{46.59,93.15},{80.89,133.51},{99.58,156.24},{53.00,97.62},
{83.79,129.39},{ 3.77,23.48},{76.98,125.41},{87.12,131.66},
{32.34,74.83},{51.69,112.98},{53.35,117.18},{92.45,151.77},
{84.49,130.86},{84.05,120.72},{71.63,122.69},{47.11,86.93},
{37.29,63.74},{74.57,125.43},{76.22,127.12},{38.23,63.11},
{88.49,153.64},{66.60,122.56},{78.47,119.13},{65.91,107.44},
{ 9.25,25.09},{39.86,80.09},{47.55,94.87},{98.61,151.74},
{ 2.69,29.18},{26.41,54.13},{86.16,115.41},{29.62,56.51},
{76.24,104.44},{70.46,125.62},{28.43,66.37},{14.35,48.26},
{16.82,59.15},{80.56,121.00},{65.33,107.35},{14.10,60.46},
{92.67,149.12},{99.60,156.95},{26.30,57.33},{65.01,100.52},
{74.16,113.75},{35.78,75.41},{40.38,70.56},{77.36,116.87},
{10.85,32.42},{18.97,54.56},{82.43,135.12},{95.14,151.90},
{46.39,85.91},{90.92,114.09},{ 5.48,38.41},{74.40,121.21},
{70.05,111.64},{24.52,51.25},{10.75,41.62},{68.75,122.83},
{87.86,130.06},{85.31,133.71},{53.60,117.89},{ 4.22,33.62},
{44.05,85.89},{25.84,57.66},{74.65,121.53},{94.83,148.35},
{99.49,157.77},{86.18,125.42},{44.00,94.04},{83.42,131.71},
{43.19,73.85},{69.31,111.56},{64.80,116.99},{ 1.40,15.37},
{69.99,126.03},{71.02,101.94},{16.05,35.13},{22.73,60.64},
{84.12,137.98},{97.45,147.54},{86.69,128.34},{71.59,109.89},
{43.47,73.00},{30.52,76.86},{78.60,126.19},{ 9.69,37.95},
{65.41,105.16},{ 2.76,42.98},{39.41,77.67},{55.34,89.06},
{83.51,132.52},{39.28,67.13},{36.30,59.54},{18.15,55.98},
{58.92,126.54},{75.95,126.29},{95.26,166.01},{97.87,132.00},
{60.32,103.19},{21.41,62.93},{ 9.25,61.15},{78.05,125.47},
{28.77,83.14},{81.59,140.53},{90.25,138.17},{56.87,93.23},
{63.07,111.84},{33.74,65.37},{52.69,106.62},{94.46,133.90},
{17.98,43.25},{ 6.34,54.08},{19.22,47.62},{42.61,82.96},
{71.03,128.26},{ 4.48,39.77},{97.45,154.07},{57.89,89.07},
{ 7.30,34.80},{95.20,145.28},{16.32,59.02},{19.14,49.39},
{73.62,108.80},{70.30,89.41},{57.92,116.39},{ 4.81,36.00},
{98.09,136.32},{64.91,112.36},{63.37,109.41},{20.60,73.03},
{74.33,104.33},{44.04,68.55},{ 5.44,44.34},{30.21,70.08},
{ 6.75,43.07},{34.15,82.16},{75.01,111.84},{45.88,96.30},
{37.32,83.45},{48.08,88.76},{ 6.04,25.92},{88.36,163.22},
{67.05,115.71},{29.27,57.17},{13.76,47.86},{45.10,91.57},
{54.08,100.19},{59.82,91.03},{42.94,91.46},{ 5.54,47.67},
{36.43,95.08},{71.10,122.78},{57.26,102.31},{20.03,53.70},
{80.26,142.83},{56.95,115.65},{83.29,132.26},{ 7.29,41.92},
{52.40,99.03},{10.83,40.13},{72.84,133.72},{ 5.75,45.32},
{63.33,116.96},{10.21,29.81},{29.53,58.29},{26.63,52.33},
{21.40,42.72},{41.13,103.43},{75.96,122.99},{82.66,122.12},
{68.22,114.37},{82.13,123.21},{82.87,145.94},{27.83,59.69},
{81.36,142.95},{40.40,65.98},{74.99,140.34},{39.78,71.88},
{24.15,58.28},{26.33,43.97},{89.59,141.88},{19.37,45.91},
{66.67,110.53},{13.73,39.48},{33.23,79.42},{11.64,51.42},
{35.64,56.43},{98.65,148.80},{43.14,88.28},{90.56,138.01},
{71.74,114.79},{ 2.56,40.33},{24.94,55.18},{ 2.53,36.32},
{87.63,144.73},{60.09,96.77},{66.32,107.28},{ 8.84,38.99},
{49.49,86.62},{ 7.20,42.52},{59.06,64.49},{94.03,164.06},
{28.25,71.25},{11.01,64.51},{40.75,75.34},{10.11,45.78},
{87.18,134.04},{78.23,121.22},{82.65,134.80},{ 3.01,46.72},
{98.94,152.06},{ 7.24,43.79},{39.01,84.74},{75.28,125.76},
{60.43,93.21},{ 2.17,31.11},{ 1.70,27.34},{79.50,122.85},
{29.45,66.44},{97.41,143.46},{45.40,99.21},{29.56,68.15},
{72.21,128.50},{95.95,139.59},{94.42,157.34},{63.19,98.08},
{11.72,41.20},{20.63,59.08},{81.00,130.44},{18.63,52.35},
{43.88,72.93},{20.05,61.43},{85.75,144.94},{18.59,43.76},
{46.77,96.89},{59.97,98.22},{26.51,64.44},{65.33,115.61},
{14.10,55.66},{91.72,142.97},{30.37,74.01},{71.64,123.92},
{29.39,56.18},{59.86,96.16},{12.70,62.20},{25.02,62.19},
{59.02,108.29},{ 7.84,50.88},{21.26,63.46},{53.63,106.03},
{14.68,37.16},{80.88,140.53},{ 8.41,55.50},{60.80,109.26},
{23.24,59.87},{91.98,149.68},{ 9.04,38.33},{46.28,92.27},
{40.68,81.85},{36.51,63.35},{20.70,57.45},{14.41,41.92},
{50.88,112.55},{47.65,85.11},{52.26,84.29},{64.40,108.21},
{59.21,92.34},{20.78,57.55},{91.44,139.58},{59.40,100.03},
{59.72,111.63},{50.07,88.22},{64.12,94.18},{ 5.54,60.74},
{30.26,52.57},{58.99,100.15},{ 7.07,31.74},{94.93,151.52},
{71.85,113.02},{87.67,123.00},{ 5.55,49.48},{79.29,118.08},
{20.34,48.62},{ 0.48,24.98},{36.22,91.53},{17.20,53.74},
{29.60,61.04},{56.08,107.97},{54.47,103.19},{69.35,115.07},
{ 4.76,32.31},{45.07,97.83},{14.53,66.36},{21.65,54.46},
{38.73,83.16},{80.96,118.73},{57.11,102.90},{36.82,78.69},
{47.69,68.78},{ 0.05,29.29},{33.27,85.91},{38.91,96.94},
{31.80,72.19},{91.87,150.57},{23.74,63.45},{63.51,126.96},
{97.61,163.28},{32.15,84.64},{84.90,128.78},{95.62,140.15},
{ 7.99,35.24},{70.87,104.87},{86.19,130.50},{20.84,49.97},
{24.74,61.46},{91.05,144.13},{ 0.98,25.98},{32.50,66.16},
{14.44,57.60},{22.42,66.02},{91.38,142.46},{19.84,57.17},
{33.62,67.50},{58.57,103.35},{25.13,72.83},{17.31,34.97},
{76.99,110.70},{71.86,128.58},{17.29,63.02},{94.08,129.27},
{50.43,90.01},{23.63,61.34},{67.11,110.97},{14.88,44.54},
{37.76,72.45},{92.88,140.30},{78.57,127.46},{68.67,107.11},
{25.33,53.83},{31.06,67.79},{ 6.69,27.38},{12.30,50.66},
{26.00,67.45},{25.89,69.85},{22.50,61.54},{72.63,118.25},
{76.90,136.66},{24.37,71.36},{47.29,87.92},{30.76,82.17},
{70.83,119.63},{37.67,67.23},{ 2.92,27.50},{71.40,112.23},
{15.30,43.37},{52.29,103.00},{63.47,111.07},{12.99,59.26},
{60.71,97.46},{90.70,164.44},{25.41,67.64},{47.78,94.72},
{41.06,79.19},{59.46,99.82},{37.09,89.59},{19.52,47.87},
{24.92,70.44},{49.14,88.71},{53.69,101.00},{97.36,165.67},
{88.73,151.39},{43.66,67.63},{22.57,47.01},{77.66,124.39},
{90.58,123.38},{32.42,75.76},{26.47,64.65},{97.98,148.35},
{74.32,124.99},{45.54,87.44},{60.62,86.97},{36.59,74.95},
{ 2.65,39.52},{85.56,124.04},{16.05,47.77},{96.80,128.91},
{30.03,69.82},{57.59,89.24},{98.12,146.14},{62.42,102.56},
{17.52,50.54},{40.80,72.44},{18.65,48.68},{34.59,73.93},
{93.03,146.90},{22.68,70.39},{47.00,86.77},{49.78,116.04},
{40.08,75.36},{22.91,48.48},{71.74,98.57},{78.77,121.33},
{42.69,80.48},{59.05,113.31},{42.85,94.04},{56.53,125.57},
{81.31,136.85},{13.86,43.15},{22.44,55.97},{ 1.24,25.86},
{89.18,141.93},{83.07,127.94},{32.33,75.70},{41.94,99.04},
{71.08,115.94},{32.78,70.54},{87.40,142.53},{61.36,96.57},
{54.66,87.78},{53.19,106.08},{53.54,100.35},{27.55,69.39},
{50.02,95.74},{69.10,123.43},{87.24,135.44},{61.57,106.46},
{45.19,79.09},{34.40,75.29},{88.91,124.80},{75.33,139.61},
{72.72,112.59},{24.58,64.69},{35.28,55.38},{35.88,79.24},
{ 0.47,16.66},{70.32,114.69},{53.21,110.96},{ 6.04,52.43},
{35.03,76.40},{29.59,71.85},{47.27,90.89},{11.21,59.47},
{17.03,70.47},{85.69,131.92},{12.54,65.73},{29.02,85.10},
{37.88,68.83},{33.51,77.61},{37.69,73.04},{42.90,71.74},
{ 2.98,23.38},{36.21,77.22},{21.57,72.38},{ 9.36,53.03},
{80.31,136.00},{12.10,43.60},{95.79,144.53},{96.39,158.95},
{15.98,28.75},{ 8.48,40.43},{74.62,131.88},{85.68,132.76},
{85.15,143.59},{57.94,105.64},{26.56,76.48},{15.12,56.58},
{97.62,136.54},{56.90,105.77},{57.28,107.05},{70.58,131.09},
{24.56,77.07},{45.24,84.88},{86.06,139.75},{80.19,135.38},
{ 4.40,46.01},{ 0.80,37.36},{18.67,32.66},{49.93,100.09},
{67.17,108.56},{78.10,141.56},{71.44,112.18},{58.19,110.45},
{ 2.13,36.72},{82.52,105.53},{96.94,158.48},{47.74,94.69},
{72.48,119.25},{52.67,83.24},{35.07,66.27},{82.09,118.09},
{65.44,126.96},{66.96,121.99},{ 7.94,30.47},{ 8.51,42.20},
{71.37,101.37},{67.76,100.37},{26.35,76.18},{ 7.28,40.36},
{90.59,157.93},{ 7.39,50.31},{56.78,102.18},{34.68,61.38},
{41.17,73.70},{53.35,100.09},{73.75,126.89},{96.48,148.31},
{73.33,129.61},{78.59,126.46},{58.30,102.73},{20.20,40.65},
{46.85,104.85},{ 9.04,42.25},{99.42,159.59},{93.21,125.42},
{29.58,54.15},{29.32,60.87},{15.22,37.74},{35.23,68.65},
{44.57,90.12},{62.59,117.43},{21.16,77.27},{21.75,62.81},
{97.07,172.71},{48.24,104.32},{73.52,117.27},{59.40,90.80},
{55.95,123.10},{ 6.78,31.26},{50.66,99.87},{ 7.12,43.71},
{85.29,137.75},{22.16,60.48},{98.65,124.55},{15.33,44.09},
{90.07,141.91},{66.01,111.34},{59.90,101.65},{38.76,86.38},
{14.03,57.19},{96.81,141.87},{ 8.88,42.66},{80.24,116.28},
{57.50,90.11},{75.21,144.04},{99.00,150.86},{ 8.77,57.59},
{84.30,128.82},{61.42,109.16},{15.52,46.98},{36.42,95.48},
{73.77,135.33},{33.09,72.22},{87.52,144.03},{50.41,105.91},
{79.08,119.14},{44.87,104.66},{82.90,128.20},{45.96,110.94},
{96.42,134.11},{45.65,87.51},{55.77,111.11},{39.74,67.97},
{12.29,41.79},{49.43,70.04},{99.59,168.88},{69.91,95.39},
{24.39,76.42},{82.74,127.00},{50.11,80.19},{ 5.93,39.53},
{27.35,74.87},{ 9.68,38.49},{71.77,113.22},{91.43,136.23},
{70.68,126.01},{45.76,89.37},{ 9.24,19.89},{12.96,44.06},
{77.94,120.32},{23.29,56.16},{46.94,78.21},{87.95,125.36},
{35.77,68.55},{90.25,130.78},{77.52,132.04},{68.83,120.34},
{54.98,93.74},{87.06,121.11},{58.35,110.51},{65.50,114.36},
{36.78,98.07},{48.58,85.19},{46.76,109.92},{ 2.44,44.53},
{33.74,80.06},{ 0.05,39.32},{39.16,79.28},{42.61,87.78},
{48.08,93.16},{91.66,136.57},{29.92,70.97},{36.89,77.40},
{56.86,99.42},{ 1.75,44.22},{20.50,57.55},{61.32,113.99},
{69.97,112.34},{15.51,60.90},{80.22,128.70},{85.77,139.29},
{10.82,43.40},{ 4.92,39.18},{58.21,111.46},{ 5.81,45.55},
{85.88,154.43},{93.93,136.64},{66.94,123.28},{53.59,77.42},
{ 8.99,42.55},{23.88,67.91},{ 6.69,13.44},{37.62,91.90},
{29.27,61.26},{ 6.29,35.74},{46.27,84.48},{14.91,50.12},
{29.06,62.32},{70.16,100.79},{26.91,55.01},{75.22,127.27},
{95.04,158.75},{24.10,69.52},{80.01,109.62},{24.25,56.81},
{14.83,56.47},{75.28,126.37},{30.86,73.36},{94.36,144.53},
{61.19,109.52},{91.16,141.24},{74.87,103.78},{ 6.31,54.19},
{11.19,58.23},{12.03,60.78},{ 8.44,41.31},{56.11,89.24},
{21.02,53.45},{38.93,74.13},{93.64,145.20},{94.31,137.23},
{44.12,80.44},{ 7.10,46.16},{53.60,76.43},{34.92,66.94},
{52.58,110.90},{ 3.05,41.00},{64.02,96.84},{14.26,52.94},
{17.38,55.65},{71.95,129.09},{62.67,116.49},{60.56,98.66},
{50.05,82.96},{87.39,140.29},{69.83,104.48},{39.62,68.16},
{24.56,71.93},{33.32,81.02},{87.68,136.71},{79.02,120.89},
{43.41,94.81},{97.98,152.76},{ 8.22,54.90},{28.60,50.79},
{39.47,91.32},{84.90,141.70},{39.51,86.98},{56.69,90.42},
{91.25,138.19},{52.51,95.03},{88.84,138.39},{49.43,93.68},
{68.29,112.72},{83.78,127.27},{74.39,113.84},{33.33,76.97},
{79.09,115.54},{ 1.71,20.25},{96.94,127.17},{45.37,90.65},
{47.12,91.24},{63.30,107.76},{65.19,106.23},{27.84,68.58},
{71.13,120.42},{15.18,46.12},{30.56,79.90},{21.69,61.65},
{76.01,127.67},{94.67,135.84},{58.92,110.17},{42.06,99.29},
{35.78,76.41},{13.09,36.91},{79.82,129.03},{13.47,38.05},
{ 0.47,44.26},{42.94,95.60},{ 5.91,33.28},{14.51,41.14},
{21.93,53.87},{87.89,132.77},{47.35,75.25},{57.42,112.81},
{90.13,133.24},{12.77,46.29},{47.37,100.83},{ 4.88,30.32},
{21.27,63.80},{78.44,126.29},{95.77,142.11},{40.88,83.83},
{74.79,128.38},{61.52,118.65},{67.51,117.23},{78.36,130.30},
{63.96,98.76},{96.38,153.44},{47.88,70.40},{14.40,43.37},
{ 8.52,43.81},{ 6.44,26.98},{18.50,35.08},{74.78,122.99},
{67.55,123.19},{37.45,71.36},{ 3.85,18.35},{38.84,73.52},
{29.51,61.93},{38.63,84.42},{77.63,128.96},{75.18,128.67},
{71.28,120.48},{ 5.83,61.37},{27.02,79.54},{79.03,120.53},
{79.22,124.19},{58.20,104.06},{38.59,98.95},{42.16,82.21},
{83.70,123.90},{36.89,75.18},{47.77,94.42},{80.30,118.38},
{ 0.42,34.00},{74.55,130.64},{ 9.88,31.50},{64.72,123.00},
{39.60,75.94},{68.76,112.79},{72.69,114.21},{58.20,90.93},
{94.21,128.80},{69.74,119.12},{99.48,150.63},{63.85,119.08},
{54.56,99.61},{97.73,142.85},{62.48,85.27},{57.76,111.23},
{55.21,107.38},{89.40,132.81},{66.27,141.28},{25.08,56.02},
{67.03,108.34},{83.70,128.19},{56.98,95.40},{87.17,148.70},
{21.31,51.59},{ 7.56,42.71},{50.45,81.20},{50.50,100.72},
{98.22,151.25},{49.49,76.59},{39.03,84.60},{90.03,140.25},
{97.86,162.06},{80.83,129.97},{88.18,129.07},{64.29,95.35}
};
/* Squared residual of the point (a, y) against the line y = m*a + c. */
double residual_error(double a, double y, double m, double c) {
  double predicted = (m * a) + c;
  double diff = predicted - y;
  return diff * diff;
}
/* Device copy of residual_error: squared residual of (a, y) vs y = m*a + c. */
__device__ double d_residual_error(double a, double y, double m, double c) {
  double diff = (m * a) + c - y;
  return diff * diff;
}
/* Root-mean-square error of the line y = m*a + c over the global data set. */
double rms_error(double m, double c) {
  double error_sum = 0;
  for (int i = 0; i < n_data; i++) {
    error_sum += residual_error(data[i].a, data[i].y, m, c);
  }
  return sqrt(error_sum / n_data);
}
//Kernel: one thread per data point. Each thread stores the squared residual
//of its point into error_sum_arr[i]; the host reduces the array afterwards.
//NOTE(review): there is no bounds guard, so the launch must supply exactly
//one thread per point (the <<<100,10>>> launch in main gives 1000) — confirm
//if the launch configuration ever changes.
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
  /*
  Calculate the current index by using:
  - The thread id
  - The block id
  - The number of threads per block
  */
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  //Work out the error sum 1000 times and store them in an array.
  error_sum_arr[i] = d_residual_error(d_data[i].a, d_data[i].y, *m, *c);
}
/* Stores finish - start in nanoseconds into *difference.
   Returns 0 when the difference is positive, 1 otherwise. */
int time_difference(struct timespec *start, struct timespec *finish,
                    long long int *difference) {
  long long int seconds = finish->tv_sec - start->tv_sec;
  long long int nanos = finish->tv_nsec - start->tv_nsec;
  if (nanos < 0) {
    /* borrow one second from the seconds part */
    seconds--;
    nanos += 1000000000;
  }
  *difference = seconds * 1000000000 + nanos;
  return !(*difference > 0);
}
int main() {
  int i;
  double bm = 1.3;                /* base gradient (m) estimate */
  double bc = 10;                 /* base intercept (c) estimate */
  double be;                      /* rms error of the base estimate */
  double dm[8];
  double dc[8];
  double e[8];
  double step = 0.01;
  double best_error = 999999999;
  int best_error_i = 0;           /* FIX: initialized (was potentially read uninitialized) */
  int minimum_found = 0;
  /* The 8 step directions around the base point in m-c space. */
  double om[] = {0,1,1, 1, 0,-1,-1,-1};
  double oc[] = {1,1,0,-1,-1,-1, 0, 1};
  struct timespec start, finish;
  long long int time_elapsed;
  /* Get the system time before we begin the linear regression. */
  clock_gettime(CLOCK_MONOTONIC, &start);
  cudaError_t error;
  /* Device variables */
  double *d_dm;
  double *d_dc;
  double *d_error_sum_arr;
  point_t *d_data;
  be = rms_error(bm, bc);
  /* Allocate memory for d_dm */
  error = cudaMalloc(&d_dm, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  /* Allocate memory for d_dc */
  error = cudaMalloc(&d_dc, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  /* Allocate memory for d_error_sum_arr (one slot per data point) */
  error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
  if(error){
    fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  /* Allocate memory for d_data */
  error = cudaMalloc(&d_data, sizeof(data));
  if(error){
    fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  /* Gradient descent: evaluate 8 neighbours of the base point each pass and
     move to the best one until no neighbour improves on the base error. */
  while(!minimum_found) {
    for(i=0;i<8;i++) {
      dm[i] = bm + (om[i] * step);
      dc[i] = bc + (oc[i] * step);
    }
    /* Copy dm to d_dm */
    error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
        cudaGetErrorString(error));
    }
    /* Copy dc to d_dc */
    error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
        cudaGetErrorString(error));
    }
    /* Copy data to d_data */
    error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
        cudaGetErrorString(error));
    }
    for(i=0;i<8;i++) {
      /* Host buffer for the per-point squared residuals. */
      double h_error_sum_arr[1000];
      /* FIX: must start at zero — it was previously read uninitialized on
         the first pass, which is undefined behaviour. */
      double error_sum_total = 0;
      double error_sum_mean;
      /* 100 blocks x 10 threads = one thread per data point. */
      d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
      /* FIX: cudaThreadSynchronize() is deprecated. */
      cudaDeviceSynchronize();
      /* Copy the residuals back to the host. */
      error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
      if(error){
        fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
          cudaGetErrorString(error));
      }
      /* Reduce the residuals on the host. */
      for(int j=0; j<n_data; j++) {
        error_sum_total += h_error_sum_arr[j];
      }
      error_sum_mean = error_sum_total / n_data;
      e[i] = sqrt(error_sum_mean);
      if(e[i] < best_error) {
        best_error = e[i];
        best_error_i = i;
      }
    }
    if(best_error < be) {
      /* A neighbour improved on the base: move there and iterate again. */
      be = best_error;
      bm = dm[best_error_i];
      bc = dc[best_error_i];
    } else {
      minimum_found = 1;
    }
  }
  /* Free memory for d_dm */
  error = cudaFree(d_dm);
  if(error){
    fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  /* Free memory for d_dc */
  error = cudaFree(d_dc);
  if(error){
    fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  /* Free memory for d_data */
  error = cudaFree(d_data);
  if(error){
    fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  /* Free memory for d_error_sum_arr */
  error = cudaFree(d_error_sum_arr);
  if(error){
    fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
  /* Get the system time after we have run the linear regression function. */
  clock_gettime(CLOCK_MONOTONIC, &finish);
  /* Calculate the time spent between the start time and end time. */
  time_difference(&start, &finish, &time_elapsed);
  /* Output the time spent running the program. */
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
    (time_elapsed/1.0e9));
  return 0;
}
|
130
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
// CUDA kernel: accumulates all elements of `a` into the single counter *c.
// FIX: the original `*c += a[id]` was an unsynchronized read-modify-write on
// one shared location — a data race producing an arbitrary subset sum.
// atomicAdd makes the accumulation correct. *c must be zero-initialized by
// the caller before the launch.
__global__ void vecAdd(int *a, int *c, int n)
{
	// Get our global thread ID
	int id = blockIdx.x*blockDim.x+threadIdx.x;
	// Make sure we do not go out of bounds
	if (id < n)
		atomicAdd(c, a[id]);
}
int main( int argc, char* argv[] )
{
	// Number of input elements.
	int n = 5;
	const int size = n * sizeof(int);
	// Host input data.
	int a[] = {0, 1, 2, 3, 4};
	// Device input vector and device scalar accumulator.
	int *d_a;
	int *d_c;
	int zero = 0;
	cudaMalloc(&d_a, size);
	cudaMalloc(&d_c, sizeof(int));
	printf("Input array");
	// FIX: copy the input into d_a — the original cudaMalloc'd over the host
	// pointer h_a, copied the data there, and launched the kernel on a d_a
	// that was never written (and later called free() on a device pointer).
	cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
	// Zero the accumulator before the kernel's atomic adds.
	cudaMemcpy(d_c, &zero, sizeof(int), cudaMemcpyHostToDevice);
	int blockSize, gridSize;
	// Number of threads in each thread block
	blockSize = 2;
	// Number of thread blocks in grid (ceiling division)
	gridSize = (int)ceil((float)n/blockSize);
	// Execute the kernel
	vecAdd<<<gridSize, blockSize>>>(d_a, d_c, n);
	int result;
	// Copy the scalar sum back to the host (blocking copy synchronizes).
	cudaMemcpy(&result, d_c, sizeof(int), cudaMemcpyDeviceToHost);
	// FIX: %d for an int (was %f, which is undefined behaviour).
	printf("final result: %d\n", result);
	// Release device memory
	cudaFree(d_a);
	cudaFree(d_c);
	return 0;
}
|
131
|
/***************************************************************************//**
* \file N.cu
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief kernels to generate the advection term
*/
#include "N.h"
/**
* \namespace kernels
* \brief Contains all the custom-written CUDA kernels.
*/
namespace kernels
{
/*
 * Calculates the explicit advection terms of the u-momentum equation in the
 * middle of the domain (domain-edge u-nodes return early; Nbcx handles them).
 * Launch: 1D grid of 1D blocks, one thread per u-node, >= (nx-1)*ny threads.
 * Layout note (from the iv offset below): u and v are packed in one array —
 * the u values occupy the first (nx-1)*ny entries and the v values follow.
 * param N explicit advection terms (output, written at the u-node index)
 * param u u velocities (packed u/v array, see layout note)
 * param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
 * param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
 * param nx number of cells in x direction
 * param ny number of cells in y direction
 */
__global__
void Nmidx(double *N, double *u, double *dx, double *dy, int nx, int ny)
{
	if (threadIdx.x + blockDim.x * blockIdx.x >= (nx-1)*ny)
		return;
	// (I,J): 2D coordinates of this u-node on the (nx-1) x ny u-grid;
	// iv: index into the v-section of the packed velocity array.
	int i = threadIdx.x + blockDim.x * blockIdx.x,
	    I = i % (nx-1),
	    J = i / (nx-1),
	    iv = (nx-1)*ny + nx*J +I;
	// Edge nodes are computed by Nbcx with boundary values instead.
	if (I == 0 || I == nx-2 || J == 0 || J == ny-1)
		return;
	// u*du/dx + (interpolated v)*du/dy with non-uniform central differences.
	N[i] = u[i]*(
			u[i+1] * dx[I]/(dx[I+1]*(dx[I]+dx[I+1]))//east
			+ u[i-1] * (dx[I]/(dx[I]*(dx[I]+dx[I+1])) - 1/dx[I])//west
			+ u[i] * (-dx[I]/(dx[I+1]*(dx[I]+dx[I+1])) - (dx[I]/(dx[I]*(dx[I]+dx[I+1])) - 1/dx[I]))//center
		   ) +
		   (((u[iv+1]-u[iv])*dx[I]/(dx[I]+dx[I+1]) + u[iv]) / 2 + ((u[iv-nx+1]-u[iv-nx])*dx[I]/(dx[I]+dx[I+1]) + u[iv-nx]) / 2) //v
		   *(
			u[i+nx-1] * dy[J]/(dy[J]*(dy[J+1]+dy[J]))//North
			+ u[i-nx+1] * (dy[J-1]/(dy[J]*(dy[J]+dy[J-1])) -1/dy[J])//South
			+ u[i] * (-dy[J]/(dy[J]*(dy[J+1]+dy[J])) - (dy[J-1]/(dy[J]*(dy[J]+dy[J-1])) -1/dy[J]))//more center
		   );
}
/*
 * Calculates the explicit advection terms of the u-momentum equation at the
 * edge of the domain (interior u-nodes return early; Nmidx handles them).
 * Boundary values replace the missing neighbours; the ghost value
 * (2*bc - u) is used across the top/bottom walls.
 * Launch: same configuration and packed u/v layout as Nmidx.
 * param N explicit advection terms
 * param u u velocities
 * param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
 * param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
 * param ym yminus boundary velocities
 * param yp yplus boundary velocities
 * param xm xminus boundary velocities
 * param xp xplus boundary velocities
 * param nx number of cells in x direction
 * param ny number of cells in y direction
 */
__global__
void Nbcx(double *N, double *u, double *dx, double *dy, double *ym, double *yp, double *xm, double *xp, int nx, int ny)
{
	if (threadIdx.x + blockDim.x * blockIdx.x >= (nx-1)*ny)
		return;
	int i = threadIdx.x + blockDim.x * blockIdx.x,
	    I = i % (nx-1),
	    J = i / (nx-1),
	    iv = (nx-1)*ny + nx*J +I;
	// Only edge nodes are handled here.
	if (I != 0 && I != nx-2 && J != 0 && J != ny-1)
		return;
	// Accumulate the u*du/dx part first, then the v*du/dy part.
	double temp = 0;
	//East
	if (I == nx-2)
		temp += u[i]*(
				xp[J] * dx[I]/(dx[I+1]*(dx[I]+dx[I+1]))//east
				+ u[i-1] * (dx[I]/(dx[I]*(dx[I]+dx[I+1])) - 1/dx[I])//west
				+ u[i] * (-dx[I]/(dx[I+1]*(dx[I]+dx[I+1])) - (dx[I]/(dx[I]*(dx[I]+dx[I+1])) - 1/dx[I]))//center
			   );
	//West
	else if(I == 0)
		temp += u[i]*(
				u[i+1] * dx[I]/(dx[I+1]*(dx[I]+dx[I+1]))//east
				+ xm[J] * (dx[I]/(dx[I]*(dx[I]+dx[I+1])) - 1/dx[I])//west
				+ u[i] * (-dx[I]/(dx[I+1]*(dx[I]+dx[I+1])) - (dx[I]/(dx[I]*(dx[I]+dx[I+1])) - 1/dx[I]))//center
			   );
	//E-W center
	else
		temp += u[i]*(
				u[i+1] * dx[I]/(dx[I+1]*(dx[I]+dx[I+1]))//east
				+ u[i-1] * (dx[I]/(dx[I]*(dx[I]+dx[I+1])) - 1/dx[I])//west
				+ u[i] * (-dx[I]/(dx[I+1]*(dx[I]+dx[I+1])) - (dx[I]/(dx[I]*(dx[I]+dx[I+1])) - 1/dx[I]))//center
			   );
	//North
	// NOTE(review): the boundary arrays are indexed both at [I] and at
	// [(nx-1)+I] here — presumably u- and v-components stored back to back
	// in yp/ym; confirm against where those arrays are filled.
	if(J == ny-1)
		temp += (((yp[(nx-1)+I+1]-yp[(nx-1)+I])*dx[I]/(dx[I]+dx[I+1]) + yp[(nx-1)+I]) / 2 + ((u[iv-nx+1]-u[iv-nx])*dx[I]/(dx[I]+dx[I+1]) + u[iv-nx]) / 2) //v
			*(
			 (2*yp[I] - u[i]) * dy[J] /(dy[J]*(dy[J]+dy[J]))//North
			 + u[i-nx+1] * (dy[J-1]/(dy[J]*(dy[J]+dy[J-1])) -1/dy[J])//South
			 + u[i] * (-dy[J] /(dy[J]*(dy[J]+dy[J])) - (dy[J-1]/(dy[J]*(dy[J]+dy[J-1])) -1/dy[J]))//more center
			 );
	//South
	else if(J == 0)
		temp += (((u[iv+1]-u[iv])*dx[I]/(dx[I]+dx[I+1]) + u[iv]) / 2 + ((ym[(nx-1)+I+1]-ym[(nx-1)+I])*dx[I]/(dx[I]+dx[I+1]) + ym[(nx-1)+I]) / 2) //v
			*(
			 u[i+nx-1] * dy[J] /(dy[J]*(dy[J+1]+dy[J]))//North
			 + (2*ym[I]-u[i]) * (dy[J] /(dy[J]*(dy[J]+dy[J])) -1/dy[J])//South
			 + u[i] * (-dy[J] /(dy[J]*(dy[J+1]+dy[J])) - (dy[J]/(dy[J]*(dy[J]+dy[J])) -1/dy[J]))//more center
			 );
	//N-S center
	else
		temp += (((u[iv+1]-u[iv])*dx[I]/(dx[I]+dx[I+1]) + u[iv]) / 2 + ((u[iv-nx+1]-u[iv-nx])*dx[I]/(dx[I]+dx[I+1]) + u[iv-nx]) / 2) //v
			*(
			 u[i+nx-1] * dy[J]/(dy[J]*(dy[J+1]+dy[J]))//North
			 + u[i-nx+1] * (dy[J-1]/(dy[J]*(dy[J]+dy[J-1])) -1/dy[J])//South
			 + u[i] * (-dy[J]/(dy[J]*(dy[J+1]+dy[J])) - (dy[J-1]/(dy[J]*(dy[J]+dy[J-1])) -1/dy[J]))//more center
			 );
	N[i] = temp;
}
/*
 * Calculates the explicit advection terms of the v-momentum equation in the
 * middle of the domain (domain-edge v-nodes return early; Nbcy handles them).
 * Launch: 1D grid of 1D blocks, one thread per v-node, >= nx*(ny-1) threads.
 * iu/iv index the u- and v-sections of the same packed velocity array.
 * param N explicit advection terms (output, written at the packed v index iv)
 * param u u velocities (packed u/v array)
 * param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
 * param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
 * param nx number of cells in x direction
 * param ny number of cells in y direction
 */
__global__
void Nmidy(double *N, double *u, double *dx, double *dy, int nx, int ny)
{
	if (threadIdx.x + blockDim.x * blockIdx.x >= nx*(ny-1))
		return;
	int ip = threadIdx.x + blockDim.x * blockIdx.x,
	    I = ip % nx,
	    J = ip / nx,
	    iu = (nx-1)*J + I,
	    iv = ip + (nx-1)*ny;
	// Edge nodes are computed by Nbcy with boundary values instead.
	if (I == 0 || I == nx-1 || J == 0 || J == ny-2)
		return;
	// (interpolated u)*dv/dx + v*dv/dy with non-uniform central differences.
	N[iv] = (((u[iu+(nx-1)]-u[iu])*dy[J]/(dy[J]+dy[J+1]) + u[iu]) / 2 + ((u[iu+(nx-1)-1]-u[iu-1])*dy[J]/(dy[J]+dy[J+1]) + u[iu-1]) / 2) //u
		*(
		 u[iv+1] * dx[I]/(dx[I]*(dx[I]+dx[I+1]))//east
		 + u[iv-1] * (dx[I-1]/(dx[I]*(dx[I]+dx[I-1])) - 1/dx[I])//west
		 + u[iv] * (-dx[I]/(dx[I]*(dx[I]+dx[I+1])) - (dx[I-1]/(dx[I]*(dx[I]+dx[I-1])) - 1/dx[I]))//center
		 )+
		u[iv]
		*(
		 u[iv+nx] * dy[J]/(dy[J+1]*(dy[J+1]+dy[J]))//North
		 + u[iv-nx] *(dy[J]/(dy[J] *(dy[J+1]+dy[J])) - 1/dy[J])//South
		 + u[iv] * (-dy[J]/(dy[J+1]*(dy[J+1]+dy[J])) - (dy[J]/(dy[J]*(dy[J+1]+dy[J])) - 1/dy[J]))//more center
		 );
}
/*
 * Calculates the explicit advection terms of the v-momentum equation at the
 * edge of the domain (interior v-nodes return early; Nmidy handles them).
 * Boundary values replace the missing neighbours; the ghost value
 * (2*bc - v) is used across the east/west walls.
 * Launch: same configuration and packed u/v layout as Nmidy.
 * param N explicit advection terms
 * param u v velocities (packed u/v array)
 * param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
 * param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
 * param ym yminus boundary velocities
 * param yp yplus boundary velocities
 * param xm xminus boundary velocities
 * param xp xplus boundary velocities
 * param nx number of cells in x direction
 * param ny number of cells in y direction
 */
__global__
void Nbcy(double *N, double *u, double *dx, double *dy, double *ym, double *yp, double *xm, double *xp, int nx, int ny)
{
	if (threadIdx.x + blockDim.x * blockIdx.x >= nx*(ny-1))
		return;
	int ip = threadIdx.x + blockDim.x * blockIdx.x,
	    I = ip % nx,
	    J = ip / nx,
	    iu = (nx-1)*J + I,
	    iv = ip + (nx-1)*ny;
	// Only edge nodes are handled here.
	if (I != 0 && I != nx-1 && J != 0 && J != ny-2)
		return;
	// Accumulate the u*dv/dx part first, then the v*dv/dy part.
	double temp = 0;
	//East
	if (I == nx-1)
		temp += (((xp[J+1]-xp[J])*dy[J]/(dy[J]+dy[J+1]) + xp[J]) / 2 + ((u[iu+(nx-1)-1]-u[iu-1])*dy[J]/(dy[J]+dy[J+1]) + u[iu-1]) / 2) //u
			*(
			 (2*xp[ny+J] - u[iv]) * dx[I] /(dx[I]*(dx[I]+dx[I]))//east
			 + u[iv-1] * (dx[I-1]/(dx[I]*(dx[I]+dx[I-1])) - 1/dx[I])//west
			 + u[iv] * (-dx[I] /(dx[I]*(dx[I]+dx[I])) - (dx[I-1]/(dx[I]*(dx[I]+dx[I-1])) - 1/dx[I]))//center
			 );
	//West
	else if(I == 0)
		temp += (((u[iu+(nx-1)]-u[iu])*dy[J]/(dy[J]+dy[J+1]) + u[iu]) / 2 + ((xm[J+1]-xm[J])*dy[J]/(dy[J]+dy[J+1]) + xm[J]) / 2) //u
			*(
			 u[iv+1] * dx[I] /(dx[I]*(dx[I]+dx[I+1]))//east
			 + (2*xm[ny+J] - u[iv]) * (dx[I] /(dx[I]*(dx[I]+dx[I])) - 1/dx[I])//west
			 + u[iv] * (-dx[I]/(dx[I]*(dx[I]+dx[I+1])) - (dx[I]/(dx[I]*(dx[I]+dx[I])) - 1/dx[I]))//center
			 );
	//E-W center
	else
		temp += (((u[iu+(nx-1)]-u[iu])*dy[J]/(dy[J]+dy[J+1]) + u[iu]) / 2 + ((u[iu+(nx-1)-1]-u[iu-1])*dy[J]/(dy[J]+dy[J+1]) + u[iu-1]) / 2) //u
			*(
			 u[iv+1] * dx[I]/(dx[I]*(dx[I]+dx[I+1]))//east
			 + u[iv-1] * (dx[I-1]/(dx[I]*(dx[I]+dx[I-1])) - 1/dx[I])//west
			 + u[iv] * (-dx[I]/(dx[I]*(dx[I]+dx[I+1])) - (dx[I-1]/(dx[I]*(dx[I]+dx[I-1])) - 1/dx[I]))//center
			 );
	//North
	if(J == ny-2)
		temp += u[iv]
			*(
			 yp[(nx-1)+I] * dy[J]/(dy[J+1]*(dy[J+1]+dy[J]))//North
			 + u[iv-nx] *(dy[J]/(dy[J] *(dy[J+1]+dy[J])) - 1/dy[J])//South
			 + u[iv] * (-dy[J]/(dy[J+1]*(dy[J+1]+dy[J])) - (dy[J]/(dy[J]*(dy[J+1]+dy[J])) - 1/dy[J]))//more center
			 );
	//South
	else if(J == 0)
		temp += u[iv]
			*(
			 u[iv+nx] * dy[J]/(dy[J+1]*(dy[J+1]+dy[J]))//North
			 + ym[(nx-1)+I] *(dy[J]/(dy[J] *(dy[J+1]+dy[J])) - 1/dy[J])//South
			 + u[iv] * (-dy[J]/(dy[J+1]*(dy[J+1]+dy[J])) - (dy[J]/(dy[J]*(dy[J+1]+dy[J])) - 1/dy[J]))//more center
			 );
	//N-S center
	else
		temp += u[iv]
			*(
			 u[iv+nx] * dy[J]/(dy[J+1]*(dy[J+1]+dy[J]))//North
			 + u[iv-nx] *(dy[J]/(dy[J] *(dy[J+1]+dy[J])) - 1/dy[J])//South
			 + u[iv] * (-dy[J]/(dy[J+1]*(dy[J+1]+dy[J])) - (dy[J]/(dy[J]*(dy[J+1]+dy[J])) - 1/dy[J]))//more center
			 );
	N[iv] = temp;
}
}
|
132
|
#include "includes.h"
// For every cell (m, l) of an n x n periodic grid, writes the flattened
// indices of its k x k neighbourhood into
// neighbors[(m*n + l)*k*k + row*k + col], wrapping around the edges.
// Works with any 1D grid/block configuration via strided loops.
__global__ void gpu_get_neighors(int *neighbors, int n , int k)
{
    const int half = k / 2;
    for (int m = blockIdx.x; m < n; m += gridDim.x)
    {
        for (int l = threadIdx.x; l < n; l += blockDim.x)
        {
            // Base offset of this cell's k*k neighbour list.
            const int base = (m * n + l) * k * k;
            for (int di = -half; di <= half; ++di)
            {
                // +n before the modulo keeps negative offsets in range.
                const int wrapped_row = (n + m + di) % n;
                for (int dj = -half; dj <= half; ++dj)
                {
                    const int wrapped_col = (n + l + dj) % n;
                    neighbors[base + (di + half) * k + (dj + half)] =
                        wrapped_row * n + wrapped_col;
                }
            }
        }
    }
}
|
133
|
#include "global_defines.cuh"
#include <cmath>
#include <cstdio>
#include <cstdlib>
// Element-wise comparison of two lattice-sized arrays; counts entries whose
// absolute difference exceeds the tolerance and reports the count.
// NOTE(review): the extents 680x73x73 are hard-coded — confirm they match the
// actual allocation of a and b.
void temp_compare(FLOATING *a, FLOATING *b){
    const int lx = 680, ly = 73, lz = 73;
    int missed = 0;
    for (int z = 0; z < lz; ++z){
        for (int y = 0; y < ly; ++y){
            for (int x = 0; x < lx; ++x){
                // was abs(): the integer overload truncated the floating-point
                // difference toward zero, so any mismatch smaller than 1.0 was
                // silently ignored. fabs keeps the fractional part.
                if (fabs(a[index(z,y,x)] - b[index(z,y,x)]) > 0.00001)
                    ++missed;
            }
        }
    }
    cout <<"totally missed:" << missed << endl;
    // was printed unconditionally; only claim a full match when nothing differed.
    if (missed == 0)
        cout << " FULL MATCH!" << endl;
}
// Beginning of GPU Architecture definitions
// Maps a compute capability (major, minor) to the number of CUDA cores per
// streaming multiprocessor. Unknown capabilities fall back to the newest
// known architecture (with a warning on stdout).
inline int _ConvertSMVer2Cores(int major, int minor)
{
    // SM version encoded as 0xMm (M = major, m = minor) -> cores per SM.
    struct SmToCores
    {
        int sm;
        int cores;
    };
    static const SmToCores kCoresPerSM[] =
    {
        { 0x10, 8 },   // Tesla  Generation (SM 1.0) G80 class
        { 0x11, 8 },   // Tesla  Generation (SM 1.1) G8x class
        { 0x12, 8 },   // Tesla  Generation (SM 1.2) G9x class
        { 0x13, 8 },   // Tesla  Generation (SM 1.3) GT200 class
        { 0x20, 32 },  // Fermi  Generation (SM 2.0) GF100 class
        { 0x21, 48 },  // Fermi  Generation (SM 2.1) GF10x class
        { 0x30, 192 }, // Kepler Generation (SM 3.0) GK10x class
        { 0x35, 192 }, // Kepler Generation (SM 3.5) GK11x class
        { -1, -1 }     // sentinel
    };
    const int encoded = (major << 4) + minor;
    for (int idx = 0; kCoresPerSM[idx].sm != -1; ++idx)
    {
        if (kCoresPerSM[idx].sm == encoded)
            return kCoresPerSM[idx].cores;
    }
    // Unknown SM version: default to the last known entry so callers still
    // get a usable core count.
    printf("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, kCoresPerSM[7].cores);
    return kCoresPerSM[7].cores;
}
// end of GPU Architecture definitions
// Enumerates all CUDA devices and prints their capabilities (deviceQuery-style
// report), followed by a one-line CSV-ish summary. Exits the process if the
// device count cannot be queried.
void cuda_device_querry(){
	cout << "CUDA DEVISE TEST - START" << endl;
	int deviceCount = 0;
	cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
	if (error_id != cudaSuccess)
	{
		// NOTE(review): the " %d\n-> " fragment looks like a leftover printf
		// format string; the message prints it literally.
		cout <<"\tcudaGetDeviceCount returned" <<(int)error_id << " %d\n-> " << cudaGetErrorString(error_id) << endl;
		cout <<"\tResult = FAIL\n" << endl;
		exit(EXIT_FAILURE);
	}
	// This function call returns 0 if there are no CUDA capable devices.
	if (deviceCount == 0)
	{
		cout << "\tThere are no available device(s) that support CUDA\n" << endl;
	}
	else
	{
		cout << "\tDetected " << deviceCount << " CUDA Capable device(s)\n" << endl;
	}
	int dev, driverVersion = 0, runtimeVersion = 0;
	for (dev = 0; dev < deviceCount; ++dev){
		cudaSetDevice(dev);
		cudaDeviceProp deviceProp;
		cudaGetDeviceProperties(&deviceProp, dev);
		// NOTE(review): the device name is printed twice (printf + cout) —
		// presumably leftover debugging; confirm which line to keep.
		printf("\tDevice %d: \"%s\"\n", dev, deviceProp.name);
		cout <<"\tDevice " << dev << ": \" " << deviceProp.name << " \" "<<endl;
		// Console log
		cudaDriverGetVersion(&driverVersion);
		cudaRuntimeGetVersion(&runtimeVersion);
		printf("\tCUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
		printf("\tCUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor);
		char msg[256];
		sprintf(msg, "\tTotal amount of global memory: %.0f MBytes (%llu bytes)\n",
			(float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
		printf("\t%s", msg);
		printf("\t(%2d) Multiprocessors x (%3d) CUDA Cores/MP: %d CUDA Cores\n",
			deviceProp.multiProcessorCount,
			_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
			_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
		printf("\tGPU Clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
		printf("\tMax Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n",
			deviceProp.maxTexture1D , deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
			deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
		printf("\tMax Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d,%d) x %d\n",
			deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1],
			deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]);
		printf("\tTotal amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
		printf("\tTotal amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
		printf("\tTotal number of registers available per block: %d\n", deviceProp.regsPerBlock);
		printf("\tWarp size: %d\n", deviceProp.warpSize);
		printf("\tMaximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
		printf("\tMaximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
		printf("\tMaximum sizes of each dimension of a block: %d x %d x %d\n",
			deviceProp.maxThreadsDim[0],
			deviceProp.maxThreadsDim[1],
			deviceProp.maxThreadsDim[2]);
		printf("\tMaximum sizes of each dimension of a grid: %d x %d x %d\n",
			deviceProp.maxGridSize[0],
			deviceProp.maxGridSize[1],
			deviceProp.maxGridSize[2]);
		printf("\tMaximum memory pitch: %lu bytes\n", deviceProp.memPitch);
		printf("\tTexture alignment: %lu bytes\n", deviceProp.textureAlignment);
		printf("\tConcurrent copy and kernel execution: %s with %d copy engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount);
		printf("\tRun time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
		printf("\tIntegrated GPU sharing Host Memory: %s\n", deviceProp.integrated ? "Yes" : "No");
		printf("\tSupport host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No");
		printf("\tAlignment requirement for Surfaces: %s\n", deviceProp.surfaceAlignment ? "Yes" : "No");
		printf("\tDevice has ECC support: %s\n", deviceProp.ECCEnabled ? "Enabled" : "Disabled");
		printf("\tDevice supports Unified Addressing (UVA): %s\n", deviceProp.unifiedAddressing ? "Yes" : "No");
		printf("\tDevice PCI Bus ID / PCI location ID: %d / %d\n", deviceProp.pciBusID, deviceProp.pciDeviceID);
		// Human-readable names for cudaDeviceProp::computeMode values 0-3;
		// index 4 ("Unknown") assumes computeMode never exceeds 4.
		const char *sComputeMode[] =
		{
			"Default (multiple host threads can use ::cudaSetDevice() with device simultaneously)",
			"Exclusive (only one host thread in one process is able to use ::cudaSetDevice() with this device)",
			"Prohibited (no host thread can use ::cudaSetDevice() with this device)",
			"Exclusive Process (many threads in one process is able to use ::cudaSetDevice() with this device)",
			"Unknown",
			NULL
		};
		printf("\tCompute Mode:\n");
		printf("\t\t< %s >\n", sComputeMode[deviceProp.computeMode]);
	}
	// csv masterlog info
	// *****************************
	// exe and CUDA driver name
	printf("\n");
	std::string sProfileString = "deviceQuery, CUDA Driver = CUDART";
	// NOTE(review): cTemp is 16 bytes; the sprintf calls below fit current
	// version/device numbers but have no headroom — verify against sprintf_s sizes.
	char cTemp[16];
	// driver version
	sProfileString += ", CUDA Driver Version = ";
#ifdef WIN32
	sprintf_s(cTemp, 10, "%d.%d", driverVersion/1000, (driverVersion%100)/10);
#else
	sprintf(cTemp, "%d.%d", driverVersion/1000, (driverVersion%100)/10);
#endif
	sProfileString += cTemp;
	// Runtime version
	sProfileString += ", CUDA Runtime Version = ";
#ifdef WIN32
	sprintf_s(cTemp, 10, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10);
#else
	sprintf(cTemp, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10);
#endif
	sProfileString += cTemp;
	// Device count
	sProfileString += ", NumDevs = ";
#ifdef WIN32
	sprintf_s(cTemp, 10, "%d", deviceCount);
#else
	sprintf(cTemp, "%d", deviceCount);
#endif
	sProfileString += cTemp;
	// Print Out all device Names
	for (dev = 0; dev < deviceCount; ++dev)
	{
#ifdef _WIN32
		sprintf_s(cTemp, 13, ", Device%d = ", dev);
#else
		sprintf(cTemp, ", Device%d = ", dev);
#endif
		cudaDeviceProp deviceProp;
		cudaGetDeviceProperties(&deviceProp, dev);
		sProfileString += cTemp;
		sProfileString += deviceProp.name;
	}
	sProfileString += "\n";
	printf("%s", sProfileString.c_str());
	printf("\tResult = PASS\n");
	// finish
	cout << "CUDA DEVISE TEST - END" << endl;
}
// Reads the LBM geometry specification (lx, ly, lz, number of densities) from
// a whitespace-separated text file into the output reference parameters.
// Exits the process with an explanatory message if the file is missing or
// contains fewer than the four required values.
void read_external_geometry_file_specification_for_LBM(int &lx, int &ly, int &lz, int &n_of_densities, const string filename){
	vector<string> geometry_parameters;
	ifstream conf_file(filename.c_str());
	string buff;
	if(conf_file.is_open()){
		while(conf_file>>buff){
			geometry_parameters.push_back(buff);
		}
		conf_file.close();
		// Guard: the code below indexes parameters [0..3]; a truncated file
		// previously caused out-of-range access.
		if(geometry_parameters.size() < 4){
			cout << "The file "<< filename << " is incomplete: expected 4 values, found "
			     << geometry_parameters.size() << endl;
			exit (-1);
		}
		cout << "Geometry Parameters Read:" << endl;
		lx=atoi(geometry_parameters[0].c_str());
		cout << "\t domain length in X: " << lx << endl;
		ly=atoi(geometry_parameters[1].c_str());
		cout << "\t domain length in Y: " << ly << endl;
		lz=atoi(geometry_parameters[2].c_str());
		cout << "\t domain length in Z: " << lz << endl;
		n_of_densities=atoi(geometry_parameters[3].c_str());
		cout << "\t number of densities on each node: " << n_of_densities << endl;
		cout <<"total:" << geometry_parameters.size() << " parameters were read" << endl;
	}else{
		cout << "The file "<< filename << " was not found" << endl;
		cout << "Create a new file at the root directory with 3 lines (one number on every line), each corresponding to the respective dimension of X,Y,Z" << endl;
		exit (-1);
	}
}
// Host-side constructor: allocates the 19 distribution arrays Q0..Q18
// (one per lattice direction — consistent with a D3Q19-style stencil),
// each lx*ly*lz entries, and zero-initialises them.
lattice::lattice(int LX,int LY, int LZ):
	lx(LX), ly(LY), lz(LZ){
	Q0=new FLOATING[lz*ly*lx];
	Q1=new FLOATING[lz*ly*lx];
	Q2=new FLOATING[lz*ly*lx];
	Q3=new FLOATING[lz*ly*lx];
	Q4=new FLOATING[lz*ly*lx];
	Q5=new FLOATING[lz*ly*lx];
	Q6=new FLOATING[lz*ly*lx];
	Q7=new FLOATING[lz*ly*lx];
	Q8=new FLOATING[lz*ly*lx];
	Q9=new FLOATING[lz*ly*lx];
	Q10=new FLOATING[lz*ly*lx];
	Q11=new FLOATING[lz*ly*lx];
	Q12=new FLOATING[lz*ly*lx];
	Q13=new FLOATING[lz*ly*lx];
	Q14=new FLOATING[lz*ly*lx];
	Q15=new FLOATING[lz*ly*lx];
	Q16=new FLOATING[lz*ly*lx];
	Q17=new FLOATING[lz*ly*lx];
	Q18=new FLOATING[lz*ly*lx];
	initialise(Q0);
	initialise(Q1);
	initialise(Q2);
	initialise(Q3);
	initialise(Q4);
	initialise(Q5);
	initialise(Q6);
	initialise(Q7);
	initialise(Q8);
	initialise(Q9);
	initialise(Q10);
	initialise(Q11);
	initialise(Q12);
	initialise(Q13);
	initialise(Q14);
	initialise(Q15);
	initialise(Q16);
	initialise(Q17);
	initialise(Q18);
}
// Device-side constructor: the extra `dump` parameter only selects this
// overload; its value is unused. Allocates the 19 distribution arrays in GPU
// global memory (no initialisation here, unlike the host overload).
// NOTE(review): cudaMalloc return codes are not checked.
lattice::lattice(int LX,int LY, int LZ, int dump):
	lx(LX), ly(LY), lz(LZ){
	int FLOATING_array_size=lx*ly*lz*sizeof(FLOATING);
	cudaMalloc((void **)&Q0, FLOATING_array_size);
	cudaMalloc((void **)&Q1, FLOATING_array_size);
	cudaMalloc((void **)&Q2, FLOATING_array_size);
	cudaMalloc((void **)&Q3, FLOATING_array_size);
	cudaMalloc((void **)&Q4, FLOATING_array_size);
	cudaMalloc((void **)&Q5, FLOATING_array_size);
	cudaMalloc((void **)&Q6, FLOATING_array_size);
	cudaMalloc((void **)&Q7, FLOATING_array_size);
	cudaMalloc((void **)&Q8, FLOATING_array_size);
	cudaMalloc((void **)&Q9, FLOATING_array_size);
	cudaMalloc((void **)&Q10, FLOATING_array_size);
	cudaMalloc((void **)&Q11, FLOATING_array_size);
	cudaMalloc((void **)&Q12, FLOATING_array_size);
	cudaMalloc((void **)&Q13, FLOATING_array_size);
	cudaMalloc((void **)&Q14, FLOATING_array_size);
	cudaMalloc((void **)&Q15, FLOATING_array_size);
	cudaMalloc((void **)&Q16, FLOATING_array_size);
	cudaMalloc((void **)&Q17, FLOATING_array_size);
	cudaMalloc((void **)&Q18, FLOATING_array_size);
}
// Releases the distribution arrays with delete[].
// NOTE(review): lattices built with the device-side (dump) overload hold
// cudaMalloc'ed pointers — delete[] on those is undefined behaviour; such
// objects appear to be released via cudaFree elsewhere (see
// LBM::delete_device_data). Confirm no device-side lattice reaches this
// destructor.
lattice::~lattice(){
	delete [] Q0;
	delete [] Q1;
	delete [] Q2;
	delete [] Q3;
	delete [] Q4;
	delete [] Q5;
	delete [] Q6;
	delete [] Q7;
	delete [] Q8;
	delete [] Q9;
	delete [] Q10;
	delete [] Q11;
	delete [] Q12;
	delete [] Q13;
	delete [] Q14;
	delete [] Q15;
	delete [] Q16;
	delete [] Q17;
	delete [] Q18;
	printf("host memories deleted!\n");
}
// Zeroes every lattice site of one distribution array using the project's
// index(z, y, x) addressing.
void lattice::initialise(FLOATING *Q){
	for (int k = 0; k < lz; ++k) {
		for (int j = 0; j < ly; ++j) {
			for (int i = 0; i < lx; ++i) {
				Q[index(k, j, i)] = 0.0;
			}
		}
	}
}
// Writes a 9-line template configuration file (same values the solver
// expects to read back); silently does nothing if the file cannot be opened.
void LBM::create_an_example_configuration_files(const string filename){
	static const char *default_lines[] = {
		"10", "100", "0.0175", "7", "100",
		"26", "59", "512", "datum_design_case_name1"
	};
	ofstream example_file(filename.c_str());
	if (example_file.is_open()){
		const size_t n_lines = sizeof(default_lines) / sizeof(default_lines[0]);
		for (size_t i = 0; i < n_lines; ++i)
			example_file << default_lines[i] << endl;
		example_file.close();
	}
}
// Prints a human-readable description of the 9-line configuration file
// format expected by read_external_configuration_file_for_the_solver.
void LBM::display_the_structure_of_an_example_configuration_file(){
	cout<< "Create a new file (with 9 lines) at the root directory following the template below:"
		<< endl;
	cout << "10		<--line 1: number of iterations" << endl;
	cout << "100		<--line 2: check frequency" << endl;
	cout << "0.0175		<--line 3: nu" << endl;
	cout << "7		<--line 4: r_small (from baffle geometry)" << endl;
	cout << "100		<--line 5: Reynolds Number" << endl;
	cout << "26		<--line 6: S (from baffle geometry)" << endl;
	cout << "59		<--line 7: baffle possition" << endl;
	cout << "512		<--line 8: CUDA threads per kernel" << endl;
	cout << "datum_design	<--line 9: case name (ONE WORD!)" << endl;
}
// Reads the 9 solver parameters (iterations, check step, nu, r_small,
// Reynolds, S, baffle position, CUDA threads, case name) from a
// whitespace-separated file into the corresponding members. On a missing or
// truncated file it prints the expected format, writes a template, and exits.
void LBM::read_external_configuration_file_for_the_solver(const string filename) {
	vector<string> configuration_parameters;
	ifstream conf_file(filename.c_str());
	string buff;
	if (conf_file.is_open()) {
		while (conf_file >> buff) {
			configuration_parameters.push_back(buff);
		}
		conf_file.close();
		// Guard: the code below indexes parameters [0..8]; a truncated file
		// previously caused out-of-range access.
		if (configuration_parameters.size() < 9) {
			cout << "The file " << filename << " is incomplete: expected 9 parameters, found "
			     << configuration_parameters.size() << endl;
			display_the_structure_of_an_example_configuration_file();
			exit(-2);
		}
		cout << "Configuration Parameters Read:" << endl;
		max_iterations = atoi(configuration_parameters[0].c_str());
		cout << "\titerations: " << max_iterations << endl;
		//check step:perform check_density and export
		check_step = atoi(configuration_parameters[1].c_str());
		cout << "\tcheck step: " << check_step << endl;
		nu = atof(configuration_parameters[2].c_str());
		cout << "\tnu: " << nu << endl;
		r_small = atof(configuration_parameters[3].c_str());
		cout << "\tr_small: " << r_small << endl;
		reynolds = atof(configuration_parameters[4].c_str());
		cout << "\treynolds: " << reynolds << endl;
		s = atof(configuration_parameters[5].c_str());
		cout << "\ts: " << s << endl;
		baffle = atoi(configuration_parameters[6].c_str());
		cout << "\tbaffle position on X=" << baffle << endl;
		threads_per_kernel = atoi(configuration_parameters[7].c_str());
		cout << "\tCUDA threads per kernel: " << threads_per_kernel << endl;
		case_name = configuration_parameters[8].c_str();
		cout << "Case: " << case_name << endl;
		cout << "total:" << configuration_parameters.size()
				<< " parameters were read" << endl;
	} else {
		cout << "The file "<<filename <<" was not found" << endl;
		display_the_structure_of_an_example_configuration_file();
		create_an_example_configuration_files(filename);
		exit(-2);
	}
}
// Frees every device-side buffer owned by the solver: the two device lattice
// mirrors (D3_d / D3_hlp_d, 19 distribution arrays each) and the auxiliary
// velocity/boundary/diagnostic arrays.
// NOTE(review): cudaFree return codes are not checked.
void LBM::delete_device_data(){
	cudaFree(D3_d.Q0);
	cudaFree(D3_d.Q1);
	cudaFree(D3_d.Q2);
	cudaFree(D3_d.Q3);
	cudaFree(D3_d.Q4);
	cudaFree(D3_d.Q5);
	cudaFree(D3_d.Q6);
	cudaFree(D3_d.Q7);
	cudaFree(D3_d.Q8);
	cudaFree(D3_d.Q9);
	cudaFree(D3_d.Q10);
	cudaFree(D3_d.Q11);
	cudaFree(D3_d.Q12);
	cudaFree(D3_d.Q13);
	cudaFree(D3_d.Q14);
	cudaFree(D3_d.Q15);
	cudaFree(D3_d.Q16);
	cudaFree(D3_d.Q17);
	cudaFree(D3_d.Q18);
	cudaFree(D3_hlp_d.Q0);
	cudaFree(D3_hlp_d.Q1);
	cudaFree(D3_hlp_d.Q2);
	cudaFree(D3_hlp_d.Q3);
	cudaFree(D3_hlp_d.Q4);
	cudaFree(D3_hlp_d.Q5);
	cudaFree(D3_hlp_d.Q6);
	cudaFree(D3_hlp_d.Q7);
	cudaFree(D3_hlp_d.Q8);
	cudaFree(D3_hlp_d.Q9);
	cudaFree(D3_hlp_d.Q10);
	cudaFree(D3_hlp_d.Q11);
	cudaFree(D3_hlp_d.Q12);
	cudaFree(D3_hlp_d.Q13);
	cudaFree(D3_hlp_d.Q14);
	cudaFree(D3_hlp_d.Q15);
	cudaFree(D3_hlp_d.Q16);
	cudaFree(D3_hlp_d.Q17);
	cudaFree(D3_hlp_d.Q18);
	cudaFree(u_current_d);
	cudaFree(u_current_temp_d);
	cudaFree(v_current_d);
	cudaFree(w_current_d);
	cudaFree(u_previous_spatial_boundary_d);
	cudaFree(v_previous_spatial_boundary_d);
	cudaFree(w_previous_spatial_boundary_d);
	cudaFree(u_previous_temporal_boundary_d);
	cudaFree(v_previous_temporal_boundary_d);
	cudaFree(w_previous_temporal_boundary_d);
	cudaFree(temp_check_density_d);
	cudaFree(temp_check_density_d_full);
	cudaFree(obstacles_d);
	cudaFree(temp_Uc_d);
	printf("cuda memories deleted!\n");
}
// Fills the first `length` entries of a device array with `value`.
// One thread per element; safe with grids sized by ceiling division.
__global__
void cuda_initialise_array(FLOATING *input_array, const int length, const FLOATING value){
	int tid=blockIdx.x*blockDim.x+threadIdx.x;
	// Guard added: callers compute the block count with ceiling division
	// (see LBM::calculate_CUDA_quantities), so the last block can contain
	// threads past the end of the array — those previously wrote out of
	// bounds. The trailing __syncthreads() was removed: threads share no
	// data here, so the barrier served no purpose.
	if (tid < length)
		input_array[tid]=value;
}
// Fills the first `length` entries of a host array with `init_value`.
template <class T>
void LBM::initialise_array(T *array, const int length,const T init_value){
	int idx = 0;
	while (idx < length) {
		array[idx] = init_value;
		++idx;
	}
}
// Runs the complete host-side setup: builds the initial distributions and
// geometry, performs iteration 0 of the LBM pipeline, then uploads the state
// to the device. compute_domain() continues from iteration 1.
void LBM::abstract_initialise(){
	//objectives
	FLOATING temp_density=0.0;
	initialise_microscopic_density_arrays_in_the_host();
	calculate_macroscopic_density_in_the_host(temp_density);
	create_reactor_geometry_in_the_host();
#ifdef PRODUCE_OUTPUT_FILES
	geometry_file_in_VTK();
#endif //PRODUCE_OUTPUT_FILES
	cout << "0th loop" <<endl;
	//myLBM.initial_redistribute();
	fortran_redistribute(0);
	//first loop!!!
	calculate_macroscopic_density_in_the_host(temp_density);
	streaming();
	bounceback();
	initial_relaxation();
	convective_BC();
	cout <<"# iteration 0" << endl;
	count_no_obstacles_at_penultimate_x_slice();
	copy_data_from_host_to_device();
}
// Every check_step iterations, recomputes/checks the macroscopic density on
// whichever side (host and/or device) was compiled in via CPU_part/GPU_part.
void LBM::abstract_check_density(){
	//check density
	if( time_unit%check_step==0){
#ifdef CPU_part
		calculate_macroscopic_density_in_the_host(density);
#endif
#ifdef GPU_part
		cuda_check_density(time_unit);
#endif
	}
}
// Debug-build-only cross-checks of host vs. device state; compiled to a
// no-op unless DEBUG is defined.
void LBM::abstract_debug_computations(){
#ifdef DEBUG
	compare_obstacles(obstacles);
	compare_nodes_hlp(n_hlp);
	compare_nodes(node);
#endif
}
// Dispatches the redistribution step to the CPU and/or GPU implementation,
// depending on which of CPU_part / GPU_part was compiled in.
void LBM::abstract_redistribute(){
	//redistribute
#ifdef CPU_part
	redistribute();
#endif
#ifdef GPU_part
	cuda_redistribute();
#endif
}
// Dispatches the streaming step to the CPU and/or GPU implementation,
// depending on which of CPU_part / GPU_part was compiled in.
void LBM::abstract_streaming(){
	//streaming
#ifdef CPU_part
	streaming();
#endif
#ifdef GPU_part
	cuda_streaming();
#endif
}
// Dispatches the bounce-back step to the CPU and/or GPU implementation,
// depending on which of CPU_part / GPU_part was compiled in.
void LBM::abstract_bounce_back(){
	//bounce back
#ifdef CPU_part
	bounceback();
#endif
#ifdef GPU_part
	cuda_bounceback();
#endif
}
// Dispatches the relaxation (collision) step to the CPU and/or GPU
// implementation, depending on which of CPU_part / GPU_part was compiled in.
void LBM::abstract_relaxation(){
	//relaxation
#ifdef CPU_part
	relaxation();
#endif
#ifdef GPU_part
	cuda_relaxation();
#endif
}
// Dispatches the convective-outflow boundary condition to the CPU and/or GPU
// implementation, depending on which of CPU_part / GPU_part was compiled in.
void LBM::abstract_convective_boundary_conditions(){
	//convective BC
#ifdef CPU_part
	convective_BC();
#endif
#ifdef GPU_part
	cuda_convective_BC();
#endif
}
// One LBM time step; the order of these stages is significant.
void LBM::core_computations(){
	//LBM CORE
	abstract_redistribute();
	abstract_streaming();
	abstract_bounce_back();
	abstract_relaxation();
	abstract_convective_boundary_conditions();
}
// Main time-stepping loop. Iteration 0 is performed in abstract_initialise(),
// so time_unit starts at 1.
void LBM::compute_domain(){
	//starting from second loop!
	for (time_unit = 1; time_unit<max_iterations ; ++time_unit){
		// cout <<"# iteration " << time_unit << endl;
		cout <<time_unit <<". ";
		abstract_check_density();
		abstract_debug_computations();
		core_computations();
	}
	cout << endl;
}
// Final post-processing: derives the macroscopic quantities from the
// distributions and, when output files are enabled, writes them in VTK/SI form.
void LBM::export_solution(){
	calculate_macroscopic_quantities(time_unit);
#ifdef PRODUCE_OUTPUT_FILES
	write_VTK_SI(time_unit);
#endif //PRODUCE_OUTPUT_FILES
}
// Explicit instantiations of initialise_array for the two floating-point
// types that FLOATING may alias.
template void
LBM::initialise_array<double>(double *array, const int length,const double init_value);
template void
LBM::initialise_array<float>(float *array, const int length,const float init_value);
// Intended to allocate a device array of `length` elements and zero it.
// NOTE(review): BUG — `array` is passed BY VALUE, so the device pointer that
// cudaMalloc writes goes into a local copy and is lost on return: the
// caller's pointer is unchanged and the allocation leaks. The parameter
// should be `T *&array` (requires a matching change to the declaration,
// which is outside this file's view). The fill kernel is also commented out,
// so the launch geometry below is currently computed but unused.
template <typename T>
void LBM::allocate_and_initialise(T *array, const int length){
	cudaMalloc((void **)&array, length*sizeof(T));
	int n_of_threads=threads_per_kernel;
	int n_of_blocks=ceil((length*1.0)/n_of_threads);
	dim3 threads_type2(n_of_threads,1,1);
	dim3 grid_type2(n_of_blocks,1,1);
	//make it a template! (was: "kane to template!")
	//cuda_initialise_array<<<grid_type2,threads_type2>>>(array, length, 0.0);
}
// Derives the CUDA launch geometry from threads_per_kernel: block counts use
// ceiling division so every element of the 3D (bulk kernels) and 2D
// (boundary-condition kernels) domains is covered.
void LBM::calculate_CUDA_quantities() {
	const int threads = threads_per_kernel;
	threads_for_streaming_collision_and_relaxation = threads;
	blocks_for_streaming_collision_and_relaxation =
		(three_dimensional_length + threads - 1) / threads;
	size_of_allocated_shared_memory_for_streaming_collision_and_relaxation =
		threads * sizeof(FLOATING);
	convective_boundary_conditions_blocks =
		(two_dimensional_length + threads - 1) / threads;
}
// Starts a fresh convergence log: removes any previous LBM2_convergence.txt
// and writes a new file containing only the header line.
void LBM::reset_convergence_file(){
	const char *log_name = "LBM2_convergence.txt";
	if (remove(log_name) != 0)
		cout <<"couldn't delete LBM2_convergence.txt" << endl;
	else
		cout<< "creating LBM2_convergence.txt" << endl;
	ofstream convergence_file(log_name);
	convergence_file<<"#iteration ; converegence_value" << endl;
	convergence_file.close();
}
// Prints the launch geometry computed by calculate_CUDA_quantities().
void LBM::display_CUDA_specifications(){
	cout <<"CUDA specifications:" <<endl;
	cout <<"\tstreaming/collision/relaxation:" << endl;
	cout <<"\t\tthreads: "<<threads_for_streaming_collision_and_relaxation<<endl;
	cout <<"\t\tblocks: "<<blocks_for_streaming_collision_and_relaxation<<endl;
	cout <<"\t\tshare memory size: "<< size_of_allocated_shared_memory_for_streaming_collision_and_relaxation <<endl;
	cout <<"\tconvective boundary conditions:" << endl;
	cout <<"\t\tthreads:" << threads_per_kernel << endl;
	cout <<"\t\tblocks: "<< convective_boundary_conditions_blocks << endl;
}
// Zero-fills the host-side obstacle map (3D) and the nine 2D velocity-slice
// buffers used for the convective boundary condition.
void LBM::initialise_host_data(){
	initialise_array(obstacles, three_dimensional_length,0 );
	initialise_array<FLOATING>(u_current, two_dimensional_length,0.0);
	initialise_array<FLOATING>(v_current, two_dimensional_length,0.0);
	initialise_array<FLOATING>(w_current, two_dimensional_length,0.0);
	// u_previous_spatial_boundary: at boundary - 1 (in x)
	initialise_array<FLOATING>(u_previous_spatial_boundary, two_dimensional_length,0.0);
	initialise_array<FLOATING>(v_previous_spatial_boundary, two_dimensional_length,0.0);
	initialise_array<FLOATING>(w_previous_spatial_boundary, two_dimensional_length,0.0);
	// u_prev: at boundary - 1 (in time)
	initialise_array<FLOATING>(u_previous_temporal_boundary, two_dimensional_length,0.0);
	initialise_array<FLOATING>(v_previous_temporal_boundary, two_dimensional_length,0.0);
	initialise_array<FLOATING>(w_previous_temporal_boundary, two_dimensional_length,0.0);
}
// Allocates the auxiliary device buffers (velocity slices, density-check
// scratch, obstacle map, Uc scratch).
// NOTE(review): cudaMalloc return codes are not checked, and the lx*ly*lz/4
// sizing of temp_check_density_d is unexplained here — presumably a reduced
// partial-sum buffer; confirm against cuda_check_density.
void LBM::allocate_device_arrays(){
	//allocate additional cuda memories
	cudaMalloc((void **)&u_current_d, FLOATING_slice_size);
	cudaMalloc((void **)&u_current_temp_d, FLOATING_slice_size);
	cudaMalloc((void **)&v_current_d, FLOATING_slice_size);
	cudaMalloc((void **)&w_current_d, FLOATING_slice_size);
	cudaMalloc((void **)&u_previous_spatial_boundary_d, FLOATING_slice_size);
	cudaMalloc((void **)&v_previous_spatial_boundary_d, FLOATING_slice_size);
	cudaMalloc((void **)&w_previous_spatial_boundary_d, FLOATING_slice_size);
	cudaMalloc((void **)&u_previous_temporal_boundary_d, FLOATING_slice_size);
	cudaMalloc((void **)&v_previous_temporal_boundary_d, FLOATING_slice_size);
	cudaMalloc((void **)&w_previous_temporal_boundary_d, FLOATING_slice_size);
	cudaMalloc((void **)&temp_check_density_d, lx*ly*lz/4 *sizeof(FLOATING));
	cudaMalloc((void **)&temp_check_density_d_full, lx*ly*lz*sizeof(FLOATING));
	cudaMalloc((void **)&obstacles_d, int_array_size);
	cudaMalloc((void **)&temp_Uc_d, 2*sizeof(FLOATING));
}
void LBM::initialise_device_data(){
dim3 threads_type2(threads_for_streaming_collision_and_relaxation,1,1);
dim3 grid_type2(blocks_for_streaming_collision_and_relaxation,1,1);
cuda_initialise_array<<<grid_type2,threads_type2>>>(temp_check_density_d_full, lx*ly*lz, 0.0);
}
// One-stop initialisation: zero the host arrays, allocate the device buffers,
// then initialise device-side data. The order matters — the device
// initialisation launches a kernel on a buffer created by the allocation step.
void LBM::initialise_all_data_arrays(){
initialise_host_data();
allocate_device_arrays();
initialise_device_data();
}
void LBM::display_LBM_specifications(){
// Print the built-in LBM physical and relaxation quantities to stdout.
cout << "constructing LBM(built-in quantities)" << endl;
// Fixed: the density line was missing the '=' every other line has,
// producing output like "density1" instead of "density=1".
cout << "\tdensity=" << density << endl;
cout << "\tt_0=" << t_0 << endl;
cout << "\tt_1=" << t_1 << endl;
cout << "\tt_2=" << t_2 << endl;
cout << "\tc_squ=" << c_squ << endl;
cout << "\ttau=" << tau << endl;
cout << "\tomega=" << omega << endl;
}
// Construct the solver for an LX x LY x LZ lattice: fixes the relaxation
// parameters and lattice weights, allocates all host-side arrays, reads the
// external configuration file, computes the CUDA launch quantities, and then
// allocates/initialises all host and device data.
// NOTE(review): C++ initialises members in class-declaration order, NOT in
// the order written below. Several initialisers depend on other members
// (t_0..t_2 on density, reciprocal_c_squ on c_squ, tau on nu, omega on tau,
// one_minus_omega on omega, the array news on lx/ly/lz) — this is only safe
// if the declaration order in the class header matches; confirm there.
LBM::LBM(const int &LX, const int &LY, const int &LZ, const FLOATING &DENSITY, const FLOATING &T_0,
const FLOATING &T_1, const FLOATING &T_2, const FLOATING &C_SQU):
time_elapsed(0),
max_iterations(1000),
check_step(100),
lx(LX), ly(LY), lz(LZ),
lattice_nodes(lx*ly*lz), no_obstacle_lattices_at_penultimate_x_slice(0),
// CUDA launch configuration defaults (may be overridden by the config file).
threads_for_streaming_collision_and_relaxation(512),
blocks_for_streaming_collision_and_relaxation(32),
size_of_allocated_shared_memory_for_streaming_collision_and_relaxation(48*1024),
convective_boundary_conditions_blocks(32),
// Physical constants: kinematic viscosity, radius, Reynolds number, etc.
nu(0.0175), r_small(6.67897), reynolds(195.732), s(23.7849), density(DENSITY),
t_0(density*T_0), t_1(density*T_1), t_2(density*T_2), c_squ(C_SQU), reciprocal_c_squ(1.0/c_squ),
baffle(XBAFFLE), threads_per_kernel(MANY_THREADS), time_unit(0),
two_dimensional_length(ly*lz),
three_dimensional_length(lx*ly*lz),
FLOATING_slice_size((two_dimensional_length)*sizeof(FLOATING)),
int_array_size((three_dimensional_length)*sizeof(int)),
// BGK relaxation: tau from viscosity, omega = 1/tau.
tau(3.0*nu + 0.5), omega(1.0 /tau), one_minus_omega (1.0-omega),
pr_diff(0.0), pr_out(0.0), pr_in(0.0), vor(0.0),
// Host-side lattices and arrays (raw news; released in delete_host_memories).
D3(lx, ly, lz), D3_hlp(lx, ly, lz), obstacles(new int[lz*ly*lx]),
u_current(new FLOATING[ly*lz]), v_current(new FLOATING[ly*lz]), w_current(new FLOATING[ly*lz]),
u_previous_spatial_boundary(new FLOATING[ly*lz]), v_previous_spatial_boundary(new FLOATING[ly*lz]), w_previous_spatial_boundary(new FLOATING[ly*lz]),
u_previous_temporal_boundary(new FLOATING[ly*lz]), v_previous_temporal_boundary(new FLOATING[ly*lz]), w_previous_temporal_boundary(new FLOATING[ly*lz]),
// Device pointers start NULL; allocated later in allocate_device_arrays().
u_current_d(NULL), u_current_temp_d(NULL), v_current_d(NULL), w_current_d(NULL),
// u_previous_spatial_boundary: at boundary - 1 (in x)
u_previous_spatial_boundary_d(NULL), v_previous_spatial_boundary_d(NULL), w_previous_spatial_boundary_d(NULL),
// u_prev: at boundary - 1 (in time)
u_previous_temporal_boundary_d(NULL), v_previous_temporal_boundary_d(NULL), w_previous_temporal_boundary_d(NULL),
temp_cpu_u_current_d(NULL), temp_cpu_v_current_d(NULL), temp_cpu_w_current_d(NULL),
temp_cpu_u_previous_temporal_boundary_d(NULL), temp_cpu_v_previous_temporal_boundary_d(NULL), temp_cpu_w_previous_temporal_boundary_d(NULL),
temp_cpu_u_previous_spatial_boundary_d(NULL), temp_cpu_v_previous_spatial_boundary_d(NULL), temp_cpu_w_previous_spatial_boundary_d(NULL),
temp_check_density_d(NULL), temp_check_density_d_full(NULL),
data_location(CPU),
temp_Uc_d(NULL), obstacles_d(NULL),
D3_d(lx, ly, lz, 0), D3_hlp_d(lx, ly, lz, 0),
// Full-lattice host output fields: velocity, pressure, vorticity.
Ux(new FLOATING[lx*ly*lz]),
Uy(new FLOATING[lx*ly*lz]),
Uz(new FLOATING[lx*ly*lz]),
Pressure(new FLOATING[lx*ly*lz]),
Wx(new FLOATING[lx*ly*lz]),
Wy(new FLOATING[lx*ly*lz]),
Wz(new FLOATING[lx*ly*lz]){
cout << "***LBM Starting***" << endl;
// Record the wall-clock start time; used to report total run time later.
time (&time_start);
read_external_configuration_file_for_the_solver("LBM2_configuration.txt");
reset_convergence_file();
calculate_CUDA_quantities();
display_CUDA_specifications();
initialise_all_data_arrays();
display_LBM_specifications();
abstract_initialise();
}
void LBM::delete_host_memories(){
delete [] obstacles;
delete [] u_current;
delete [] v_current;
delete [] w_current;
delete [] u_previous_spatial_boundary;
delete [] v_previous_spatial_boundary;
delete [] w_previous_spatial_boundary;
delete [] u_previous_temporal_boundary;
delete [] v_previous_temporal_boundary;
delete [] w_previous_temporal_boundary;
delete [] Ux;
delete [] Uy;
delete [] Uz;
delete [] Pressure;
delete [] Wx;
delete [] Wy;
delete [] Wz;
}
LBM::~LBM(){
// Free host arrays first, then device buffers, then report completion.
delete_host_memories();
delete_device_data();
cout << "all memories were deallocated!" << endl;
// time_elapsed is maintained elsewhere; report total run time on exit.
cout << endl << "LBM2 ended in " << time_elapsed << "secs !" << endl;
}
// Count the lattice nodes that are NOT obstacles on the x = lx-1 slice and
// cache the result in no_obstacle_lattices_at_penultimate_x_slice.
// NOTE(review): despite the name "penultimate", the index used is lx-1 — the
// LAST x slice; the penultimate slice would be lx-2. Confirm which is intended.
void LBM::count_no_obstacles_at_penultimate_x_slice(){
no_obstacle_lattices_at_penultimate_x_slice = 0;
// NOTE(review): #pragma unroll is a CUDA device-compiler hint; on these
// host-side loops it is most likely ignored by the host compiler.
#pragma unroll
for (int z = 0 ; z< lz ; ++z){
#pragma unroll
for (int y = 0 ; y< ly ; ++y){
if (obstacles[index(z,y,(lx-1))]==0) {
++no_obstacle_lattices_at_penultimate_x_slice ;
}
}
}
cout << "number of free lattices at U direction at the penultimate slice:" << no_obstacle_lattices_at_penultimate_x_slice <<endl;
}
void LBM::copy_data_from_host_to_device(){//copy data to CUDA variables
int const array_length=(lx*ly*lz);
int const slice_length=(ly*lz);
int const FLOATING_array_size=array_length*sizeof(FLOATING);
int const FLOATING_slice_size=slice_length*sizeof(FLOATING);
int const int_array_size=array_length*sizeof(int);
cudaMemcpy(D3_d.Q0 ,D3.Q0,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q1 ,D3.Q1,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q2 ,D3.Q2,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q3 ,D3.Q3,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q4 ,D3.Q4,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q5 ,D3.Q5,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q6 ,D3.Q6,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q7 ,D3.Q7,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q8 ,D3.Q8,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q9 ,D3.Q9,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q10 ,D3.Q10,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q11 ,D3.Q11,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q12 ,D3.Q12,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q13 ,D3.Q13,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q14 ,D3.Q14,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q15 ,D3.Q15,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q16 ,D3.Q16,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q17 ,D3.Q17,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q18 ,D3.Q18,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q0 ,D3_hlp.Q0,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q1 ,D3_hlp.Q1,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q2 ,D3_hlp.Q2,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q3 ,D3_hlp.Q3,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q4 ,D3_hlp.Q4,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q5 ,D3_hlp.Q5,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q6 ,D3_hlp.Q6,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q7 ,D3_hlp.Q7,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q8 ,D3_hlp.Q8,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q9 ,D3_hlp.Q9,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q10 ,D3_hlp.Q10,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q11 ,D3_hlp.Q11,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q12 ,D3_hlp.Q12,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q13 ,D3_hlp.Q13,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q14 ,D3_hlp.Q14,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q15 ,D3_hlp.Q15,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q16 ,D3_hlp.Q16,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q17 ,D3_hlp.Q17,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q18 ,D3_hlp.Q18,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(obstacles_d ,obstacles,int_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(u_current_d ,u_current,FLOATING_slice_size,cudaMemcpyHostToDevice);
cudaMemcpy(u_current_temp_d ,u_current,FLOATING_slice_size,cudaMemcpyHostToDevice);
cudaMemcpy(v_current_d ,v_current,FLOATING_slice_size,cudaMemcpyHostToDevice);
cudaMemcpy(w_current_d ,w_current,FLOATING_slice_size,cudaMemcpyHostToDevice);
cudaMemcpy(u_previous_spatial_boundary_d ,u_previous_spatial_boundary,FLOATING_slice_size,cudaMemcpyHostToDevice);
cudaMemcpy(v_previous_spatial_boundary_d ,v_previous_spatial_boundary,FLOATING_slice_size,cudaMemcpyHostToDevice);
cudaMemcpy(w_previous_spatial_boundary_d ,w_previous_spatial_boundary,FLOATING_slice_size,cudaMemcpyHostToDevice);
cudaMemcpy(u_previous_temporal_boundary_d ,u_previous_temporal_boundary,FLOATING_slice_size,cudaMemcpyHostToDevice);
cudaMemcpy(v_previous_temporal_boundary_d ,v_previous_temporal_boundary,FLOATING_slice_size,cudaMemcpyHostToDevice);
cudaMemcpy(w_previous_temporal_boundary_d ,w_previous_temporal_boundary,FLOATING_slice_size,cudaMemcpyHostToDevice);
data_location=GPU;
printf("all data were copied to device\n");
}
void LBM::small_copy_data_from_host_to_device(){//copy data to CUDA variables
int const array_length=(lx*ly*lz);
int const FLOATING_array_size=array_length*sizeof(FLOATING);
cudaMemcpy(D3_d.Q0 ,D3.Q0,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q1 ,D3.Q1,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q2 ,D3.Q2,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q3 ,D3.Q3,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q4 ,D3.Q4,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q5 ,D3.Q5,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q6 ,D3.Q6,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q7 ,D3.Q7,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q8 ,D3.Q8,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q9 ,D3.Q9,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q10 ,D3.Q10,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q11 ,D3.Q11,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q12 ,D3.Q12,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q13 ,D3.Q13,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q14 ,D3.Q14,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q15 ,D3.Q15,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q16 ,D3.Q16,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q17 ,D3.Q17,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_d.Q18 ,D3.Q18,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q0 ,D3_hlp.Q0,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q1 ,D3_hlp.Q1,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q2 ,D3_hlp.Q2,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q3 ,D3_hlp.Q3,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q4 ,D3_hlp.Q4,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q5 ,D3_hlp.Q5,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q6 ,D3_hlp.Q6,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q7 ,D3_hlp.Q7,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q8 ,D3_hlp.Q8,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q9 ,D3_hlp.Q9,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q10 ,D3_hlp.Q10,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q11 ,D3_hlp.Q11,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q12 ,D3_hlp.Q12,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q13 ,D3_hlp.Q13,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q14 ,D3_hlp.Q14,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q15 ,D3_hlp.Q15,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q16 ,D3_hlp.Q16,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q17 ,D3_hlp.Q17,FLOATING_array_size,cudaMemcpyHostToDevice);
cudaMemcpy(D3_hlp_d.Q18 ,D3_hlp.Q18,FLOATING_array_size,cudaMemcpyHostToDevice);
data_location=GPU;
printf("all data were copied to device\n");
}
void LBM::copy_data_from_device_to_host(){
int const array_length=(lx*ly*lz);
int const slice_length=(ly*lz);
int const FLOATING_array_size=array_length*sizeof(FLOATING);
int const FLOATING_slice_size=slice_length*sizeof(FLOATING);
int const int_array_size=array_length*sizeof(int);
cudaMemcpy(D3.Q0 ,D3_d.Q0,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q1 ,D3_d.Q1,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q2 ,D3_d.Q2,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q3 ,D3_d.Q3,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q4 ,D3_d.Q4,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q5 ,D3_d.Q5,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q6 ,D3_d.Q6,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q7 ,D3_d.Q7,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q8 ,D3_d.Q8,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q9 ,D3_d.Q9,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q10 ,D3_d.Q10,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q11 ,D3_d.Q11,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q12 ,D3_d.Q12,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q13 ,D3_d.Q13,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q14 ,D3_d.Q14,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q15 ,D3_d.Q15,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q16 ,D3_d.Q16,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q17 ,D3_d.Q17,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q18 ,D3_d.Q18,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q0 ,D3_hlp_d.Q0,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q1 ,D3_hlp_d.Q1,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q2 ,D3_hlp_d.Q2,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q3 ,D3_hlp_d.Q3,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q4 ,D3_hlp_d.Q4,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q5 ,D3_hlp_d.Q5,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q6 ,D3_hlp_d.Q6,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q7 ,D3_hlp_d.Q7,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q8 ,D3_hlp_d.Q8,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q9 ,D3_hlp_d.Q9,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q10 ,D3_hlp_d.Q10,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q11 ,D3_hlp_d.Q11,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q12 ,D3_hlp_d.Q12,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q13 ,D3_hlp_d.Q13,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q14 ,D3_hlp_d.Q14,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q15 ,D3_hlp_d.Q15,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q16 ,D3_hlp_d.Q16,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q17 ,D3_hlp_d.Q17,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q18 ,D3_hlp_d.Q18,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(obstacles ,obstacles_d,int_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(u_current ,u_current_d,FLOATING_slice_size,cudaMemcpyDeviceToHost);
cudaMemcpy(v_current ,v_current_d,FLOATING_slice_size,cudaMemcpyDeviceToHost);
cudaMemcpy(w_current ,w_current_d,FLOATING_slice_size,cudaMemcpyDeviceToHost);
cudaMemcpy(u_previous_spatial_boundary ,u_previous_spatial_boundary_d,FLOATING_slice_size,cudaMemcpyDeviceToHost);
cudaMemcpy(v_previous_spatial_boundary ,v_previous_spatial_boundary_d,FLOATING_slice_size,cudaMemcpyDeviceToHost);
cudaMemcpy(w_previous_spatial_boundary ,w_previous_spatial_boundary_d,FLOATING_slice_size,cudaMemcpyDeviceToHost);
cudaMemcpy(u_previous_temporal_boundary ,u_previous_temporal_boundary_d,FLOATING_slice_size,cudaMemcpyDeviceToHost);
cudaMemcpy(v_previous_temporal_boundary ,v_previous_temporal_boundary_d,FLOATING_slice_size,cudaMemcpyDeviceToHost);
cudaMemcpy(w_previous_temporal_boundary ,w_previous_temporal_boundary_d,FLOATING_slice_size,cudaMemcpyDeviceToHost);
data_location=CPU;
printf("all data were copied to host\n");
}
void LBM::small_copy_data_from_device_to_host(){
int const array_length=(lx*ly*lz);
int const FLOATING_array_size=array_length*sizeof(FLOATING);
cudaMemcpy(D3.Q0 ,D3_d.Q0,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q1 ,D3_d.Q1,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q2 ,D3_d.Q2,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q3 ,D3_d.Q3,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q4 ,D3_d.Q4,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q5 ,D3_d.Q5,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q6 ,D3_d.Q6,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q7 ,D3_d.Q7,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q8 ,D3_d.Q8,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q9 ,D3_d.Q9,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q10 ,D3_d.Q10,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q11 ,D3_d.Q11,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q12 ,D3_d.Q12,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q13 ,D3_d.Q13,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q14 ,D3_d.Q14,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q15 ,D3_d.Q15,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q16 ,D3_d.Q16,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q17 ,D3_d.Q17,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3.Q18 ,D3_d.Q18,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q0 ,D3_hlp_d.Q0,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q1 ,D3_hlp_d.Q1,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q2 ,D3_hlp_d.Q2,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q3 ,D3_hlp_d.Q3,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q4 ,D3_hlp_d.Q4,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q5 ,D3_hlp_d.Q5,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q6 ,D3_hlp_d.Q6,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q7 ,D3_hlp_d.Q7,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q8 ,D3_hlp_d.Q8,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q9 ,D3_hlp_d.Q9,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q10 ,D3_hlp_d.Q10,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q11 ,D3_hlp_d.Q11,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q12 ,D3_hlp_d.Q12,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q13 ,D3_hlp_d.Q13,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q14 ,D3_hlp_d.Q14,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q15 ,D3_hlp_d.Q15,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q16 ,D3_hlp_d.Q16,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q17 ,D3_hlp_d.Q17,FLOATING_array_size,cudaMemcpyDeviceToHost);
cudaMemcpy(D3_hlp.Q18 ,D3_hlp_d.Q18,FLOATING_array_size,cudaMemcpyDeviceToHost);
data_location=CPU;
printf("all data were copied to host\n");
}
void LBM::compare_obstacles(int *outter_obst){
// Verify the host obstacle map matches an external reference map; abort
// the program on the first difference found.
for (int z = 0; z < lz; ++z){
    for (int y = 0; y < ly; ++y){
        for (int x = 0; x < lx; ++x){
            if (obstacles[index(z,y,x)] != outter_obst[index(z,y,x)]){
                cout << "obstacle miss-match @" << x << " " << y << " " << z << endl;
                exit(-2);
            }
        }
    }
}
cout << "obstacles ok" << endl;
}
void LBM::compare_nodes(FLOATING *outter_node){
// Compare all 19 distribution components of the main lattice against an
// external flat 4D reference array; abort on the first value differing by
// more than the tolerance. Same component order and exit code as before.
FLOATING *q[19] = {
    D3.Q0, D3.Q1, D3.Q2, D3.Q3, D3.Q4, D3.Q5, D3.Q6, D3.Q7, D3.Q8, D3.Q9,
    D3.Q10, D3.Q11, D3.Q12, D3.Q13, D3.Q14, D3.Q15, D3.Q16, D3.Q17, D3.Q18
};
for (int z = 0; z < lz; ++z){
    for (int y = 0; y < ly; ++y){
        for (int x = 0; x < lx; ++x){
            for (int i = 0; i < 19; ++i){
                // NOTE(review): abs() matches the original; confirm the
                // floating-point std::abs overload is in scope for FLOATING.
                if (abs(outter_node[index4D(z,y,x,i)] - q[i][index(z,y,x)]) > 0.00001){
                    cout << "node miss-match @ x:" << x << " y:" << y << " z:" << z << " i:" << i << " :" << outter_node[index4D(z,y,x,i)] << " vs " << q[i][index(z,y,x)] << endl;
                    exit(-1000);
                }
            }
        }
    }
}
cout << "nodes ok" << endl;
}
void LBM::compare_nodes_hlp(FLOATING *outter_node_hlp){
// Compare all 19 distribution components of the HELPER lattice against an
// external flat 4D reference array; abort on the first value differing by
// more than the tolerance. Same component order and exit code as before.
// Fixed: the original i=18 mismatch message printed D3.Q18 instead of the
// D3_hlp.Q18 value actually being compared (copy-paste error).
FLOATING *q[19] = {
    D3_hlp.Q0, D3_hlp.Q1, D3_hlp.Q2, D3_hlp.Q3, D3_hlp.Q4, D3_hlp.Q5, D3_hlp.Q6, D3_hlp.Q7, D3_hlp.Q8, D3_hlp.Q9,
    D3_hlp.Q10, D3_hlp.Q11, D3_hlp.Q12, D3_hlp.Q13, D3_hlp.Q14, D3_hlp.Q15, D3_hlp.Q16, D3_hlp.Q17, D3_hlp.Q18
};
for (int z = 0; z < lz; ++z){
    for (int y = 0; y < ly; ++y){
        for (int x = 0; x < lx; ++x){
            for (int i = 0; i < 19; ++i){
                // NOTE(review): abs() matches the original; confirm the
                // floating-point std::abs overload is in scope for FLOATING.
                if (abs(outter_node_hlp[index4D(z,y,x,i)] - q[i][index(z,y,x)]) > 0.00001){
                    cout << "node miss-match @ x:" << x << " y:" << y << " z:" << z << " i:" << i << " :" << outter_node_hlp[index4D(z,y,x,i)] << " vs " << q[i][index(z,y,x)] << endl;
                    exit(-1000);
                }
            }
        }
    }
}
cout << "n_hlp ok" << endl;
}
|
134
|
#include "includes.h"
/*
 * Atomic push step of a push-relabel max-flow (graph-cut) solver.
 * Each thread owns one grid node; heights are staged in shared memory with a
 * one-cell halo, then excess flow is pushed to the sink and the four grid
 * neighbours, the active/inactive mask is recomputed, and a second identical
 * push pass runs within the same launch.
 * Assumes 32x8 thread blocks (the halo tile is 34 cells wide).
 * REFACTOR: the original repeated the same atomic push sequence ten times
 * (5 targets x 2 passes); the sequence is factored into two __device__
 * helpers that preserve the exact order of atomic operations.
 */

// Push as much excess as possible from node `thid` straight into the sink.
// g_sink_weight[thid] is only written by its owning thread, so that store
// needs no atomic; the shared excess counter does.
__device__ __forceinline__ void pushToSink(int thid, int *g_sink_weight, int *g_push_reser, bool at_sink_height)
{
	int flow_push = g_push_reser[thid];
	int temp_weight = g_sink_weight[thid];
	if (temp_weight > 0 && flow_push > 0 && at_sink_height)
	{
		int pushed = (temp_weight < flow_push) ? temp_weight : flow_push;
		g_sink_weight[thid] = temp_weight - pushed;
		atomicSub(&g_push_reser[thid], pushed);
	}
}

// Push excess along one residual edge thid -> nbr. `w_out` is the forward
// capacity array for this direction, `w_in` the reverse one. When no push is
// possible the neighbour's pull counter is decremented, exactly as the
// original duplicated code did for every direction.
__device__ __forceinline__ void pushToNeighbor(int thid, int nbr, int *w_out, int *w_in,
                                               int *g_push_reser, int *g_pull_nbr, bool height_ok)
{
	int flow_push = g_push_reser[thid];
	int temp_weight = w_out[thid];
	if (temp_weight > 0 && flow_push > 0 && height_ok)
	{
		int pushed = (temp_weight < flow_push) ? temp_weight : flow_push;
		atomicSub(&w_out[thid], pushed);
		atomicAdd(&w_in[nbr], pushed);
		atomicSub(&g_push_reser[thid], pushed);
		atomicAdd(&g_push_reser[nbr], pushed);
	}
	else
		atomicSub(&g_pull_nbr[nbr], 1);
}

__global__ void kernel_push2_atomic( int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight, int *g_sink_weight, int *g_push_reser, int *g_pull_left, int *g_pull_right, int *g_pull_down, int *g_pull_up, int *g_relabel_mask, int *g_graph_height, int *g_height_write, int graph_size, int width, int rows, int graph_size1, int width1, int rows1)
{
	int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	int thid = __umul24(y, width1) + x;

	// Heights for the 32x8 block plus a one-cell halo, stored as 34-wide rows.
	__shared__ int height_fn[356];
	int temp_mult = __umul24(threadIdx.y + 1, 34) + threadIdx.x + 1;
	height_fn[temp_mult] = g_graph_height[thid];
	// Edge threads also fetch the halo cell just outside the block.
	if (threadIdx.x == 31 && x < width1 - 1) height_fn[temp_mult + 1] = g_graph_height[thid + 1];
	if (threadIdx.x == 0 && x > 0)           height_fn[temp_mult - 1] = g_graph_height[thid - 1];
	if (threadIdx.y == 7 && y < rows1 - 1)   height_fn[temp_mult + 34] = g_graph_height[thid + width1];
	if (threadIdx.y == 0 && y > 0)           height_fn[temp_mult - 34] = g_graph_height[thid - width1];
	__syncthreads();

	// Only interior nodes flagged active (mask == 1) may push.
	bool active = (thid < graph_size1 && g_relabel_mask[thid] == 1 &&
	               x < width - 1 && x > 0 && y < rows - 1 && y > 0);

	// ---- first push pass ----
	if (active)
	{
		pushToSink(thid, g_sink_weight, g_push_reser, height_fn[temp_mult] == 1);
		pushToNeighbor(thid, thid - 1, g_left_weight, g_right_weight, g_push_reser, g_pull_left,
		               height_fn[temp_mult] == height_fn[temp_mult - 1] + 1);
		pushToNeighbor(thid, thid - width1, g_up_weight, g_down_weight, g_push_reser, g_pull_up,
		               height_fn[temp_mult] == height_fn[temp_mult - 34] + 1);
		pushToNeighbor(thid, thid + 1, g_right_weight, g_left_weight, g_push_reser, g_pull_right,
		               height_fn[temp_mult] == height_fn[temp_mult + 1] + 1);
		pushToNeighbor(thid, thid + width1, g_down_weight, g_up_weight, g_push_reser, g_pull_down,
		               height_fn[temp_mult] == height_fn[temp_mult + 34] + 1);
	}
	__syncthreads();

	// ---- recompute the node state ----
	// Saturated or drained nodes become inactive (2); nodes that can still
	// push to an admissible neighbour or the sink stay active (1); the rest
	// wait for a relabel (0).
	int flow_push = g_push_reser[thid];
	if (flow_push <= 0 ||
	    (g_left_weight[thid] == 0 && g_right_weight[thid] == 0 && g_down_weight[thid] == 0 &&
	     g_up_weight[thid] == 0 && g_sink_weight[thid] == 0))
		g_relabel_mask[thid] = 2;
	else if (flow_push > 0 &&
	         (((height_fn[temp_mult] == height_fn[temp_mult - 1] + 1) && g_left_weight[thid] > 0) ||
	          ((height_fn[temp_mult] == height_fn[temp_mult + 1] + 1) && g_right_weight[thid] > 0) ||
	          ((height_fn[temp_mult] == height_fn[temp_mult + 34] + 1) && g_down_weight[thid] > 0) ||
	          ((height_fn[temp_mult] == height_fn[temp_mult - 34] + 1) && g_up_weight[thid] > 0) ||
	          (height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0)))
		g_relabel_mask[thid] = 1;
	else
		g_relabel_mask[thid] = 0;
	__syncthreads();

	// ---- second push pass (mask may have changed above) ----
	if (thid < graph_size1 && g_relabel_mask[thid] == 1 &&
	    x < width - 1 && x > 0 && y < rows - 1 && y > 0)
	{
		pushToSink(thid, g_sink_weight, g_push_reser, height_fn[temp_mult] == 1);
		pushToNeighbor(thid, thid - 1, g_left_weight, g_right_weight, g_push_reser, g_pull_left,
		               height_fn[temp_mult] == height_fn[temp_mult - 1] + 1);
		pushToNeighbor(thid, thid - width1, g_up_weight, g_down_weight, g_push_reser, g_pull_up,
		               height_fn[temp_mult] == height_fn[temp_mult - 34] + 1);
		pushToNeighbor(thid, thid + 1, g_right_weight, g_left_weight, g_push_reser, g_pull_right,
		               height_fn[temp_mult] == height_fn[temp_mult + 1] + 1);
		pushToNeighbor(thid, thid + width1, g_down_weight, g_up_weight, g_push_reser, g_pull_down,
		               height_fn[temp_mult] == height_fn[temp_mult + 34] + 1);
	}
}
|
135
|
//pass
//--blockDim=256 --gridDim=1 --no-inline
#include <cuda.h>
#include <curand_kernel.h>
#include <curand_mtgp32.h>
#include <stdio.h>
//#include <curand.h>
#define N 2 //256
// Draw one 32-bit random number per thread from the MTGP32 state block and
// store it in A (implicitly converted to float).
__global__ void curand_test(curandStateMtgp32_t *state, float *A) {
	const int tid = threadIdx.x;
	A[tid] = curand(&state[tid]);
}
|
136
|
//pass
//--blockDim=32 --gridDim=2
#include "../common.h"
// Monte Carlo LIBOR path calculation: each thread processes paths
// tid, tid+stride, ... and writes the portfolio value and final L_b entry.
__global__ void Pathcalc_Portfolio_KernelGPU(float *d_v, float *d_Lb)
{
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	const int stride = blockDim.x * gridDim.x;
	float L[NN], L2[L2_SIZE], z[NN];
	float *L_b = L;

	for (int path = tid; path < NPATH; path += stride) {
		// Deterministic seed data for this thread's path; a real
		// application would draw z randomly.
		for (int k = 0; k < N; k++) {
			z[k] = 0.3;
			L[k] = 0.05;
		}
		path_calc_b1(L, z, L2);
		d_v[path] = portfolio_b(L, L_b);
		path_calc_b2(L_b, z, L2);
		d_Lb[path] = L_b[NN - 1];
	}
}
|
137
|
#include "includes.h"
/**
 * In-place weighted combination of two adjacent rows of a stacked buffer:
 *   x[(idx-2)*N + i] = W0 * x[(idx-1)*N + i] + W1 * x[(idx-2)*N + i]
 * Grid-stride loop, so any launch configuration covers all N columns.
 * Precondition: idx >= 2 and rows idx-1, idx-2 are valid in x.
 */
__global__ void Add(float * x, size_t idx, size_t N, float W0, float W1)
{
	// FIX: index with size_t. The original used `int i < N` (size_t), a
	// signed/unsigned comparison that overflows once N exceeds INT_MAX.
	const size_t stride = (size_t)blockDim.x * gridDim.x;
	for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
	{
		x[(idx - 2) * N + i] = x[(idx - 1) * N + i] * W0 + x[(idx - 2) * N + i] * W1;
	}
}
|
138
|
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <cuda.h>
#define MASK_N 2
#define MASK_X 5
#define MASK_Y 5
#define SCALE 8
unsigned char *host_s = NULL; // source image array
unsigned char *host_t = NULL; // target image array
FILE *fp_s = NULL; // source file handler
FILE *fp_t = NULL; // target file handler
unsigned int width, height; // image width, image height
unsigned int rgb_raw_data_offset; // RGB raw data offset
unsigned char bit_per_pixel; // bit per pixel
unsigned short byte_per_pixel; // byte per pixel
// bitmap header
unsigned char header[54] = {
0x42, // identity : B
0x4d, // identity : M
0, 0, 0, 0, // file size
0, 0, // reserved1
0, 0, // reserved2
54, 0, 0, 0, // RGB data offset
40, 0, 0, 0, // struct BITMAPINFOHEADER size
0, 0, 0, 0, // bmp width
0, 0, 0, 0, // bmp height
1, 0, // planes
24, 0, // bit per pixel
0, 0, 0, 0, // compression
0, 0, 0, 0, // data size
0, 0, 0, 0, // h resolution
0, 0, 0, 0, // v resolution
0, 0, 0, 0, // used colors
0, 0, 0, 0 // important colors
};
// sobel mask (5x5 version)
// Task 2: Put mask[][][] into Shared Memroy
int mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
// Load a 24-bit BMP into pinned host memory (host_s) and allocate a matching
// pinned output buffer (host_t). Fills the global width/height/bpp fields.
// Returns 0 on success, -1 on any failure.
int read_bmp (const char *fname_s) {
	fp_s = fopen(fname_s, "rb");
	if (fp_s == NULL) {
		printf("fopen fp_s error\n");
		return -1;
	}
	// move offset to 10 to find rgb raw data offset
	fseek(fp_s, 10, SEEK_SET);
	fread(&rgb_raw_data_offset, sizeof(unsigned int), 1, fp_s);
	// move offset to 18 to get width & height;
	fseek(fp_s, 18, SEEK_SET);
	fread(&width, sizeof(unsigned int), 1, fp_s);
	fread(&height, sizeof(unsigned int), 1, fp_s);
	// get bit per pixel (a 16-bit header field at offset 28)
	fseek(fp_s, 28, SEEK_SET);
	// FIX: the original fread 2 bytes directly into the 1-byte global
	// bit_per_pixel, overflowing into the adjacent byte. Read into a local
	// unsigned short first.
	unsigned short bpp = 0;
	fread(&bpp, sizeof(unsigned short), 1, fp_s);
	bit_per_pixel = (unsigned char)bpp;
	byte_per_pixel = bpp / 8;
	// move offset to rgb_raw_data_offset to get RGB raw data
	fseek(fp_s, rgb_raw_data_offset, SEEK_SET);
	// Pinned host memory so later transfers can be fast/asynchronous.
	// FIX: cudaMallocHost returns cudaError_t; compare against cudaSuccess
	// (runtime API), not the driver-API CUDA_SUCCESS, and keep the proper type.
	cudaError_t err = cudaMallocHost(&host_s, (size_t)width * height * byte_per_pixel);
	if (err != cudaSuccess) {
		printf("malloc images_s error\n");
		fclose(fp_s);          // was leaked on this path
		fp_s = NULL;
		return -1;
	}
	err = cudaMallocHost(&host_t, (size_t)width * height * byte_per_pixel);
	if (err != cudaSuccess) {
		printf("malloc host_t error\n");
		cudaFreeHost(host_s);  // release the first buffer too
		host_s = NULL;
		fclose(fp_s);
		fp_s = NULL;
		return -1;
	}
	fread(host_s, sizeof(unsigned char), (size_t)width * height * byte_per_pixel, fp_s);
	return 0;
}
// declare this as global !!!!
// Sobel edge detector with two 5x5 masks.
// Launch layout: one block per image row (y = blockIdx.x); threads stride
// across the columns. The masks are staged into shared memory by the first
// MASK_X threads, then every thread reads them after the barrier.
__global__ void sobel (unsigned char *host_s, unsigned char *host_t,
int *mask_, unsigned int width, unsigned int height,
unsigned short byte_per_pixel)
{
	__shared__ int mask[MASK_N][MASK_X][MASK_Y];
	if (threadIdx.x < MASK_X) {
		for (int i = 0; i < MASK_N; i++)
			for (int j = 0; j < MASK_Y; j++)
				mask[i][threadIdx.x][j] = mask_[i * MASK_X * MASK_Y + threadIdx.x * MASK_Y + j];
	}
	__syncthreads(); // all threads read the shared masks below

	// Mask geometry is compile-time constant; hoist it out of the pixel loop.
	const int adjustX = (MASK_X % 2) ? 1 : 0;
	const int adjustY = (MASK_Y % 2) ? 1 : 0;
	const int xBound = MASK_X / 2;
	const int yBound = MASK_Y / 2;

	const int y = blockIdx.x; // one row per block (the original looped exactly once)
	// FIX: stride by blockDim.x instead of the hard-coded 64 so the kernel is
	// correct for any block size, not only the <<<height, 64>>> launch.
	for (int x = threadIdx.x; x < width; x += blockDim.x) {
		double val[MASK_N * 3] = {0.0}; // per-mask accumulated B, G, R
		for (int i = 0; i < MASK_N; ++i) {
			for (int v = -yBound; v < yBound + adjustY; ++v) {
				for (int u = -xBound; u < xBound + adjustX; ++u) {
					// skip neighbours outside the image (zero padding)
					if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
						int R = host_s[byte_per_pixel * (width * (y + v) + (x + u)) + 2];
						int G = host_s[byte_per_pixel * (width * (y + v) + (x + u)) + 1];
						int B = host_s[byte_per_pixel * (width * (y + v) + (x + u)) + 0];
						val[i * 3 + 2] += R * mask[i][u + xBound][v + yBound];
						val[i * 3 + 1] += G * mask[i][u + xBound][v + yBound];
						val[i * 3 + 0] += B * mask[i][u + xBound][v + yBound];
					}
				}
			}
		}
		// Gradient magnitude per channel, scaled and clamped to a byte.
		double totalR = 0.0, totalG = 0.0, totalB = 0.0;
		for (int i = 0; i < MASK_N; ++i) {
			totalR += val[i * 3 + 2] * val[i * 3 + 2];
			totalG += val[i * 3 + 1] * val[i * 3 + 1];
			totalB += val[i * 3 + 0] * val[i * 3 + 0];
		}
		totalR = sqrt(totalR) / SCALE;
		totalG = sqrt(totalG) / SCALE;
		totalB = sqrt(totalB) / SCALE;
		const unsigned char cR = (totalR > 255.0) ? 255 : totalR;
		const unsigned char cG = (totalG > 255.0) ? 255 : totalG;
		const unsigned char cB = (totalB > 255.0) ? 255 : totalB;
		host_t[byte_per_pixel * (width * y + x) + 2] = cR;
		host_t[byte_per_pixel * (width * y + x) + 1] = cG;
		host_t[byte_per_pixel * (width * y + x) + 0] = cB;
	}
}
// Write host_t as a BMP: patch the little-endian size/width/height/bpp fields
// of the prebuilt 54-byte header, then dump header and pixel data.
// Also closes the source file handle opened by read_bmp.
int write_bmp (const char *fname_t) {
	fp_t = fopen(fname_t, "wb");
	if (fp_t == NULL) {
		printf("fopen fname_t error\n");
		return -1;
	}
	const unsigned int file_size = width * height * byte_per_pixel + rgb_raw_data_offset;
	// little-endian byte packing for the four 32-bit header fields
	for (int b = 0; b < 4; ++b) {
		header[2 + b]  = (unsigned char)((file_size >> (8 * b)) & 0xff); // file size
		header[18 + b] = (unsigned char)((width     >> (8 * b)) & 0xff); // bmp width
		header[22 + b] = (unsigned char)((height    >> (8 * b)) & 0xff); // bmp height
	}
	header[28] = bit_per_pixel;
	// header, then raw image bytes
	fwrite(header, sizeof(unsigned char), rgb_raw_data_offset, fp_t);
	fwrite(host_t, sizeof(unsigned char), (size_t)width * height * byte_per_pixel, fp_t);
	fclose(fp_s);
	fclose(fp_t);
	return 0;
}
// Entry point: load the BMP, run the Sobel kernel on the GPU, write the result.
// Usage: prog <input.bmp> <output.bmp>
int main(int argc, char **argv) {
	cudaSetDevice(0);
	assert(argc == 3);
	const char* input = argv[1];
	const char* output = argv[2];
	// FIX: the return value of read_bmp was ignored; on failure width/height
	// are garbage and every allocation below would be wrong.
	if (read_bmp(input) != 0) // 24 bit gray level image
		return -1;
	const size_t img_bytes = (size_t)width * height * byte_per_pixel;
	const size_t mask_bytes = sizeof(int) * MASK_N * MASK_Y * MASK_X;
	// device buffers: input image, output image, sobel masks
	unsigned char *host_s_ = NULL;
	unsigned char *host_t_ = NULL;
	int *mask_ = NULL;
	cudaMalloc((void**)&host_s_, img_bytes);
	cudaMalloc((void**)&host_t_, img_bytes);
	cudaMalloc((void**)&mask_, mask_bytes);
	cudaMemcpy(host_s_, host_s, img_bytes, cudaMemcpyHostToDevice);
	cudaMemcpy(mask_, mask, mask_bytes, cudaMemcpyHostToDevice);
	// one block per row, 64 threads striding across the columns
	sobel <<<height, 64>>> (host_s_, host_t_, mask_, width, height, byte_per_pixel);
	// blocking copy also synchronizes with the kernel
	cudaMemcpy(host_t, host_t_, img_bytes, cudaMemcpyDeviceToHost);
	cudaFree(host_s_);
	cudaFree(host_t_);
	cudaFree(mask_);
	write_bmp(output);
	// pinned buffers were allocated with cudaMallocHost
	cudaFreeHost(host_s);
	cudaFreeHost(host_t);
	return 0;
}
|
139
|
#include <stdio.h>
extern "C"
{
// Element-wise in-place sum d_a[i] += d_b[i] over the first n elements,
// using a grid-stride loop so any launch configuration covers the array.
__global__ void GPU_add(
int n,
int* d_a,
int* d_b
)
{
	const int stride = blockDim.x * gridDim.x;
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	while (idx < n)
	{
		d_a[idx] += d_b[idx];
		idx += stride;
	}
}
}
|
140
|
// Constant-memory table of five 32-bit values. The name suggests the nonzero
// terms of a pentanomial over GF(2) (five-term polynomial, e.g. for an LFSR/
// RNG) — presumably populated from the host via cudaMemcpyToSymbol; the
// writer is not visible here. TODO(review): confirm against the host code.
__constant__ unsigned int pentanomial[5];
|
141
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <iostream>
#define ULI unsigned long int
// Doubling step of the Fibonacci table: given a[0..start-1] already filled,
// thread i computes a[start + i] from two earlier terms. The arithmetic
// appears to use the identity F(m+k) = F(m-1)*F(k) + F(m)*F(k+1) with
// m = start — TODO(review): confirm; a[i+1] is read, so the guard must keep
// i+1 within the already-computed prefix.
// Launch with at least 2*start - 1 threads; excess threads fail the guard.
__global__ void fibonacci_kernel(ULI* a, int start) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int index = i + start;
if (i < 2 * start - 1)
a[index] = (a[start - 2] * a[i]) + (a[start - 1] * a[i + 1]);
}
// Host-side driver that fills a Fibonacci table on the GPU by repeatedly
// widening the range of known terms (dynamic-programming style).
class FibonacciDynamicProgramming {
public:
// numElements: number of table entries; sizeInBytes: numElements * sizeof(ULI)
int numElements, sizeInBytes;
explicit FibonacciDynamicProgramming(int numElements);
// Launches fibonacci_kernel repeatedly with numThreads threads per block
// and prints the resulting table.
void run(int numThreads) const;
};
// Record the table length and its footprint in bytes.
FibonacciDynamicProgramming::FibonacciDynamicProgramming(int numElements)
	: numElements(numElements),
	  sizeInBytes(numElements * sizeof(ULI)) {
}
// Fill the table on the device by repeated doubling launches, then copy the
// result back and print it.
void FibonacciDynamicProgramming::run(int numThreads) const {
	// FIX: the original wrote `int deviceId = cudaGetDevice(&deviceId);`,
	// passing deviceId uninitialized and then overwriting the fetched id with
	// the cudaError_t return code.
	int deviceId = 0;
	cudaGetDevice(&deviceId);
	printf("GPU Device ID: %d\n", deviceId);
	printf("CPU Device ID: %d\n\n", cudaCpuDeviceId);
	ULI startingElements[3] = {1, 1, 2};
	ULI* deviceArray;
	// FIX: the result buffer was a non-standard VLA sized by numThreads, but
	// sizeInBytes (numElements entries) is copied into it below — overflow
	// whenever numThreads < numElements. Size it by numElements on the heap.
	ULI* resultArray = new ULI[numElements];
	cudaMalloc(&deviceArray, sizeInBytes);
	cudaMemcpy(deviceArray, startingElements, sizeof(startingElements), cudaMemcpyHostToDevice);
	unsigned int start = 3;
	while (start <= numElements / 2) {
		// ceil((start-1) / numThreads) blocks cover the new terms
		unsigned int numBlocks = (start - 1 + numThreads - 1) / numThreads;
		fibonacci_kernel<<<numBlocks, numThreads>>>(deviceArray, start);
		start = 2 * start - 1;
	}
	cudaMemcpy(resultArray, deviceArray, sizeInBytes, cudaMemcpyDeviceToHost);
	for (int i = 0; i < numElements; i++) {
		printf("%d:\t%lu \n", i + 1, resultArray[i]);
	}
	cudaFree(deviceArray);
	delete[] resultArray;
}
// Build and print a 16-entry Fibonacci table with 16 threads per block.
int main() {
	FibonacciDynamicProgramming fib(16);
	fib.run(16);
	return 0;
}
|
142
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime_api.h>
#include <iostream>
// Demo kernel for pitched device memory: every thread walks its grid-stride
// subset of the rows x cols matrix and stores an incrementing counter.
// NOTE(review): `count` is a per-thread local, so the stored value is the
// visit order within ONE thread, not a global element index; with more than
// one thread the output pattern depends on the thread-to-element mapping.
__global__ void kernel(float * d_matrix, size_t pitch, size_t rows, size_t cols) {
int count = 1;
for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < rows; j += blockDim.y * gridDim.y)
{
// pitch is in BYTES, hence the char* arithmetic to reach row j
float* row_d_matrix = (float*)((char*)d_matrix + j*pitch);
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < cols; i += blockDim.x * gridDim.x)
{
row_d_matrix[i] = count;
count++;
}
}
// d_matrix[0] = 1;
}
// Experiment comparing cudaMallocPitch vs cudaMalloc: prints the returned
// addresses/pitch, fills a pitched matrix on the GPU and prints it.
int main(int argc, char **argv)
{
	// device pointers.
	float *d_pitch;
	float *d_normal;
	// matrix size.
	size_t cols = 128;
	size_t rows = 16;
	size_t pitch = 0;
	// alloc the data form gpu memory.
	cudaMallocPitch((void**)&d_pitch, &pitch, cols*sizeof(float), rows);
	cudaMalloc((void**)(&d_normal), rows*cols*sizeof(float));
	// test the data address.
	fprintf(stdout, "row size(in bytes) = %.2f*128.\n", pitch/128.0f);
	std::cout<<"d_pitch:"<<d_pitch<<std::endl;
	std::cout<<"d_normal:"<<d_normal<<std::endl;
	std::cout<<"pitch:"<<pitch<<std::endl;
	std::cout<<"occupy_num:"<<pitch/sizeof(float)<<std::endl;
	std::cout<<"sizeof(float):"<<sizeof(float)<<std::endl;
	fprintf(stdout, "the head address of d_pitch mod 128 = %x.\n", ((long)d_pitch)%128);
	fprintf(stdout, "the head address of d_normal mod 128 = %x.\n", ((long)d_normal)%128);
	float *d_matrix;
	float *dc_matrix;
	dc_matrix = (float*)malloc(sizeof(float)* cols * rows);
	cudaMallocPitch(&d_matrix, &pitch, cols*sizeof(float), rows);
	kernel<<<128,128>>>(d_matrix, pitch, rows, cols);
	// FIX: d_matrix's rows are `pitch` bytes apart, so a flat cudaMemcpy of
	// rows*cols floats reads the alignment padding as data. cudaMemcpy2D
	// copies row by row and strips the padding.
	cudaMemcpy2D(dc_matrix, cols * sizeof(float), d_matrix, pitch,
	             cols * sizeof(float), rows, cudaMemcpyDeviceToHost);
	int count = 0;
	for(int i = 0; i < rows; i++)
	{
		for (int j = 0; j < cols; j++)
		{
			std::cout<<dc_matrix[count]<<" ";
			++count;
		}
		std::cout<<std::endl;
	}
	cudaFree(d_matrix);
	free(dc_matrix);
	cudaFree(d_pitch);
	cudaFree(d_normal);
	return 0;
}
|
143
|
#include <stdio.h>
#include <cuda_runtime.h>
// Query and report the number of CUDA-capable devices visible to the runtime.
int main(void){
	int deviceCount = 0;
	cudaGetDeviceCount(&deviceCount);
	printf("the avalible device count is %d\n", deviceCount);
	return 0;
}
|
144
|
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#define DEFAULT_THRESHOLD 4000
#define DEFAULT_FILENAME "bw_stopsign.ppm"
#include <sys/time.h>
using namespace std;
void write_ppm( char*, int, int, int, int*);
unsigned int *read_ppm( char *, int *, int *, int *);
// 3x3 Sobel edge detector, one pixel per thread: threshold the squared
// gradient magnitude into a binary 0/255 result.
// FIX: the original indexed pic[xsize*(i-1)+j-1] etc. without any bounds
// check, so border threads (i==0, j==0, last row/column) read out of bounds.
// Border pixels now get 0 (no edge) instead.
__global__ void sobel (int * result, unsigned int * pic, int xsize, int ysize, int thresh) {
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;
	if (i < 1 || i >= ysize - 1 || j < 1 || j >= xsize - 1) {
		if (i < ysize && j < xsize)
			result[i * xsize + j] = 0; // no 3x3 neighbourhood available
		return;
	}
	int magnitude, sum1, sum2;
	// horizontal gradient
	sum1 = pic[ xsize * (i-1) + j+1 ] - pic[ xsize*(i-1) + j-1 ]
	+ 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ]
	+ pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ];
	// vertical gradient
	sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ]
	- pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
	if ((sum1*sum1 + sum2*sum2) > thresh) {
		magnitude = 255;
	} else {
		magnitude = 0;
	}
	result[i*xsize+j] = magnitude;
}
// Entry point: read a PPM, run the Sobel kernel, report timings.
// Usage: prog [file [threshold]] or prog [threshold]
int main(int argc,char ** argv){
	int thresh = DEFAULT_THRESHOLD;
	char *filename;
	filename = strdup( DEFAULT_FILENAME);
	if (argc > 1) {
		if (argc == 3) { // filename AND threshold
			filename = strdup( argv[1]);
			thresh = atoi( argv[2] );
		}
		if (argc == 2) { // default file but specified threshhold
			thresh = atoi( argv[1] );
		}
		fprintf(stderr, "file %s threshold %d\n", filename, thresh);
	}
	int xsize, ysize, maxval;
	unsigned int * pic = read_ppm( filename, &xsize, &ysize, &maxval );
	// FIX: a failed read_ppm returned NULL and the program crashed later.
	if (!pic) {
		fprintf(stderr, "could not read %s\n", filename);
		exit(-1);
	}
	int numbytes = xsize * ysize * 3 * sizeof( int );
	int *result = (int *) malloc( numbytes );
	if (!result) {
		fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes);
		exit(-1); // fail
	}
	// zero the xsize*ysize output region (same effect as the original loops)
	memset(result, 0, (size_t)xsize * ysize * sizeof(int));
	cudaEvent_t start=0;
	cudaEvent_t stop=0;
	float time =0;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	clock_t start_st = clock(), diff;
	int *result_d;
	unsigned int *pic_d;
	int size = xsize*ysize*sizeof(int);
	cudaMalloc((void**)&pic_d,size);
	cudaMalloc((void**)&result_d,size);
	cudaMemcpy(result_d,result,size,cudaMemcpyHostToDevice);
	cudaMemcpy(pic_d,pic,size,cudaMemcpyHostToDevice);
	dim3 blocks(16,16);
	dim3 grid(xsize/blocks.x, ysize/blocks.y);
	cudaEventRecord(start,0);
	sobel<<<grid,blocks>>>(result_d, pic_d, xsize, ysize, thresh);
	// FIX: the stop event was never recorded, so cudaEventSynchronize(stop)
	// and cudaEventElapsedTime(start, stop) were meaningless.
	cudaEventRecord(stop,0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time,start,stop);
	cudaMemcpy(result,result_d,size,cudaMemcpyDeviceToHost);
	cudaFree(result_d);
	cudaFree(pic_d);
	diff = clock() - start_st;
	int msec = diff * 1000 / CLOCKS_PER_SEC;
	printf("Kernel time (with memcpy): %d s %d ms\n", msec/1000, msec%1000);
	printf("Kernel time (without memcpy): %.4f\n",time);
	//write_ppm( "result.ppm", xsize, ysize, 255, result);
	fprintf(stderr, "sobel done\n");
	// release host-side resources (were leaked before)
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	free(result);
	free(pic);
	free(filename);
	return 0;
}
// Load a binary ("P6") PPM. Returns a width*height array holding the red
// channel of each pixel and reports dimensions/maxval through the out-params;
// returns NULL on any failure.
// FIXES over the original: the FILE handle is closed on every early-exit
// path, the temporary read buffer is freed, pic is freed if the buffer
// allocation fails, and the redundant "\0" in the sprintf formats is gone.
unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){
	if ( !filename || filename[0] == '\0') {
		fprintf(stderr, "read_ppm but no file name\n");
		return NULL; // fail
	}
	fprintf(stderr, "read_ppm( %s )\n", filename);
	FILE *fp = fopen( filename, "rb");
	if (!fp) {
		fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
		return NULL; // fail
	}
	// slurp up to 1000 header bytes and parse them in memory
	char chars[1024];
	int num = fread(chars, sizeof(char), 1000, fp);
	if (chars[0] != 'P' || chars[1] != '6') {
		fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
		fclose(fp);
		return NULL;
	}
	unsigned int width, height, maxvalue;
	char *ptr = chars+3; // P 6 newline
	if (*ptr == '#') { // comment line!
		ptr = 1 + strstr(ptr, "\n");
	}
	num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
	fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
	*xsize = width;
	*ysize = height;
	*maxval = maxvalue;
	unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int));
	if (!pic) {
		fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
		fclose(fp);
		return NULL; // fail but return
	}
	// buffer for the raw RGB samples (16-bit samples when maxval > 255)
	int bufsize = 3 * width * height * sizeof(unsigned char);
	if ((*maxval) > 255) bufsize *= 2;
	unsigned char *buf = (unsigned char *)malloc( bufsize );
	if (!buf) {
		fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
		free(pic);
		fclose(fp);
		return NULL; // fail but return
	}
	// Locate the start of the pixel data by scanning past the three header
	// numbers we just parsed.
	char duh[80];
	char *line = chars;
	sprintf(duh, "%d", *xsize);
	line = strstr(line, duh);
	line += strlen(duh) + 1;
	sprintf(duh, "%d", *ysize);
	line = strstr(line, duh);
	line += strlen(duh) + 1;
	sprintf(duh, "%d", *maxval);
	line = strstr(line, duh);
	fprintf(stderr, "%s found at offset %ld\n", duh, line - chars);
	line += strlen(duh) + 1;
	long offset = line - chars;
	fseek(fp, offset, SEEK_SET); // move to the correct offset
	long numread = fread(buf, sizeof(char), bufsize, fp);
	fprintf(stderr, "Texture %s read %ld of %d bytes\n", filename, numread, bufsize);
	fclose(fp);
	// keep only the red channel of every pixel
	int pixels = (*xsize) * (*ysize);
	for (int i=0; i<pixels; i++) {
		pic[i] = (int) buf[3*i]; // red channel
	}
	free(buf); // was leaked in the original
	return pic; // success
}
// Write pic (one gray value per pixel) as a binary P6 PPM, replicating each
// value into the R, G and B channels.
void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic)
{
	FILE *fp = fopen(filename, "w");
	if (!fp)
	{
		fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n",filename);
		exit(-1);
	}
	// PPM header: magic, dimensions, max sample value
	fprintf(fp, "P6\n");
	fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
	const int numpix = xsize * ysize;
	for (int i = 0; i < numpix; i++) {
		const unsigned char uc = (unsigned char) pic[i];
		fputc(uc, fp); // R
		fputc(uc, fp); // G
		fputc(uc, fp); // B
	}
	fclose(fp);
}
|
145
|
#include "includes.h"
// Accumulate an element-wise (Hadamard) product: y[i] += x[i] * w[i]
// for the first N elements, one element per thread.
__global__ void kernel_hadamard_sum(int N, double *y, double *x, double *w){
	const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
	if (idx >= N)
		return; // threads beyond N do nothing
	y[idx] += x[idx] * w[idx];
}
|
146
|
#include <bits/stdc++.h>
using namespace std;
#define __ ios_base::sync_with_stdio(false);cin.tie(NULL);
#define endl '\n'
#define KERNEL_SIZE 3
#define BLOCK_SIZE 4
#define gpu_error(ans) { gpu_assert((ans), __LINE__); }
__constant__ int d_cachekernel[KERNEL_SIZE];
// Print a diagnostic when a CUDA runtime call failed at the given line.
inline void gpu_assert(cudaError_t code, int line){
	if (code == cudaSuccess)
		return;
	cerr<<"GPUerror: "<<cudaGetErrorString(code)<<" in "<< line<<endl;
}
// Ceiling division: the number of m-sized chunks needed to cover n items.
int size(int n, int m){
	const int numerator = n + m - 1;
	return numerator / m;
}
// Report whether arrays a and b agree on their first n entries:
// prints ":)" on a match, ":(" otherwise.
void comp(int *a, int *b, int n) {
	bool equal = true;
	for (int i = 0; i < n && equal; i++)
		equal = (a[i] == b[i]);
	if (equal)
		cout<<":)"<<endl;
	else
		cout<<":("<<endl;
}
// Dump the first n entries of vec, space-separated, followed by a newline.
void print(int *vec, int n) {
	int idx = 0;
	while (idx < n) {
		cout << vec[idx] << " ";
		++idx;
	}
	cout<<endl;
}
// Sequential reference: 1-D convolution of `vector` with `kernel`
// (length KERNEL_SIZE), zero-padded at the borders; result goes to `out`.
void convolSec(int *vector, int *kernel, int *out, int n) {
	for(int i = 0; i < n; i++) {
		int acc = 0;
		const int base = i - KERNEL_SIZE/2; // leftmost tap of the window
		for(int j = 0; j < KERNEL_SIZE; j++) {
			const int src = base + j;
			if (src >= 0 && src < n)
				acc += vector[src] * kernel[j];
		}
		out[i] = acc;
	}
}
// Naive GPU 1-D convolution: one output element per thread, kernel taps read
// from global memory, zero padding at the borders.
__global__ void convolParB(int *d_vec, int *d_kernel, int *d_out, int n) {
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	const int base = i - (KERNEL_SIZE/2);
	int acc = 0;
	for(int j = 0; j < KERNEL_SIZE; j++) {
		const int src = base + j;
		if (src >= 0 && src < n)
			acc += d_vec[src] * d_kernel[j];
	}
	if (i < n)
		d_out[i] = acc;
}
// Same as convolParB but the kernel taps come from __constant__ memory
// (d_cachekernel), which is broadcast to all lanes.
__global__ void convolParC(int *d_vec, int *d_out, int n) {
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	const int base = i - (KERNEL_SIZE/2);
	int acc = 0;
	for(int j = 0; j < KERNEL_SIZE; j++) {
		const int src = base + j;
		if (src >= 0 && src < n)
			acc += d_vec[src] * d_cachekernel[j];
	}
	if (i < n)
		d_out[i] = acc;
}
// Tiled 1-D convolution: each block stages its BLOCK_SIZE inputs plus a
// KERNEL_SIZE/2 halo on each side into shared memory, then convolves with the
// __constant__ kernel.
// FIXES: the right-halo condition was `<= wn`, writing one slot past the end
// of the tile; and the final store was unguarded, so the last block wrote
// past d_out when n is not a multiple of BLOCK_SIZE.
__global__ void convolParT(int *d_vec, int *d_out, int n) {
	__shared__ int tile[BLOCK_SIZE + KERNEL_SIZE - 1];
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	const int wn = KERNEL_SIZE/2; // halo width on each side
	// left halo: the last wn threads fetch the tail of the previous block
	const int left = ((blockIdx.x - 1) * blockDim.x) + threadIdx.x;
	if (threadIdx.x >= blockDim.x - wn)
		tile[threadIdx.x - (blockDim.x - wn)] = (left < 0) ? 0 : d_vec[left];
	// centre element
	tile[wn + threadIdx.x] = (i < n) ? d_vec[i] : 0;
	// right halo: the first wn threads fetch the head of the next block
	const int rigth = ((blockIdx.x + 1) * blockDim.x) + threadIdx.x;
	if (threadIdx.x < wn)
		tile[threadIdx.x + blockDim.x + wn] = (rigth >= n) ? 0 : d_vec[rigth];
	__syncthreads(); // tile fully populated before anyone reads it
	int acc = 0;
	for (int j = 0; j < KERNEL_SIZE; j++)
		acc += tile[threadIdx.x + j] * d_cachekernel[j];
	if (i < n)
		d_out[i] = acc;
}
// Reads n, builds a test vector, then runs the sequential reference plus the
// three GPU variants (naive / constant-memory / tiled), timing each one.
// FIX: the comp() checker existed but was never invoked — GPU results were
// copied into out_d and silently discarded. Each variant is now validated
// against the CPU reference.
int main(){__
	int *h_kernel = new int[KERNEL_SIZE];
	int *h_vec, *h_out, *out_d;
	int n; cin>>n;
	out_d = new int[n];
	h_vec = new int[n];
	h_out = new int[n];
	for(int i = 0; i < KERNEL_SIZE; i++)
		h_kernel[i] = 1;
	for(int i = 0; i < n; i++)
		h_vec[i] = (i % 500);
	// <--------- Secuencial ----------->
	double a,b;
	clock_t t = clock();
	convolSec(h_vec, h_kernel, h_out, n);
	t = clock() - t;
	a = ((float)t)/CLOCKS_PER_SEC;
	cout<<"Secuencial: "<<a<<endl;
	// <--------- Paralelo --------->
	int *d_vec, *d_out, *d_kernel;
	int sz = sizeof(int) * n;
	t = clock();
	gpu_error(cudaMalloc(&d_kernel, sizeof(int) * KERNEL_SIZE));
	gpu_error(cudaMemcpy(d_kernel, h_kernel, sizeof(int)*KERNEL_SIZE, cudaMemcpyHostToDevice));
	gpu_error(cudaMalloc(&d_vec, sz));
	gpu_error(cudaMemcpy(d_vec, h_vec, sz, cudaMemcpyHostToDevice));
	gpu_error(cudaMalloc(&d_out, sz ));
	convolParB<<< size(n, BLOCK_SIZE), BLOCK_SIZE >>> (d_vec, d_kernel, d_out, n);
	gpu_error(cudaGetLastError());
	cudaDeviceSynchronize();
	gpu_error(cudaMemcpy(out_d, d_out, sz, cudaMemcpyDeviceToHost));
	t = clock() - t;
	b = ((float)t)/CLOCKS_PER_SEC;
	cout<<"Paralelo naive: "<< b <<endl;
	cout<<"Aceleracion naive: "<< a/b <<endl;
	comp(h_out, out_d, n); // validate against the CPU reference
	cudaFree(d_kernel);
	cudaFree(d_vec);
	cudaFree(d_out);
	// <--------- Paralelo Cache--------->
	t = clock();
	gpu_error(cudaMemcpyToSymbol(d_cachekernel, h_kernel, sizeof(int) * KERNEL_SIZE));
	gpu_error(cudaMalloc(&d_vec, sz));
	gpu_error(cudaMemcpy(d_vec, h_vec, sz, cudaMemcpyHostToDevice));
	gpu_error(cudaMalloc(&d_out, sz ));
	convolParC<<< size(n, BLOCK_SIZE), BLOCK_SIZE >>> (d_vec, d_out, n);
	gpu_error(cudaGetLastError());
	cudaDeviceSynchronize();
	gpu_error(cudaMemcpy(out_d, d_out, sz, cudaMemcpyDeviceToHost));
	t = clock() - t;
	b = ((float)t)/CLOCKS_PER_SEC;
	cout<<"Paralelo cache: "<< b <<endl;
	cout<<"Aceleracion cache: "<< a/b <<endl;
	comp(h_out, out_d, n); // validate against the CPU reference
	cudaFree(d_vec);
	cudaFree(d_out);
	// <--------- Paralelo Cache tiled--------->
	t = clock();
	gpu_error(cudaMemcpyToSymbol(d_cachekernel, h_kernel, sizeof(int) * KERNEL_SIZE));
	gpu_error(cudaMalloc(&d_vec, sz));
	gpu_error(cudaMemcpy(d_vec, h_vec, sz, cudaMemcpyHostToDevice));
	gpu_error(cudaMalloc(&d_out, sz ));
	convolParT<<< size(n, BLOCK_SIZE), BLOCK_SIZE >>> (d_vec, d_out, n);
	gpu_error(cudaGetLastError());
	cudaDeviceSynchronize();
	gpu_error(cudaMemcpy(out_d, d_out, sz, cudaMemcpyDeviceToHost));
	t = clock() - t;
	b = ((float)t)/CLOCKS_PER_SEC;
	cout<<"Paralelo tiled: "<< b <<endl;
	cout<<"Aceleracion tiled: "<< a/b <<endl;
	comp(h_out, out_d, n); // validate against the CPU reference
	cudaFree(d_vec);
	cudaFree(d_out);
	delete[] h_kernel;
	delete[] h_vec;
	delete[] h_out;
	delete[] out_d;
	return 0;
}
|
147
|
/*
* Solves the Panfilov model using an explicit numerical scheme.
* Based on code orginally provided by Xing Cai, Simula Research Laboratory
* and reimplementation by Scott B. Baden, UCSD
*
* Modified and restructured by Didem Unat, Koc University
*
*/
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <string.h>
#include <math.h>
#include <sys/time.h>
using namespace std;
// Utilities
//
// Timer
// Make successive calls and take a difference to get the elapsed time.
static const double kMicro = 1.0e-6;
// Wall-clock time in seconds (microsecond resolution); take the difference
// of two successive calls to measure an elapsed interval. Returns -1 on a
// gettimeofday failure.
double getTime()
{
	struct timeval TV;
	struct timezone TZ;
	if (gettimeofday(&TV, &TZ) == -1) {
		std::cerr << "ERROR: Bad call to gettimeofday" << std::endl;
		return -1;
	}
	return (double)TV.tv_sec + 1.0e-6 * (double)TV.tv_usec;
}
// Allocate a 2D array
// Allocate an m-by-n matrix of doubles in a single malloc: the row-pointer
// table is placed first, immediately followed by the data, so a single
// free(E) releases everything.
double **alloc2D(int m,int n){
	const int ny = m, nx = n;
	double **E = (double**)malloc(sizeof(double*)*ny + sizeof(double)*nx*ny);
	assert(E);
	double *data = (double*)(E + ny); // data region starts after the pointers
	for (int j = 0; j < ny; j++)
		E[j] = data + j*nx;
	return E;
}
// Reports statistics about the computation
// These values should not vary (except to within roundoff)
// when we use different numbers of processes to solve the problem
// Report statistics over the interior [1..m][1..n] of the padded array E:
// writes the maximum value to *_mx and returns the L2 norm (RMS).
// These values should not vary (except to within roundoff) across runs.
double stats(double **E, int m, int n, double *_mx) {
    double maxVal = -1;
    double sumSq = 0;
    for (int row = 1; row <= m; row++) {
        for (int col = 1; col <= n; col++) {
            sumSq += E[row][col] * E[row][col];
            if (E[row][col] > maxVal)
                maxVal = E[row][col];
        }
    }
    *_mx = maxVal;
    sumSq /= (double)(m * n);
    return sqrt(sumSq);
}
// External functions
extern "C" {
void splot(double **E, double T, int niter, int m, int n);
}
void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int&num_threads);
__device__
// Map (row j, col i) to a linear offset with row stride n; m is unused.
// NOTE(review): the arrays these kernels index are allocated (m+2) x (n+2),
// but callers pass n here, giving an effective stride of n rather than n+2 —
// confirm this matches the intended device-buffer layout.
int pos(int j, int i, int m, int n)
{
return j*n + i;
}
__global__
// Fill the ghost (padding) cells of Eprev with mirror boundary values:
// column 0 mirrors column 2, column n+1 mirrors n-1, and likewise for the
// top/bottom rows. One thread handles one interior row and one interior
// column. Launch with enough total threads to cover max(m, n).
//
// Fix: the original used only threadIdx.x, so with a grid of more than one
// block every block redundantly processed rows 0..blockDim.x-1 and rows
// beyond blockDim.x never had their ghost cells written.
void copyGhostRegion(double* Eprev, int m, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < m) {
        // Left/right ghost columns for interior row idx+1.
        Eprev[pos(idx + 1, 0, m, n)]     = Eprev[pos(idx + 1, 2, m, n)];
        Eprev[pos(idx + 1, n + 1, m, n)] = Eprev[pos(idx + 1, n - 1, m, n)];
    }
    if (idx < n) {
        // Top/bottom ghost rows for interior column idx+1.
        Eprev[pos(0, idx + 1, m, n)]     = Eprev[pos(2, idx + 1, m, n)];
        Eprev[pos(m + 1, idx + 1, m, n)] = Eprev[pos(m - 1, idx + 1, m, n)];
    }
}
__global__
// One timestep of the 5-point Laplacian (diffusion) update:
//   E = Eprev + alpha * laplacian(Eprev)
// computed over the interior region [1..m] x [1..n], matching the host
// reference loops in simulate().
//
// Fix: the original started at row/col 0 with no guard, so row 0 read
// pos(-1, col, ...) — a negative (out-of-bounds) index — and the computed
// region was [0..m-1] instead of the interior [1..m].
void PDE(double* E, double* Eprev, double alpha, int m, int n)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y + 1;  // skip ghost layer
    int col = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (row > m || col > n)
        return;  // grid may overshoot the interior
    int target = pos(row, col, m, n);
    E[target] = Eprev[target]
              + alpha * (Eprev[pos(row, col + 1, m, n)] + Eprev[pos(row, col - 1, m, n)]
                         - 4 * Eprev[target]
                         + Eprev[pos(row - 1, col, m, n)] + Eprev[pos(row + 1, col, m, n)]);
}
__global__
// Advance the excitation variable E one timestep of the Panfilov ODE:
//   E -= dt * (kk*E*(E-a)*(E-1) + E*R)
// over the interior [1..m] x [1..n], matching the host reference loops.
//
// Fix: the original had no +1 interior offset and no bounds guard, so it
// updated rows/cols 0..m-1 (including a padding row/column) instead of the
// interior 1..m used by the CPU version.
void ODE_excitation(double* E, double* R, double dt, double kk, double a, int m, int n)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y + 1;
    int col = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (row > m || col > n)
        return;
    int target = pos(row, col, m, n);
    E[target] = E[target] - dt * (kk * E[target] * (E[target] - a) * (E[target] - 1)
                                  + E[target] * R[target]);
}
__global__
// Advance the recovery variable R one timestep of the Panfilov ODE:
//   R += dt * (epsilon + M1*R/(E+M2)) * (-R - kk*E*(E-b-1))
// over the interior [1..m] x [1..n], matching the host reference loops.
//
// Fix: the original had no +1 interior offset and no bounds guard (see
// ODE_excitation), so the updated region did not match the CPU version.
void ODE_recovery(double* E, double* R, double dt, double epsilon, double M1, double M2, double kk, double b, int m, int n)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y + 1;
    int col = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (row > m || col > n)
        return;
    int target = pos(row, col, m, n);
    R[target] = R[target] + dt * (epsilon + M1 * R[target] / (E[target] + M2))
                               * (-R[target] - kk * E[target] * (E[target] - b - 1));
}
// Advance the Panfilov model by one timestep.
// The mirror-boundary ghost fill runs on the GPU (round-tripping E_prev
// through d_Eprev); the PDE and both ODE updates currently run on the host
// (the GPU launches for them are kept below, commented out).
// E, E_prev, R are (m+2) x (n+2) padded arrays from alloc2D; the
// (double*)E_prev+(m+2) expression skips the row-pointer table to reach the
// contiguous data region (valid because sizeof(double*) == sizeof(double)
// on the 64-bit targets this is built for).
void simulate (double** E, double** E_prev,double** R,
double* d_E, double* d_Eprev, double* d_R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
int i, j;
/*
 * Copy data from boundary of the computational box
 * to the padding region, set up for differencing
 * on the boundary of the computational box
 * Using mirror boundaries
 */
int size = sizeof(double)*(m+2)*(n+2);
cudaMemcpy(d_Eprev, (double*)E_prev+(m+2), size, cudaMemcpyHostToDevice);
dim3 ghostBlock(128, 1, 1);
dim3 ghostGrid(m/ghostBlock.x, 1);
copyGhostRegion<<< ghostGrid, ghostBlock >>>(d_Eprev, m, n);
// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
// supported equivalent.
cudaDeviceSynchronize();
cudaMemcpy((double*)E_prev+(m+2), d_Eprev, size, cudaMemcpyDeviceToHost);
/*
int ntx = 16, nty = 16;
dim3 tblock(ntx, nty, 1);
dim3 grid(n/tblock.x, m/tblock.y);
PDE<<< grid, tblock >>>(d_E, d_Eprev, alpha, m, n);
ODE_excitation<<< grid, tblock >>>(d_E, d_R, dt, kk, a, m, n);
ODE_recovery<<< grid, tblock >>>(d_E, d_R, dt, epsilon, M1, M2, kk, b, m, n);
*/
// Solve for the excitation, the PDE (host reference implementation).
for (j=1; j<=m; j++){
for (i=1; i<=n; i++) {
E[j][i] = E_prev[j][i]+alpha*(E_prev[j][i+1]+E_prev[j][i-1]-4*E_prev[j][i]+E_prev[j+1][i]+E_prev[j-1][i]);
}
}
/*
 * Solve the ODE, advancing excitation and recovery to the
 * next timestep
 */
for (j=1; j<=m; j++){
for (i=1; i<=n; i++)
E[j][i] = E[j][i] -dt*(kk* E[j][i]*(E[j][i] - a)*(E[j][i]-1)+ E[j][i] *R[j][i]);
}
for (j=1; j<=m; j++){
for (i=1; i<=n; i++)
R[j][i] = R[j][i] + dt*(epsilon+M1* R[j][i]/( E[j][i]+M2))*(-R[j][i]-kk* E[j][i]*(E[j][i]-b-1));
}
}
// Main program
// Driver: set up the Panfilov model, run the time loop, report statistics.
int main (int argc, char** argv)
{
/*
 * Solution arrays
 * E is the "Excitation" variable, a voltage
 * R is the "Recovery" variable
 * E_prev is the Excitation variable for the previous timestep,
 * and is used in time integration
 */
double **E, **R, **E_prev;
// Various constants - these definitions shouldn't change
const double a=0.1, b=0.1, kk=8.0, M1= 0.07, M2=0.3, epsilon=0.01, d=5e-5;
double T=1000.0;
int m=200,n=200;
int plot_freq = 0;
int px = 1, py = 1;
int no_comm = 0;
int num_threads=1;
cmdLine( argc, argv, T, n,px, py, plot_freq, no_comm, num_threads);
// The rest of the code assumes a square grid (m == n).
m = n;
// Allocate contiguous memory for solution arrays
// The computational box is defined on [1:m+1,1:n+1]
// We pad the arrays in order to facilitate differencing on the
// boundaries of the computation box
E = alloc2D(m+2,n+2);
E_prev = alloc2D(m+2,n+2);
R = alloc2D(m+2,n+2);
int i,j;
// Initialization: zero interior, then set the right half of E_prev and the
// bottom half of R to 1 (standard spiral-wave initial condition).
for (j=1; j<=m; j++)
for (i=1; i<=n; i++)
E_prev[j][i] = R[j][i] = 0;
for (j=1; j<=m; j++)
for (i=n/2+1; i<=n; i++)
E_prev[j][i] = 1.0;
for (j=m/2+1; j<=m; j++)
for (i=1; i<=n; i++)
R[j][i] = 1.0;
double dx = 1.0/n;
// For time integration, these values shouldn't change:
// stability-limited timesteps for the excitation (dte) and recovery (dtr)
// equations; dt takes 95% of the tighter of the two.
double rp= kk*(b+1)*(b+1)/4;
double dte=(dx*dx)/(d*4+((dx*dx))*(rp+kk));
double dtr=1/(epsilon+((M1/M2)*rp));
double dt = (dte<dtr) ? 0.95*dte : 0.95*dtr;
double alpha = d*dt/(dx*dx);
cout << "Grid Size : " << n << endl;
cout << "Duration of Sim : " << T << endl;
cout << "Time step dt : " << dt << endl;
cout << "Process geometry: " << px << " x " << py << endl;
if (no_comm)
cout << "Communication : DISABLED" << endl;
cout << endl;
// Start the timer
double t0 = getTime();
// Simulated time is different from the integer timestep number
// Simulated time
double t = 0.0;
// Integer timestep number
int niter=0;
// Device buffers sized for the full padded grid. The (double*)X+(m+2)
// expressions skip alloc2D's row-pointer table to reach the data region.
int size = sizeof(double) * (m+2) * (n+2);
double *d_E, *d_Eprev, *d_R;
cudaMalloc((void**) &d_E, size);
cudaMemcpy(d_E, (double*)E+(m+2), size, cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_Eprev, size);
//cudaMemcpy(d_Eprev, (double*)E_prev+(m+2), size, cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_R, size);
cudaMemcpy(d_R, (double*)R+(m+2), size, cudaMemcpyHostToDevice);
// Main time loop: simulate() also re-copies E_prev each iteration, so the
// device buffers stay in sync with the host arrays.
while (t<T) {
t += dt;
niter++;
simulate(E, E_prev, R, d_E, d_Eprev, d_R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);
//swap current E with previous E
double **tmp = E; E = E_prev; E_prev = tmp;
if (plot_freq){
int k = (int)(t/plot_freq);
if ((t - k * plot_freq) < dt){
//cudaMemcpy((double*)E+(m+2), d_E, size, cudaMemcpyDeviceToHost);
splot(E,t,niter,m+2,n+2);
}
}
}//end of while loop
double time_elapsed = getTime() - t0;
// 28 flops per interior point per iteration; 4 doubles of traffic assumed.
double Gflops = (double)(niter * (1E-9 * n * n ) * 28.0) / time_elapsed ;
double BW = (double)(niter * 1E-9 * (n * n * sizeof(double) * 4.0 ))/time_elapsed;
cout << "Number of Iterations : " << niter << endl;
cout << "Elapsed Time (sec) : " << time_elapsed << endl;
cout << "Sustained Gflops Rate : " << Gflops << endl;
cout << "Sustained Bandwidth (GB/sec): " << BW << endl << endl;
double mx;
//cudaMemcpy((double*)E_prev+(m+2), d_Eprev, size, cudaMemcpyDeviceToHost);
// E_prev holds the last computed state after the final swap above.
double l2norm = stats(E_prev,m,n,&mx);
cout << "Max: " << mx << " L2norm: "<< l2norm << endl;
if (plot_freq){
cout << "\n\nEnter any input to close the program and the plot..." << endl;
getchar();
}
cudaFree(d_E); cudaFree(d_Eprev); cudaFree(d_R);
free (E);
free (E_prev);
free (R);
return 0;
}
|
148
|
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <sys/time.h>
#define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
// Print a diagnostic for a failed CUDA call (file/line included) and, by
// default, terminate the process with the CUDA error code as exit status.
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"CUDA assert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Allocate a device buffer of `length` floats. When h_mem is non-null its
// contents are copied into the new buffer; pass nullptr for an
// uninitialized allocation. Returns the device pointer.
float* h_malloc_device (float* h_mem, const int length) {
    float* d_mem = nullptr;
    const size_t bytes = sizeof(float) * length;
    cudaErrChk(cudaMalloc ((void **)&d_mem, bytes))
    if (h_mem != nullptr) {
        cudaErrChk(cudaMemcpy (d_mem, h_mem, bytes, cudaMemcpyHostToDevice))
    }
    return d_mem;
}
// Fill A and B with the index sequence 0..length-1 and zero out C.
void h_initialize_host(float* A, float* B, float* C, const int length) {
    for (int idx = 0; idx < length; ++idx) {
        A[idx] = idx;
        B[idx] = idx;
        C[idx] = 0.0;
    }
}
// Verify element-wise that C[i] == A[i] + B[i], printing every mismatch;
// prints a success message when all elements agree.
void h_check_result(const float* A, const float* B, const float* C, const int length) {
    bool success = true;
    int idx = 0;
    while (idx < length) {
        const float expected = A[idx] + B[idx];
        if (expected != C[idx]) {
            printf("[ERR] result: %f + %f != %f\n", A[idx], B[idx], C[idx]);
            success = false;
        }
        ++idx;
    }
    if (success == true)
        printf("Checking results succeed\n");
}
// Element-wise C = A + B. One thread per element, with a tail guard so the
// grid may safely overshoot `length`.
__global__ void vectorAdd (const float *A, const float *B, float *C, int length) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= length)
        return;
    C[gid] = A[gid] + B[gid];
}
// Driver: allocate host/device buffers, run vectorAdd on 2^24 floats,
// time the kernel, verify the result, and clean up.
int main(void) {
/*** Configuration ***/
const int length = pow(2, 24);
struct timeval stime, etime;
// NOTE(review): "addiotion" is a typo in this user-facing message.
printf("[Vec addiotion of %d elements]\n", length);
/*** Host memory ***/
float *h_A = (float*) malloc (length*sizeof(float));
float *h_B = (float*) malloc (length*sizeof(float));
float *h_C = (float*) malloc (length*sizeof(float));
h_initialize_host(h_A, h_B, h_C, length);
/*** Device memory ***/
float *d_A=h_malloc_device(h_A, length);;
float *d_B=h_malloc_device(h_B, length);;
float *d_C=h_malloc_device(nullptr, length);;
/*** Launch kernel ***/
// 1024 threads per block; enough blocks to cover `length` (ceiling division).
int numThreads = pow(2,10);
int numBlocks = (length+numThreads-1) / numThreads;
gettimeofday(&stime, NULL);
printf("CUDA kernel launched with <<%d, %d>>\n", numBlocks, numThreads);
vectorAdd<<<numBlocks, numThreads>>> (d_A, d_B, d_C, length);
// Kernel launch is asynchronous; synchronize so the timing below covers
// actual execution.
cudaDeviceSynchronize();
gettimeofday(&etime, NULL);
// NOTE(review): "Elaped" is a typo in this user-facing message.
printf(" Elaped time: %.4f\n", (etime.tv_sec - stime.tv_sec) + ((etime.tv_usec-stime.tv_usec)*1e-6));
cudaErrChk( cudaGetLastError() );
/*** Memcpy from device to host ***/
cudaErrChk( cudaMemcpy(h_C, d_C, sizeof(float)*length, cudaMemcpyDeviceToHost) );
h_check_result(h_A, h_B, h_C, length);
/*** Memory free ***/
cudaDeviceSynchronize();
cudaErrChk(cudaFree(d_A));
cudaErrChk(cudaFree(d_B));
cudaErrChk(cudaFree(d_C));
free (h_A);
free (h_B);
free (h_C);
return 0;
}
|
149
|
#include <stdio.h>
#include <stdlib.h>
#define LEN_F 3073
#define TILE_WIDTH 32
// 3073/32 = 97.
// Single-block SGD step for a 10-class linear (multiclass-SVM-style)
// classifier over features of length LEN_F (3073). Intended launch shape is
// one 2D block of (10, 32, 1): tx indexes the class column, ty cooperates on
// tiled dot products. Iterates over all `total_examples`, accumulates a
// gradient in single_dw, then applies one weight update and writes the loss.
//
// NOTE(review): several spots look racy or suspect and should be confirmed:
//  - sum_ds is atomicAdd'ed per data point but never initialized or reset
//    (only sum_loss and W_square are zeroed at the top).
//  - sum_value accumulates across the data_point loop without being reset,
//    and every ty writes its own partial into dot_XW[tx] (last-writer-wins).
//  - the gradient loop reads x[ii] rather than x[data_point*LEN_F + ii],
//    i.e. always the first example's features.
__global__ void sgd(float *x, float* y, float* weights,
float *single_dw, /* dw computed by one data point, with size (3073, 10) */
float reg_strength,
float learning_rate,
int total_examples,
float *dot_XW,
float *loss) /* dot_XW is with size (10, 1) */
{
/* blockDim.x = 10, blockDim.y = 32 */
int tx = threadIdx.x; //10 classes
int ty = threadIdx.y; //32 rows of a feature tile
float tmp_w, tmp_dw;
int yi, t, data_point;
__shared__ float weights_shared[TILE_WIDTH][10]; // one weight tile
__shared__ float x_shared[TILE_WIDTH];           // one feature tile
__shared__ float ds[10];        // per-class margin indicators
__shared__ float sum_ds;        // count of positive margins (see NOTE above)
__shared__ float distance[10];  // hinge margins per class
__shared__ float loss_i[10];    // per-class loss contributions
__shared__ float W_square;      // sum of squared weights (regularizer)
__shared__ float sum_loss;      // data loss accumulated over all examples
float W_square_single = 0;
float sum_value=0;
// 2D block, (10, 32, 1): one thread zeroes the shared accumulators.
if (tx == 0 && ty ==0) {
sum_loss = 0;
W_square = 0;
}
for(data_point =0; data_point < total_examples; data_point++) {
// Tiled dot product x . W over the 3073 features, TILE_WIDTH at a time.
for (t = 0; t < (LEN_F-1)/TILE_WIDTH + 1; t++) {
if ((t * TILE_WIDTH + ty) < LEN_F)
weights_shared[ty][tx] = weights[(t * TILE_WIDTH + ty)* 10 + tx];
else
weights_shared[ty][tx] =0;
if( (t*TILE_WIDTH+ty) < LEN_F)
x_shared[ty] = x[data_point * LEN_F + t *TILE_WIDTH + ty];
else
x_shared[ty]=0;
__syncthreads();
for(int k=0 ; k < TILE_WIDTH; k++)
sum_value+= x_shared[k] * weights_shared[k][tx];
}//end--of--tile
// tx is the indexing of column {0, 1, 2, ..., 9}
dot_XW[tx] = sum_value;
__syncthreads();
// dot_XW should finish updating by all threads
if(ty==0) {
yi = (int) y[data_point]; // ground-truth class label for this example
distance[tx] = dot_XW[tx] - dot_XW[yi] + 1; // hinge margin
}
__syncthreads();
// ds[tx] = 1 where the margin is violated, 0 otherwise; the true class
// slot is forced to 0 before counting violations into sum_ds.
if(ty==0) {
if (distance[tx] > 0) {
ds[tx] = 1;
} else {
ds[tx] = 0;
}
ds[yi] = 0;
atomicAdd(&sum_ds, ds[tx]);
}
__syncthreads();
// calculating loss by accumulating 200 data point.
if(ty==0) {
if (distance[tx] > 0) {
loss_i[tx] = distance[tx];
} else {
loss_i[tx] = 0;
}
loss_i[yi] = 0;
atomicAdd(&sum_loss, loss_i[tx]); //loss_i is (10, 1)
// __syncthreads();
}
// Gradient accumulation: the true-class column gets -sum_ds, the others
// the margin indicator; see NOTE above about the x[ii] index.
if(ty==0) {
ds[yi] = -1 * sum_ds;
for(int ii=0 ; ii< LEN_F ; ii++) {
int idx = ii * 10 + tx;
single_dw[idx] += x[ii] * ds[tx];
}
}
__syncthreads(); // wait for all 10 threads to finish the single_dw matrix.
}//End--of--Data-Point
// Weight update: average gradient plus L2 regularization term, then the
// final loss = data loss / N + reg * ||W||^2.
if(ty==0) {
for(int ii=0 ; ii< LEN_F ; ii++) {
int idx = ii* 10 + tx;
tmp_w = weights[idx];
tmp_dw = single_dw[idx]/total_examples + 2 * reg_strength * tmp_w;
W_square_single += tmp_w * tmp_w; // calculate for one column of W, for computing loss
weights[idx] = tmp_w - learning_rate * tmp_dw;
}
// 10 threads add to W_square;
atomicAdd(&W_square, W_square_single);
// __syncthreads();
if (tx == 0) { // only one thread is calculating the loss
loss[0] = sum_loss/total_examples + reg_strength * W_square;
}
}//end--of--ty
__syncthreads();
}//End--of--global
|
150
|
// Compile: nvcc -g -G -arch=sm_61 -std=c++11 assignment5-p4.cu -o assignment5-p4
#include <cmath>
#include <cuda.h>
#include <iostream>
#include <sys/time.h>
#define N (1 << 12)
#define THRESHOLD (0.000001)
#define BLOCK_SIZE 32
using std::cerr;
using std::cout;
using std::endl;
// Naive dense matrix multiply: each thread computes one element of
// C = A * B for N x N row-major matrices. Assumes the launch grid exactly
// covers the N x N output (no tail guard).
__global__ void kernel1(uint64_t* d_A, uint64_t* d_B, uint64_t* d_C) {
    const uint64_t row = blockIdx.y * blockDim.y + threadIdx.y;
    const uint64_t col = blockIdx.x * blockDim.x + threadIdx.x;
    uint64_t acc = 0;
    for (uint64_t t = 0; t < N; t++)
        acc += d_A[row * N + t] * d_B[t * N + col];
    d_C[row * N + col] = acc;
}
// Shared-memory tiled matrix multiply: C = A * B for N x N row-major
// matrices, with BLOCK_SIZE x BLOCK_SIZE tiles staged in shared memory.
// Assumes blockDim == (BLOCK_SIZE, BLOCK_SIZE) and N divisible by
// BLOCK_SIZE (true for N = 2^12, BLOCK_SIZE = 32), so no tail guards.
__global__ void kernel2(uint64_t* d_A, uint64_t* d_B, uint64_t* d_C) {
// Global output coordinates of this thread's C element.
uint64_t i = blockIdx.y * blockDim.y + threadIdx.y;
uint64_t j = blockIdx.x * blockDim.x + threadIdx.x;
// Top-left corner of the output tile owned by this block.
uint64_t top_left_i = blockIdx.y * BLOCK_SIZE;
uint64_t top_left_j = blockIdx.x * BLOCK_SIZE;
uint64_t val = 0;
// March the A-tile right and the B-tile down, one tile pair per step.
for(uint64_t block_num = 0; block_num < N/BLOCK_SIZE; block_num++){
__shared__ uint64_t mat1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ uint64_t mat2[BLOCK_SIZE][BLOCK_SIZE];
// Each thread loads one element of each tile.
mat1[threadIdx.y][threadIdx.x] = d_A[(top_left_i + threadIdx.y)*N + (block_num * BLOCK_SIZE + threadIdx.x)];
mat2[threadIdx.y][threadIdx.x] = d_B[(block_num * BLOCK_SIZE + threadIdx.y)*N + (top_left_j + threadIdx.x)];
// Barrier: tiles must be fully loaded before any thread reads them.
__syncthreads();
for(uint64_t k = 0; k < BLOCK_SIZE; k++){
val += mat1[threadIdx.y][k] * mat2[k][threadIdx.x];
}
// Barrier: all reads must finish before the tiles are overwritten.
__syncthreads();
}
d_C[i*N + j] = val;
}
// Same tiled multiply as kernel2, with the inner dot-product loop manually
// unrolled by a factor of 4 (valid since BLOCK_SIZE = 32 is divisible by 4).
__global__ void kernel3(uint64_t* d_A, uint64_t* d_B, uint64_t* d_C) {
uint64_t i = blockIdx.y * blockDim.y + threadIdx.y;
uint64_t j = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t top_left_i = blockIdx.y * BLOCK_SIZE;
uint64_t top_left_j = blockIdx.x * BLOCK_SIZE;
uint64_t val = 0;
for(uint64_t block_num = 0; block_num < N/BLOCK_SIZE; block_num++){
__shared__ uint64_t mat1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ uint64_t mat2[BLOCK_SIZE][BLOCK_SIZE];
mat1[threadIdx.y][threadIdx.x] = d_A[(top_left_i + threadIdx.y)*N + (block_num * BLOCK_SIZE + threadIdx.x)];
mat2[threadIdx.y][threadIdx.x] = d_B[(block_num * BLOCK_SIZE + threadIdx.y)*N + (top_left_j + threadIdx.x)];
// Barrier: tiles must be fully loaded before use.
__syncthreads();
// 4-way unrolled accumulation over the tile.
for(uint64_t k = 0; k < BLOCK_SIZE; k += 4){
val += mat1[threadIdx.y][k] * mat2[k][threadIdx.x];
val += mat1[threadIdx.y][k+1] * mat2[k+1][threadIdx.x];
val += mat1[threadIdx.y][k+2] * mat2[k+2][threadIdx.x];
val += mat1[threadIdx.y][k+3] * mat2[k+3][threadIdx.x];
}
// Barrier before the next iteration overwrites the tiles.
__syncthreads();
}
d_C[i*N + j] = val;
}
// Reference CPU implementation: h_C = h_A * h_B for N x N row-major
// matrices, used to validate the GPU kernels.
__host__ void cpumatMul(uint64_t* h_A, uint64_t* h_B, uint64_t* h_C) {
    for (uint64_t row = 0; row < N; row++) {
        for (uint64_t col = 0; col < N; col++) {
            uint64_t acc = 0;
            for (uint64_t t = 0; t < N; t++)
                acc += h_A[row * N + t] * h_B[t * N + col];
            h_C[row * N + col] = acc;
        }
    }
}
// Compare the reference and test matrices element-wise; print whether any
// difference was found (stops scanning at the first mismatch).
__host__ void check_result(uint64_t* w_ref, uint64_t* w_opt) {
    bool wrong = false;
    for (uint64_t i = 0; i < N && !wrong; i++) {
        for (uint64_t j = 0; j < N; j++) {
            if (w_ref[i * N + j] != w_opt[i * N + j]) {
                wrong = true;
                break;
            }
        }
    }
    if (wrong) {
        cout << " Diffs found!" << endl;
    } else {
        cout << "No differences found between base and test versions\n";
    }
}
// Current wall-clock time in seconds (gettimeofday-based).
double rtclock() {
    struct timeval now;
    struct timezone zone;
    const int rc = gettimeofday(&now, &zone);
    if (rc != 0) {
        cout << "Error return from gettimeofday: " << rc << "\n";
    }
    return (now.tv_sec + now.tv_usec * 1.0e-6);
}
// Driver: run the CPU reference multiply, then kernels 1-3, validating and
// timing each. Note the event timings below bracket the H2D/D2H memcpys as
// well as the kernel itself.
int main() {
uint64_t SIZE = N * N;
uint64_t *h_A, *h_B, *h_cpu_C, *h_gpu1_C, *h_gpu2_C;
h_A = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
h_B = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
h_cpu_C = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
h_gpu1_C = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
h_gpu2_C = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
// A gets small random values, B is constant 2; outputs zeroed.
for (uint64_t i = 0; i < N; i++) {
for (uint64_t j = 0; j < N; j++) {
h_A[i * N + j] = rand() % 64;
h_B[i * N + j] = 2;
h_cpu_C[i * N + j] = 0;
h_gpu1_C[i * N + j] = 0;
h_gpu2_C[i * N + j] = 0;
}
}
double clkbegin = rtclock();
cpumatMul(h_A, h_B, h_cpu_C);
double clkend = rtclock();
double cpu_time = clkend - clkbegin;
cout << "Matmul time on CPU: " << cpu_time * 1000 << " msec" << endl;
// NOTE(review): `status` is assigned by most calls below but only checked
// after the first cudaMalloc.
cudaError_t status;
cudaEvent_t start, end;
uint64_t *d_A, *d_B, *d_C1;
status = cudaMalloc(&d_A, SIZE * sizeof(uint64_t));
if (status != cudaSuccess) {
cerr << cudaGetErrorString(status) << endl;
}
status = cudaMalloc(&d_B, SIZE * sizeof(uint64_t));
status = cudaMalloc(&d_C1, SIZE * sizeof(uint64_t));
dim3 threadsPerBlock(32,32);
dim3 numBlocks(N/threadsPerBlock.x, N/threadsPerBlock.y);
// --- kernel 1 (naive), timed including the memcpys ---
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_A, h_A, SIZE * sizeof(uint64_t), cudaMemcpyHostToDevice);
status = cudaMemcpy(d_B, h_B, SIZE * sizeof(uint64_t), cudaMemcpyHostToDevice);
kernel1<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C1);
// The blocking D2H memcpy also synchronizes on kernel completion.
cudaMemcpy(h_gpu1_C, d_C1, SIZE * sizeof(uint64_t), cudaMemcpyDeviceToHost);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float kernel_time;
cudaEventElapsedTime(&kernel_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
check_result(h_cpu_C, h_gpu1_C);
std::cout << "Kernel 1 time (ms): " << kernel_time << "\n";
// kernel 2 (tiled)
uint64_t* d_C2;
threadsPerBlock = dim3(BLOCK_SIZE,BLOCK_SIZE);
numBlocks = dim3(N/threadsPerBlock.x, N/threadsPerBlock.y);
status = cudaMalloc(&d_C2, SIZE * sizeof(uint64_t));
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_A, h_A, SIZE * sizeof(uint64_t), cudaMemcpyHostToDevice);
status = cudaMemcpy(d_B, h_B, SIZE * sizeof(uint64_t), cudaMemcpyHostToDevice);
kernel2<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C2);
cudaMemcpy(h_gpu2_C, d_C2, SIZE * sizeof(uint64_t), cudaMemcpyDeviceToHost);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&kernel_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
check_result(h_cpu_C, h_gpu2_C);
std::cout << "Kernel 2 time (ms): " << kernel_time << "\n";
// kernel 3 (tiled + unrolled); reuses d_C2/h_gpu2_C
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_A, h_A, SIZE * sizeof(uint64_t), cudaMemcpyHostToDevice);
status = cudaMemcpy(d_B, h_B, SIZE * sizeof(uint64_t), cudaMemcpyHostToDevice);
kernel3<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C2);
cudaMemcpy(h_gpu2_C, d_C2, SIZE * sizeof(uint64_t), cudaMemcpyDeviceToHost);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&kernel_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
check_result(h_cpu_C, h_gpu2_C);
std::cout << "Kernel 3 time (ms): " << kernel_time << "\n";
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C1);
cudaFree(d_C2);
free(h_A);
free(h_B);
free(h_cpu_C);
free(h_gpu1_C);
free(h_gpu2_C);
return EXIT_SUCCESS;
}
|
151
|
#include <iostream>
#include <cuda_runtime.h>
#include <cuda.h>
#define BDIM 32
CUdevice device;
CUcontext context;
CUmodule module;
CUfunction function;
#define module_file "kernel.cubin"
#define kernel_name "arr_kernel"
// Initialize the CUDA driver API: pick device 0, create a context, load the
// precompiled module (kernel.cubin) and resolve the kernel function handle.
// Results are stored in the file-level globals device/context/module/function.
// Exits the process on any failure.
void initCUDA()
{
int deviceCount = 0;
CUresult err = cuInit(0);
if (err == CUDA_SUCCESS)
cuDeviceGetCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "Error: no devices supporting CUDA\n");
exit(-1);
}
// get first CUDA device
cuDeviceGet(&device, 0);
char name[100];
cuDeviceGetName(name, 100, device);
printf("> Using device 0: %s\n", name);
err = cuCtxCreate(&context, 0, device);
if (err != CUDA_SUCCESS) {
fprintf(stderr, "* Error initializing the CUDA context.\n");
cuCtxDestroy(context);
exit(-1);
}
// Load the cubin produced by a separate nvcc compilation step.
err = cuModuleLoad(&module, module_file);
if (err != CUDA_SUCCESS) {
fprintf(stderr, "* Error loading the module %s\n", module_file);
const char * str;
cuGetErrorString(err, &str);
fprintf(stderr, "%s\n", str);
cuCtxDestroy(context);
exit(-1);
}
// Resolve the kernel entry point by name inside the module.
err = cuModuleGetFunction(&function, module, kernel_name);
if (err != CUDA_SUCCESS) {
fprintf(stderr, "* Error getting kernel function %s\n", kernel_name);
const char * str;
cuGetErrorString(err, &str);
fprintf(stderr, "%s\n", str);
cuCtxDestroy(context);
exit(-1);
}
}
// Driver: load the precompiled kernel via the driver API, run it on one
// block of BDIM threads, and print the first output element.
int main() {
    // Buffers hold BDIM*16 ints even though the kernel launch uses BDIM threads.
    int size = BDIM * 16 * sizeof(int);
    int *in = (int *)malloc(size);
    int *out = (int *)malloc(size);
    int *in_dev, *out_dev;
    initCUDA();
    cudaMalloc(&in_dev, size);
    cudaMalloc(&out_dev, size);
    // Fix: initialize the whole buffer. The original loop stopped at BDIM,
    // so most of the `size`-byte region copied below was uninitialized.
    for (int i = 0; i < BDIM * 16; ++i)
        in[i] = i;
    cudaMemcpy(in_dev, in, size, cudaMemcpyHostToDevice);
    // Driver-API launch: kernel arguments are passed as an array of
    // pointers to the argument values.
    void * args[2] = {&in_dev, &out_dev};
    cuLaunchKernel(function,
                   1, 1, 1,        // grid dims
                   BDIM, 1, 1,     // block dims
                   0, 0, args, 0); // shared mem, stream, args, extra
    // Test: the blocking memcpy also waits for kernel completion.
    cudaMemcpy(out, out_dev, size, cudaMemcpyDeviceToHost);
    printf("%d\n", out[0]);
    // Fix: release device/host memory and the driver context (originally leaked).
    cudaFree(in_dev);
    cudaFree(out_dev);
    free(in);
    free(out);
    cuCtxDestroy(context);
    return 0;
}
|
152
|
#include "includes.h"
using namespace std;
#define GAUSS_WIDTH 5
#define SOBEL_WIDTH 3
// In-memory representation of a PNM image (the surrounding file states it
// supports only P5 / grayscale).
typedef struct images {
char *pType;            // PNM type string from the header — presumably "P5"; TODO confirm
int width;              // image width in pixels
int height;             // image height in pixels
int maxValColor;        // maximum sample value from the PNM header
unsigned char *data;    // raw pixel bytes; assumed width*height for grayscale — TODO confirm
} image;
/**
Reads the input file formatted as pnm. The actual implementation
supports only P5 type pnm images (grayscale).
*/
// Convolve a grayscale image with a kWidth x kWidth kernel (one thread per
// output pixel). Border pixels use a truncated kernel: taps falling outside
// the image are skipped. The result is clamped to 255 before the cast.
//
// Fix: added an output bounds guard — the original wrote
// output[y * iWidth + x] unconditionally, which is out of bounds whenever
// the launch grid overshoots the image dimensions.
__global__ void applyGaussianFilter(unsigned char *input, unsigned char *output, float *kernel, int iHeight, int iWidth, int kWidth) {
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x >= iWidth || y >= iHeight)
        return;  // grid may overshoot the image
    double sum = 0.0;
    int halvedKW = kWidth / 2;
    for (int i = -halvedKW; i <= halvedKW; i++) {
        for (int j = -halvedKW; j <= halvedKW; j++) {
            // Skip taps outside the image (truncated-kernel border handling).
            if ((x + j) < iWidth && (x + j) >= 0 && (y + i) < iHeight && (y + i) >= 0) {
                int kPosX = (j + halvedKW);
                int kPosY = (i + halvedKW);
                sum = sum + (float)(input[(y + i) * iWidth + (x + j)]) * kernel[kPosY * kWidth + kPosX];
            }
        }
    }
    if (sum > 255.0)
        sum = 255.0;
    output[y * iWidth + x] = (unsigned char)sum;
}
|
153
|
#include "includes.h"
// Logistic sigmoid: 1 / (1 + e^(-x)).
__device__ float sigmoid(float x) {
    const float e = expf(-x);
    return 1 / (1 + e);
}
// Evaluate a feed-forward network described by `topology` over one block.
// Shared memory layout (dynamic, sized by the launcher): `dim` floats of
// node states followed by blockDim.x ints of per-thread iteration counters.
// Each node's topology entry is 3 ints: [leftBorder, rightBorder) — the
// range of input states it consumes — and weightsStart, its offset into
// `weights`. A thread may compute node `index` only once every producer
// state has been written, which it infers from the producers' iteration
// counters (iters[t] counts how many nodes thread t has completed).
//
// NOTE(review): correctness here leans on fragile assumptions — iters[] is
// read across threads without atomics/volatile, and __syncthreads() sits
// inside a while loop whose trip count differs per thread once a thread's
// last node is done, which risks divergent-barrier UB/deadlock. Confirm the
// intended launch shape guarantees all threads iterate equally.
__global__ void produceState3(const float* arguments, const int argsSize, const float* weights, const int* topology, const int topSize, float* outStates) {
const int tid = threadIdx.x;
const int dim = argsSize + topSize;
extern __shared__ float s[];
float* states = s;                 // node states: inputs first, then computed nodes
int* iters = (int*)&states[dim];   // per-thread progress counters
// Seed the input states; input threads start with one completed "node".
if (tid < argsSize) {
states[tid] = arguments[tid];
iters[tid] = 1;
} else {
iters[tid] = 0;
}
__syncthreads();
// Each thread processes nodes tid, tid+blockDim.x, ... in order.
while(iters[tid] * blockDim.x + tid < dim) {
const int index = iters[tid] * blockDim.x + tid;
const int topIndex = index - argsSize;
const int leftBorder = topology[topIndex*3];
const int rightBorder = topology[topIndex*3 + 1];
const int weightsStart = topology[topIndex*3 + 2];
// Check that every producer state in [leftBorder, rightBorder) has been
// written by its owning thread.
bool canStart = true;
for (int i = leftBorder; i < rightBorder; i++) {
int threadId = i % blockDim.x;
int mustCounted = i / blockDim.x + 1;
if (iters[threadId] < mustCounted) {
canStart = false;
break;
}
}
if (canStart) {
// Weighted sum of producer states, squashed through the sigmoid.
float sum = 0;
for (int i = leftBorder; i < rightBorder; i++) {
sum += states[i] * weights[weightsStart + i - leftBorder];
}
states[index] = sigmoid(sum);
iters[tid]++;
}
__syncthreads();
}
__syncthreads();
// Cooperatively copy the final states out to global memory.
int n = tid;
while(n < dim) {
outStates[n] = states[n];
n += blockDim.x;
}
}
|
154
|
#include <iostream>
#include <algorithm>
#include <string>
#include <cstdio>
#include <cstdlib>
//#include <ctime>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
//#include "../lib/cuPrintf.cu"
using namespace std;
typedef double TNum;
#define CSC(call) do { \
cudaError_t e = call; \
if (e != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n"\
, __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(0); \
} \
} while(0)
//#define EPS .0000001;
//const int32_t BLOCK_DIM = 32;
// Less-than functor handed to thrust::max_element when searching a pivot
// column. NOTE(review): this compares signed values, not magnitudes — for
// Gaussian-elimination partial pivoting a fabs() comparison is usually
// intended; confirm against the algorithm's requirements.
struct Comparator {
__host__ __device__ bool operator()(TNum a, TNum b) {
return a < b;
}
};
__constant__ int32_t SIZE_N[1];
__constant__ int32_t SIZE_M[1];
__constant__ int32_t SIZE_K[1];
// A 2D matrix coordinate (row, column) used as a loop cursor by the
// elimination kernels.
struct Position {
int32_t Row;   // row index
int32_t Col;   // column index
};
#define IsCorrectPos(i, j, height, width) (i < height && j < width)
#define GetLinearPosition(i, j, height, width) (IsCorrectPos(i, j, height, width) ? \
(j * height + i) : -1)
// Swap rows row1 and row2: in matrix a from column `shift` rightward, and
// in matrix b across all of its columns. Columns are distributed over the
// launched threads grid-stride style. Matrix extents come from the
// __constant__ symbols SIZE_N/SIZE_M/SIZE_K; storage is column-major via
// the GetLinearPosition macro.
__global__ void SwapRows(TNum *a, TNum *b, int32_t row1, int32_t row2, int32_t shift) {
    const int32_t start = blockDim.x * blockIdx.x + threadIdx.x;
    const int32_t step = gridDim.x * blockDim.x;
    for (int32_t c = start + shift; c < *SIZE_M; c += step) {
        const TNum held = a[GetLinearPosition(row1, c, *SIZE_N, *SIZE_M)];
        a[GetLinearPosition(row1, c, *SIZE_N, *SIZE_M)] = a[GetLinearPosition(row2, c, *SIZE_N, *SIZE_M)];
        a[GetLinearPosition(row2, c, *SIZE_N, *SIZE_M)] = held;
    }
    for (int32_t c = start; c < *SIZE_K; c += step) {
        const TNum held = b[GetLinearPosition(row1, c, *SIZE_N, *SIZE_K)];
        b[GetLinearPosition(row1, c, *SIZE_N, *SIZE_K)] = b[GetLinearPosition(row2, c, *SIZE_N, *SIZE_K)];
        b[GetLinearPosition(row2, c, *SIZE_N, *SIZE_K)] = held;
    }
}
// Divide row `row` of a (columns shift+1 .. M-1) and of b (all columns) by
// the pivot a[row, shift], grid-stride over columns. If the pivot is ~0
// (|pivot| <= 1e-7) the kernel is a no-op. Note the pivot entry itself is
// left untouched (the a-loop starts at shift+1).
__global__ void Normalize(TNum *a, TNum *b, int32_t row, int32_t shift) {
if (!(abs(a[GetLinearPosition(row, shift, *SIZE_N, *SIZE_M)]) > .0000001)) {
return;
}
int32_t begin = blockDim.x * blockIdx.x + threadIdx.x;
int32_t offset = gridDim.x * blockDim.x;
int32_t col;
// Scale the remainder of the pivot row in a.
for (col = begin + shift + 1; col < *SIZE_M; col += offset) {
a[GetLinearPosition(row, col, *SIZE_N, *SIZE_M)] /=
a[GetLinearPosition(row, shift, *SIZE_N, *SIZE_M)];
}
// Scale the whole pivot row of the right-hand-side matrix b.
for (col = begin; col < *SIZE_K; col += offset) {
b[GetLinearPosition(row, col, *SIZE_N, *SIZE_K)] /=
a[GetLinearPosition(row, shift, *SIZE_N, *SIZE_M)];
}
}
// Forward-elimination step: for every row below `row`, subtract
// multiplier * pivot-row from it (columns shift+1.. in a, all columns in b),
// where the multiplier is that row's entry in the pivot column `shift`.
// Assumes the pivot row was already scaled by Normalize. 2D grid-stride:
// x covers rows, y covers columns. No-op if the pivot is ~0, and rows whose
// pivot-column entry is ~0 are skipped.
__global__ void GaussFirst(TNum *a, TNum *b, int32_t row, int32_t shift) {
if (!(abs(a[GetLinearPosition(row, shift, *SIZE_N, *SIZE_M)]) > .0000001)) {
return;
}
/*Position begin = SetPosition(blockDim.x * blockIdx.x + threadIdx.x,
blockDim.y * blockIdx.y + threadIdx.y);
Position offset = SetPosition(blockDim.x * gridDim.x, blockDim.y * gridDim.y);*/
//Position curr = begin;
int32_t beginRow = blockDim.x * blockIdx.x + threadIdx.x;
int32_t beginCol = blockDim.y * blockIdx.y + threadIdx.y;
int32_t offsetRow = blockDim.x * gridDim.x;
int32_t offsetCol = blockDim.y * gridDim.y;
Position curr;
//TNum head;
for (curr.Row = beginRow + row + 1; curr.Row < *SIZE_N; curr.Row += offsetRow) {
//head = a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
// Skip rows already (numerically) zero in the pivot column.
if (!(abs(a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)]) > .0000001)) {
continue;
}
// Eliminate in a (columns right of the pivot)...
for (curr.Col = beginCol + shift + 1; curr.Col < *SIZE_M; curr.Col += offsetCol) {
//cuPrintf("\nA\n");
a[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_M)] -=
a[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_M)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
// ...and in the right-hand-side matrix b (all columns).
for (curr.Col = beginCol; curr.Col < *SIZE_K; curr.Col += offsetCol) {
//cuPrintf("\nB\n");
b[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_K)] -=
b[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_K)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
//cuPrintf("\nMAX = %ld\n", max(*SIZE_M, *SIZE_K));
/*for (curr.Col = beginCol; curr.Col < max(*SIZE_M - shift - 1, *SIZE_K); curr.Col += offsetCol) {
//cuPrintf("\nSTEP %d\n", curr.Col);
//cuPrintf("%d >= %d + %d + 1 && %d < %d\n", curr.Col, beginCol, shift, curr.Col, *SIZE_M);
if (curr.Col < *SIZE_M - shift - 1) {
//cuPrintf("\nA\n");
a[GetLinearPosition(curr.Row, (curr.Col + shift + 1), *SIZE_N, *SIZE_M)] -=
a[GetLinearPosition(row, (curr.Col + shift + 1), *SIZE_N, *SIZE_M)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
if (curr.Col < *SIZE_K) {
//cuPrintf("\nB\n");
b[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_K)] -=
b[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_K)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
}*/
}
}
// Back-substitution step: for every row above `row`, subtract
// multiplier * pivot-row from the right-hand-side matrix b, where the
// multiplier is that row's entry in column `shift` of a. The corresponding
// update of a itself is commented out — only b is updated here.
// 2D grid-stride: x covers rows (walking upward), y covers columns.
__global__ void GaussSecond(TNum *a, TNum *b, int32_t row, int32_t shift) {
/*Position begin = SetPosition(blockDim.x * blockIdx.x + threadIdx.x,
blockDim.y * blockIdx.y + threadIdx.y);
Position offset = SetPosition(blockDim.x * gridDim.x, blockDim.y * gridDim.y);*/
int32_t beginRow = blockDim.x * blockIdx.x + threadIdx.x;
int32_t beginCol = blockDim.y * blockIdx.y + threadIdx.y;
int32_t offsetRow = blockDim.x * gridDim.x;
int32_t offsetCol = blockDim.y * gridDim.y;
Position curr;
for (curr.Row = row - 1 - beginRow; curr.Row >= 0; curr.Row -= offsetRow) {
/*for (curr.Col = begin.Col + shift; curr.Col < *SIZE_M; curr.Col += offset.Col) {
a[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_M)] -=
a[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_M)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}*/
for (curr.Col = beginCol; curr.Col < *SIZE_K; curr.Col += offsetCol) {
b[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_K)] -=
b[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_K)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
}
}
/*__host__ void GaussSecondCPU(TNum *a, TNum *b, int32_t row, int32_t shift) {
Position curr;
for (curr.Row = row - 1; curr.Row >= 0; curr.Row--) {
for (curr.Col = shift; curr.Col >= 0; curr.Col -= offset.Col) {
a[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_M)] -=
a[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_M)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
for (curr.Col = begin.Col; curr.Col >= 0; curr.Col -= offset.Col) {
b[GetLinearPosition(curr.Row, curr.Col, *SIZE_N, *SIZE_K)] -=
b[GetLinearPosition(row, curr.Col, *SIZE_N, *SIZE_K)] *
a[GetLinearPosition(curr.Row, shift, *SIZE_N, *SIZE_M)];
}
}
}*/
// Read a height x width matrix of doubles from stdin, storing values at the
// offsets produced by GetLinearPosition (column-major: j*height + i).
__host__ void InputMatrix(TNum *matrix, int32_t height, int32_t width) {
    for (int32_t row = 0; row < height; row++) {
        for (int32_t col = 0; col < width; col++) {
            scanf("%le", matrix + GetLinearPosition(row, col, height, width));
        }
    }
}
// Print a height x width matrix in scientific notation, space-separated,
// one row per line (values fetched via GetLinearPosition).
__host__ void PrintMatrix(TNum *matrix, int32_t height, int32_t width) {
    for (int32_t row = 0; row < height; row++) {
        for (int32_t col = 0; col < width; col++) {
            if (col > 0) {
                printf(" ");
            }
            printf("%e", matrix[GetLinearPosition(row, col, height, width)]);
        }
        cout << endl;
    }
}
__host__ int main(void) {
    // Gauss-Jordan elimination of A*X = B on the GPU.
    // A is n x m and B is n x k; GetLinearPosition appears to use
    // column-major storage (the pivot search below scans one column of A
    // as a contiguous range) -- TODO confirm against its definition.
    // Forward pass per column: pick the largest pivot with
    // thrust::max_element, swap it into place (SwapRows), scale the pivot
    // row (Normalize), eliminate below it (GaussFirst), and record the
    // pivot column in shifts[]. A backward pass (GaussSecond) clears
    // entries above the pivots. Finally the m x k solution is printed:
    // rows of B for pivot columns, zero rows for free variables.
    Comparator cmp;
    int32_t n, m, k;
    //cin >> n >> m >> k;
    //scanf("%d%d%d", &n, &m, &k);
    scanf("%d", &n);
    scanf("%d", &m);
    scanf("%d", &k);
    ///cout << n << " " << m << " " << k << endl;
    // Publish the dimensions to device symbols used by the kernels.
    CSC(cudaMemcpyToSymbol(SIZE_N, &n, sizeof(int32_t)));
    CSC(cudaMemcpyToSymbol(SIZE_M, &m, sizeof(int32_t)));
    CSC(cudaMemcpyToSymbol(SIZE_K, &k, sizeof(int32_t)));
    TNum *a = new TNum[n * m];
    TNum *b = new TNum[n * k];
    //bool *is_success = new bool;
    InputMatrix(a, n, m);
    InputMatrix(b, n, k);
    TNum *cuda_a;
    TNum *cuda_b;
    //bool *cuda_is_success;
    CSC(cudaMalloc((void**) &cuda_a, sizeof(TNum) * n * m));
    CSC(cudaMalloc((void**) &cuda_b, sizeof(TNum) * n * k));
    //CSC(cudaMalloc((void**) &cuda_is_success, sizeof(bool)));
    CSC(cudaMemcpy(cuda_a, a, sizeof(TNum) * n * m, cudaMemcpyHostToDevice));
    CSC(cudaMemcpy(cuda_b, b, sizeof(TNum) * n * k, cudaMemcpyHostToDevice));
    // row = next pivot row to fill; shifts[r] = pivot column of row r.
    int32_t row = 0;
    int32_t *shifts = new int32_t[n];
    //cudaPrintfInit();
    memset(shifts, 0, n * sizeof(int32_t));
    /*dim3 threads_per_block(n, m);
    dim3 blocks_per_grid(1, 1);
    if (n * m > BLOCK_DIM * BLOCK_DIM){
    threads_per_block.x = BLOCK_DIM;
    threads_per_block.y = BLOCK_DIM;
    blocks_per_grid.x = ceil((double) (n) / (double)(threads_per_block.x));
    blocks_per_grid.y = ceil((double) (m) / (double)(threads_per_block.y));
    }*/
    // Forward elimination: walk columns left to right until we run out of
    // columns or every row has a pivot.
    for (int32_t col = 0; col < m && row < n; col++) {
        /*CSC(cudaMemcpy(a, cuda_a, sizeof(TNum) * n * m, cudaMemcpyDeviceToHost));
        CSC(cudaMemcpy(b, cuda_b, sizeof(TNum) * n * k, cudaMemcpyDeviceToHost));
        PrintMatrix(a, n, m);
        cout << "---" << endl;
        PrintMatrix(b, n, k);
        cout << "___" << endl;*/
        if (row < n - 1) {
            // Partial pivoting: find the max element (per Comparator) in
            // column `col`, rows [row, n), directly on the device.
            thrust::device_ptr <TNum> cuda_a_begin = thrust::device_pointer_cast(cuda_a);
            thrust::device_ptr <TNum> cuda_a_max = thrust::max_element(
                cuda_a_begin + GetLinearPosition(row, col, n, m),
                cuda_a_begin + (col + 1) * n, cmp);
            int32_t row_max_pos = cuda_a_max - cuda_a_begin - GetLinearPosition(0, col, n, m);
            //TNum row_value, max_value;
            //cout << sizeof(TNum) << endl;
            //cout << cuda_a << " : " << cuda_a + n * m * sizeof(TNum) << endl;
            //cout << cuda_a + sizeof(TNum) * GetLinearPosition(row, col, n, m) << " : " <<
            //cuda_a + sizeof(TNum) * GetLinearPosition(row_max_pos, col, n, m) << endl;
            /*CSC(cudaMemcpy(&row_value, cuda_a + GetLinearPosition(row, col, n, m),
            sizeof(TNum), cudaMemcpyDeviceToHost));
            CSC(cudaMemcpy(&max_value, cuda_a + GetLinearPosition(row_max_pos, col, n, m),
            sizeof(TNum), cudaMemcpyDeviceToHost));
            TNum curr = row_value;*/
            //cout << curr << " : " << max_value << endl;
            if (row_max_pos != row) {
                // Bring the pivot row into position (swaps in both A and B).
                SwapRows<<<dim3(1024), dim3(1024)>>>(cuda_a, cuda_b, row, row_max_pos, col);
                //curr = max_value;
            }
            /*if (!(abs(curr) > .0000001)) {
            //cout << "CURR = " << curr << endl;
            //cout << "OUT1" << endl;
            continue;
            }*/
        }/* else {
        TNum curr;
        //cout << GetLinearPosition(row, col, n, m) << endl;
        //cout << row << ":" << col << endl;
        CSC(cudaMemcpy(&curr, cuda_a + GetLinearPosition(row, col, n, m),
        sizeof(TNum), cudaMemcpyDeviceToHost));
        if (!(abs(curr) > .0000001)) {
        //cout << "OUT2" << endl;
        continue;
        }
        }*/
        /*CSC(cudaMemcpy(a, cuda_a, sizeof(TNum) * n * m, cudaMemcpyDeviceToHost));
        CSC(cudaMemcpy(b, cuda_b, sizeof(TNum) * n * k, cudaMemcpyDeviceToHost));
        cout << "Col: " << col << endl;
        PrintMatrix(a, n, m);
        cout << "---" << endl;
        PrintMatrix(b, n, k);
        cout << "~~~" << endl;*/
        //cudaPrintfInit();
        // Scale the pivot row so the pivot element becomes 1.
        Normalize<<<dim3(1024), dim3(1024)>>>(cuda_a, cuda_b, row, col);
        //bool is_success;
        TNum curr;
        CSC(cudaMemcpy(&curr, cuda_a + GetLinearPosition(row, col, n, m),
            sizeof(TNum), cudaMemcpyDeviceToHost));
        // A (near-)zero pivot means this column has no usable pivot:
        // stay on the same row and advance to the next column.
        if (!(abs(curr) > .0000001)) {
            //cout << "OUT2" << endl;
            continue;
        }
        //cout << (*is_success ? "true" : "false") << endl;
        //cudaPrintfDisplay(stdout, true);
        //cudaPrintfEnd();
        /*CSC(cudaMemcpy(a, cuda_a, sizeof(TNum) * n * m, cudaMemcpyDeviceToHost));
        CSC(cudaMemcpy(b, cuda_b, sizeof(TNum) * n * k, cudaMemcpyDeviceToHost));
        PrintMatrix(a, n, m);
        cout << "---" << endl;
        PrintMatrix(b, n, k);
        cout << "+++" << endl;*/
        if (row < n - 1) {
            // Eliminate the column entries below the pivot.
            GaussFirst<<<dim3(32, 32), dim3(32, 32)>>>(cuda_a, cuda_b, row, col);
        }
        //cout << shifts[row] << " -> " << col << endl;
        shifts[row] = col;
        row++;
        /*CSC(cudaMemcpy(a, cuda_a, sizeof(TNum) * n * m, cudaMemcpyDeviceToHost));
        CSC(cudaMemcpy(b, cuda_b, sizeof(TNum) * n * k, cudaMemcpyDeviceToHost));
        PrintMatrix(a, n, m);
        cout << "---" << endl;
        PrintMatrix(b, n, k);
        cout << "===" << endl << endl;*/
    }
    /*cout << "NEXT!!" << endl;
    CSC(cudaMemcpy(a, cuda_a, sizeof(TNum) * n * m, cudaMemcpyDeviceToHost));
    CSC(cudaMemcpy(b, cuda_b, sizeof(TNum) * n * k, cudaMemcpyDeviceToHost));
    PrintMatrix(a, n, m);
    cout << "---" << endl;
    PrintMatrix(b, n, k);
    cout << "===" << endl << endl;*/
    // Backward pass: clear entries above each pivot, bottom-up.
    for (int32_t row_curr = row - 1; row_curr >= 0; row_curr--) {
        if (row_curr > 0) {
            GaussSecond<<<dim3(32, 32), dim3(32, 32)>>>(cuda_a, cuda_b, row_curr, shifts[row_curr]);
        }
        /*CSC(cudaMemcpy(a, cuda_a, sizeof(TNum) * n * m, cudaMemcpyDeviceToHost));
        CSC(cudaMemcpy(b, cuda_b, sizeof(TNum) * n * k, cudaMemcpyDeviceToHost));
        PrintMatrix(a, n, m);
        cout << "---" << endl;
        PrintMatrix(b, n, k);
        cout << "===" << endl << endl;*/
    }
    //int32_t *cuda_shifts;
    //cudaMalloc((void**) &cuda_shifts, sizeof(int32_t) * row);
    //cudaMemcpy(cuda_shifts, shifts, sizeof(int32_t) * row, cudaMemcpyHostToDevice);
    //GetResult<<<dim3(32, 32), dim3(32, 32)>>>(cuda_b, cuda_x, cuda_shifts, row, );
    //cudaPrintfDisplay(stdout, true);
    //cudaPrintfEnd();
    /*cudaEvent_t syncEvent;
    cudaEventCreate(&syncEvent);
    cudaEventRecord(syncEvent, 0);
    cudaEventSynchronize(syncEvent);
    cudaEventDestroy(syncEvent);*/
    //Calculating end
    // Only B (the transformed right-hand side) is needed on the host.
    CSC(cudaMemcpy(b, cuda_b, sizeof(TNum) * n * k, cudaMemcpyDeviceToHost));
    CSC(cudaFree(cuda_a));
    CSC(cudaFree(cuda_b));
    //cudaFree(cuda_x);
    //PrintMatrix(cuda_b, shifts, m, k);
    // Print the m x k solution matrix X row by row. Solution rows that
    // correspond to free variables (columns with no pivot) are all zeros.
    TNum zero = 0.;
    int32_t untill = 0;
    if (row > 0) {
        untill = shifts[0];
    }
    int32_t rows_cnt = 0;
    // Zero rows before the first pivot column.
    for (int32_t i = 0; i < untill; i++) {
        for (int32_t j = 0; j < k; j++) {
            //cout << "1: " << shifts[0] << "::" << i << ":" << j << endl;
            if (j > 0) {
                //cout << " ";
                printf(" ");
            }
            //cout << scientific << zero;
            printf("%e", zero);
        }
        rows_cnt++;
        //cout << endl;
        printf("\n");
    }
    //cout << row << endl;
    // Pivot rows of B, interleaved with zero rows for the gaps between
    // consecutive pivot columns.
    for (int32_t i = 0; i < row; i++) {
        if (i > 0) {
            for (int32_t ii = 0; ii < shifts[i] - shifts[i - 1] - 1; ii++) {
                for (int32_t j = 0; j < k; j++) {
                    if (j > 0) {
                        //cout << " ";
                        printf(" ");
                    }
                    //cout << "2: " << i << ":" << j << endl;
                    //cout << scientific << zero;
                    printf("%e", zero);
                }
                rows_cnt++;
                //cout << endl;
                printf("\n");
            }
        }
        for (int32_t j = 0; j < k; j++) {
            if (j > 0) {
                //cout << " ";
                printf(" ");
            }
            //cout << "3: " << i << ":" << j << endl;
            //cout << scientific << b[GetLinearPosition(i, j, n, k)];
            printf("%e", b[GetLinearPosition(i, j, n, k)]);
        }
        rows_cnt++;
        //cout << endl;
        printf("\n");
    }
    //cout << "TEST0" << endl;
    //cout << shifts[0] << endl;
    //untill = m - shifts[max(0, (int32_t) row - 1)];
    // Trailing zero rows so the output has exactly m rows.
    for (int32_t i = 0; i < m - rows_cnt; i++) {
        for (int32_t j = 0; j < k; j++) {
            if (j > 0) {
                //cout << " ";
                printf(" ");
            }
            //cout << "4: " << i << ":" << j << endl;
            //cout << scientific << zero;
            printf("%e", zero);
        }
        //cout << endl;
        printf("\n");
    }
    //cout << "TEST1" << endl;
    /*cout << "SHIFTS:\n";
    for (int32_t i = 0; i < row; i++) {
    cout << shifts[i] << endl;
    }*/
    delete [] shifts;
    delete [] a;
    delete [] b;
    //delete [] cuda_x;
    return 0;
}
|
155
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// const int n = 1<<20;
// const int blockSize = 1024;
// const int gridSize = (int)ceil((float)n/blockSize);
/**
 * Per-block partial dot product of a and b (each of n floats).
 * Each thread multiplies one element pair into dynamic shared memory, then
 * a tree reduction collapses the block's products into c[blockIdx.x].
 * Launch as <<<gridSize, blockSize, blockSize * sizeof(float)>>>;
 * blockDim.x must be a power of two for the reduction to be correct.
 */
__global__ void dotProduct(float *a, float *b, float *c, int n)
{
    extern __shared__ float cache[];
    int tId = blockIdx.x*blockDim.x+threadIdx.x;
    int cacheIndex = threadIdx.x;
    // BUGFIX: threads past the end of the vectors previously left their
    // shared-memory slot uninitialized, injecting garbage into the
    // reduction; zero the slot so tail threads contribute nothing.
    cache[cacheIndex] = (tId < n) ? a[tId] * b[tId] : 0.0f;
    __syncthreads();
    // Tree reduction over the block (power-of-two blockDim.x required).
    int i = blockDim.x / 2;
    while(i != 0)
    {
        if(cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads();
        i /= 2;
    }
    // Thread 0 publishes the block's partial sum.
    if(cacheIndex == 0)
        c[blockIdx.x] = cache[0];
}
int main( int argc, char* argv[] )
{
    // Problem size, block size, and ceil-divided grid size.
    int n = 1<<20;
    int blockSize = 1024;
    int gridSize = (int)ceil((float)n/blockSize);
    float n_f = (float)n;
    // Host input vectors and per-block partial results
    float *h_a, *h_b, *h_c;
    // Device input vectors and per-block partial results
    float *d_a, *d_b, *d_c;
    // Size, in bytes, of each input vector
    size_t bytes = n*sizeof(float);
    // BUGFIX: element counts were previously given in bytes (4x too many
    // floats) — allocate exactly n inputs and gridSize partial sums.
    h_a = new float[n]();
    h_b = new float[n]();
    h_c = new float[gridSize];
    // Allocate memory for each vector on the GPU
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, gridSize * sizeof(float));
    // Initialize vectors on host.
    // BUGFIX: the loop bound was `i <= n`, writing one element out of bounds.
    for(int i = 0; i < n; i++ ) {
        h_a[i] = 1.0f;
        h_b[i] = 2.0f;
    }
    // Copy host vectors to device
    cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
    // One shared-memory-reduced partial dot product per block
    dotProduct<<<gridSize, blockSize, blockSize * sizeof(float)>>>(d_a, d_b, d_c, n);
    // Copy the per-block partials back to host
    cudaMemcpy( h_c, d_c, gridSize * sizeof(float), cudaMemcpyDeviceToHost );
    // BUGFIX: sum was previously read uninitialized.
    float sum = 0.0f;
    for(int i=0; i < gridSize; i++)
        sum += h_c[i];
    printf("final result: %f\n", sum / n_f);
    printf("final result: %f\n", sum);
    // Release device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // BUGFIX: the buffers came from new[] — release them with delete[],
    // not free() (mismatch is undefined behavior).
    delete[] h_a;
    delete[] h_b;
    delete[] h_c;
    return 0;
}
|
156
|
// 必要的头文件
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
// 数组元素相加
// Element-wise vector addition: thread i computes C[i] = A[i] + B[i].
// Intended for a single-block launch (<<<1, size>>>), one thread per element.
__global__ void simple_add(const int *A, const int *B, int *C)
{
    const int i = threadIdx.x;
    C[i] = A[i] + B[i];
}
// 主函数
int main(int arg, char* args[])
{
    // Arrays to operate on: two inputs of `size` ints and one output.
    const int size = 10;
    int A[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
    int B[] = { 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 };
    int C[size];
    // Pointers to the copies of the data in device memory
    int *buffer_A = 0;
    int *buffer_B = 0;
    int *buffer_C = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess)
    {
        std::cout << "No CUDA devices found!" << std::endl;
        exit(1);
    }
    // Query and print the device name
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    std::cout << "Using device: " << prop.name << std::endl;
    // Allocate GPU buffers for three vectors (two input, one output).
    cudaMalloc((void**)&buffer_A, size * sizeof(int));
    cudaMalloc((void**)&buffer_B, size * sizeof(int));
    cudaMalloc((void**)&buffer_C, size * sizeof(int));
    // Copy input vectors from host memory to GPU buffers.
    cudaMemcpy(buffer_A, A, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(buffer_B, B, size * sizeof(int), cudaMemcpyHostToDevice);
    // Launch a kernel on the GPU with one thread for each element.
    simple_add<<<1, size>>>(buffer_A, buffer_B, buffer_C);
    // Check for any errors launching the kernel
    // NOTICE: worth imitating — check the kernel's launch status right
    // after issuing the launch.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        std::cout << "Kernel launch failed: " << cudaGetErrorString(cudaStatus) << std::endl;
        cudaFree(buffer_A);
        cudaFree(buffer_B);
        cudaFree(buffer_C);
        exit(1);
    }
    //? Question: why not also re-check the kernel's execution status after
    //? synchronizing? (cudaDeviceSynchronize below does return such errors.)
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess)
    {
        // Something went wrong — release the device buffers and bail out.
        std::cout << "Could not synchronize device!" << std::endl;
        cudaFree(buffer_A);
        cudaFree(buffer_B);
        cudaFree(buffer_C);
        exit(1);
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(C, buffer_C, size * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(buffer_A);
    cudaFree(buffer_B);
    cudaFree(buffer_C);
    // NOTICE: always check the result of each runtime call
    if(cudaStatus != cudaSuccess)
    {
        std::cout << "Could not copy buffer memory to host!" << std::endl;
        exit(1);
    }
    //Prints the array
    std::cout << "Result:" << std::endl;
    for (int i = 0; i < size; i++)
    {
        std::cout << A[i] << " + " << B[i] << " = " << C[i] << std::endl;
    }
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess)
    {
        std::cout << "Device reset failed!" << std::endl;
        exit(1);
    }
    return 0;
}
|
157
|
#include "includes.h"
// Discrete Laplacian-style stencil: dst = src - 0.25*(N + S + E + W) with
// missing neighbors at the image border simply omitted. The 1-D launch is
// mapped so each thread processes a vertical strip of up to pixelsPerThread
// rows within a single column; the strip's first and last rows are handled
// separately so only the inner rows can skip the N/S bounds checks.
__global__ void laplacian(float *dst, const float *src, const size_t width, const size_t height, const size_t pixelsPerThread)
{
    // Column, and first row of this thread's strip.
    const size_t col = (blockIdx.x * blockDim.x + threadIdx.x) % width;
    const size_t crow = (blockIdx.x * blockDim.x + threadIdx.x) / width * pixelsPerThread;
    if (col >= width || crow >= height)
        return;
    // Inner rows run [srow, erow); erow is clamped to the last image row.
    const size_t srow = crow + 1;
    const size_t erow = min((unsigned int)(crow + pixelsPerThread - 1), (unsigned int)(height - 1));
    // First element: all four neighbors need bounds checks.
    const size_t firstIdx = crow * width + col;
    dst[firstIdx] = src[firstIdx];
    if (crow + 1 < height) dst[firstIdx] -= 0.25f * src[firstIdx + width]; // S
    if (crow >= 1) dst[firstIdx] -= 0.25f * src[firstIdx - width]; // N
    if (col + 1 < width) dst[firstIdx] -= 0.25f * src[firstIdx + 1]; // E
    if (col >= 1) dst[firstIdx] -= 0.25f * src[firstIdx - 1]; // W
    // Inner elements: vertical neighbors are guaranteed in-bounds here.
    for (int row = srow; row < erow; ++row)
    {
        const size_t cIdx = row * width + col;
        // C, S, N (always exist)
        dst[cIdx] = src[cIdx] - 0.25f * (src[cIdx + width] + src[cIdx - width]);
        if (col + 1 < width) dst[cIdx] -= 0.25f * src[cIdx + 1]; // E
        if (col >= 1) dst[cIdx] -= 0.25f * src[cIdx - 1]; // W
    }
    // NOTE(review): when the strip has a single row (erow == crow) the
    // first-element code above already wrote it, so we return here to
    // avoid processing the same pixel twice.
    if (erow <= crow)
        return;
    // Last element: N always exists (erow >= crow + 1), S needs a check.
    const size_t lastIdx = erow * width + col;
    dst[lastIdx] = src[lastIdx] - 0.25f * src[lastIdx - width]; // C, N
    if (erow + 1 < height) dst[lastIdx] -= 0.25f * src[lastIdx + width]; // S
    if (col + 1 < width) dst[lastIdx] -= 0.25f * src[lastIdx + 1]; // E
    if (col >= 1) dst[lastIdx] -= 0.25f * src[lastIdx - 1]; // W
}
|
158
|
// Exemplo do Hello World em CUDA
// Compilar: make
// Executar: qsub job (cluster)
#include <stdio.h>
#include <stdlib.h>
// Funcao executada na GPU, tambem eh chamada de kernel
// Empty kernel: goes to the GPU and does nothing (launch-mechanics demo).
__global__ void kernel ()
{
}
int main ()
{
    // Ask the CPU-side runtime to launch the (empty) kernel on the GPU
    // with a single block of a single thread.
    kernel<<<1,1>>>();
    // Execution resumes on the CPU; print the greeting.
    printf("Hello World!\n");
    return 0;
}
|
159
|
#include <fstream>
#include <iostream>
#include <assert.h>
#include <stdlib.h>
#include <random>
#define show(x) std::cout << #x ": " << x << std::endl;
#define BLOCKSIZE 128
// Midpoint-rule partial integral of 4/(1+x^2) over [0,1], one partial per
// block. Each thread accumulates `stepsPerThread` consecutive steps of
// width dx into shared memory; thread 0 then serially sums the per-thread
// partials into blockSums[blockIdx.x]. blockDim.x must equal BLOCKSIZE.
__global__ void pi(float *blockSums, int stepsPerThread, float dx) {
    __shared__ float threadSums[BLOCKSIZE];
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    const int first = tid * stepsPerThread;
    const int last = first + stepsPerThread;
    float partial = 0.0f;
    for (int i = first; i < last; i++) {
        const float x = (i + 0.5f) * dx;
        partial += 4.0f / (1.0f + x * x);
    }
    threadSums[threadIdx.x] = partial;
    __syncthreads();
    if (threadIdx.x == 0) {
        float blockSum = 0.0f;
        for (int j = 0; j < blockDim.x; j++)
            blockSum += threadSums[j];
        blockSums[blockIdx.x] = blockSum;
    }
}
int main()
{
    cudaError_t err;
    // Integration setup: numSteps midpoint samples of 4/(1+x^2) on [0,1],
    // split across numBlocks blocks of blockSize threads.
    const int stepsPerThread = 512 * 2 * 2;
    const int blockSize = BLOCKSIZE;
    const int numBlocks = 256;
    const int numSteps = blockSize * numBlocks * stepsPerThread;
    const float dx = 1.0f / numSteps;
    float *h_blockSums = (float *)malloc(sizeof(float) * numBlocks);
    float *d_blockSums; err = cudaMalloc((void**)&d_blockSums, sizeof(float) * numBlocks); assert(err == cudaSuccess);
    // BUGFIX: the uninitialized host buffer used to be copied to the device
    // here; the kernel overwrites every element, so the copy was both
    // useless and a read of indeterminate memory. It has been removed.
    pi<<<numBlocks, blockSize>>> (d_blockSums, stepsPerThread, dx);
    // BUGFIX: launch-configuration errors previously went unnoticed.
    err = cudaGetLastError(); assert(err == cudaSuccess);
    err = cudaMemcpy(h_blockSums, d_blockSums, sizeof(float) * numBlocks, cudaMemcpyDeviceToHost); assert(err == cudaSuccess);
    // Reduce the per-block partials on the host, then scale by step width.
    float pi = 0.0f;
    for (int i = 0; i < numBlocks; i++)
        pi += h_blockSums[i];
    pi *= dx;
    printf("pi approximately equals: %f\n", pi);
    cudaFree(d_blockSums);
    free(h_blockSums);
    return 0;
}
|
160
|
//example where there is heavy computation done
//using very little data, this example GPU outperforms CPU
//by 100 of times at least
#include <cstdlib>
#include <ctime>
#include <iostream>
#define TSZ 1024
#define BSZ 1024
#define N (BSZ * TSZ)
#define M 100000
#define TT float
using namespace std;
// Device counterpart of o2(): seed a[i] = i/N, then iterate the map
// x -> x*x - 0.25 for M steps. One thread per element; the launch
// (<<<BSZ, TSZ>>>) covers exactly N = BSZ*TSZ elements, so no tail guard
// is needed.
template <typename T>
__global__ void o2_cuda(T* a){
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    a[i] = (T)i / (T)N;
    for (size_t j = 0; j < M; ++j)
        // BUGFIX: the constant was the double literal 0.25, promoting each
        // iteration to double arithmetic; the CPU reference o2() uses
        // 0.25F, so results diverged for T = float. Keep arithmetic in T.
        a[i] = a[i] * a[i] - (T)0.25;
}
// Serial host reference for o2_cuda(): same seeding (i/N) and the same
// M-step iteration x -> x*x - 0.25F per element. Returns the clock()
// timestamp taken right after the loop so the caller can time the work.
template <typename T>
clock_t o2(T* a){
    for (size_t idx = 0; idx < N; ++idx){
        T v = (T)idx / (T)N;
        for (int it = 0; it < M; ++it)
            v = v * v - 0.25F;
        a[idx] = v;
    }
    return clock();
}
int main(){
    // a holds the CPU result, b the GPU result copied back for comparison.
    TT* a = new TT[N], *b = new TT[N];
    TT* db;
    cudaMalloc(&db, N * sizeof(TT));
    clock_t timing_start = clock();
    o2_cuda<<<BSZ, TSZ>>>(db);
    // cudaMemcpy blocks, so it also serves as the kernel sync point and the
    // end of the timed GPU interval.
    cudaMemcpy(b, db, sizeof(TT) * N, cudaMemcpyDeviceToHost);
    cout << "CUDA time: " << (clock() - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl;
    cudaFree(db);
    timing_start = clock();
    clock_t timing_end = o2(a);
    cout << "CPU time: " << (timing_end - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl;
    // Element-wise exact comparison of CPU vs GPU results.
    bool is_same = true;
    for (size_t i = 0; i < N; ++i)
        if (a[i] != b[i]){
            cout << "Index " << i << " is different" << endl;
            is_same = false;
            break;
        }
    if (is_same) cout << "Answer match" << endl;
    // BUGFIX: the host buffers were previously leaked.
    delete[] a;
    delete[] b;
}
|
161
|
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <functional>
#include <iostream>
#include <random>
#include <stdlib.h>
#include <sys/time.h>
#include <vector>
using namespace std;
// Binary functor for thrust::transform computing a*x + y (the classic
// SAXPY step) with the coefficient a fixed at construction time.
struct saxpy_functor {
    const double a;  // coefficient applied to x
    saxpy_functor(double _a) : a(_a) {}
    // Callable from both host and device so Thrust can dispatch either way.
    __host__ __device__
    double operator()(const double& x, const double& y) const {
        return a*x + y;
    }
};
// Elapsed wall-clock time in seconds between two gettimeofday() samples.
double compute_time(struct timeval start_time, struct timeval end_time) {
    double seconds = (double)(end_time.tv_sec - start_time.tv_sec);
    double microseconds = (double)(end_time.tv_usec - start_time.tv_usec);
    return seconds + 1.0e-6 * microseconds;
}
// Device-side benchmark body: repeatedly applies y = a*x + y for the
// coefficient sweep a = 1e-5, 2e-5, ..., < 1e-2, then returns the sum of
// the final y (reduced on the device).
double thrust_compute(thrust::device_vector<double>& x,
                      thrust::device_vector<double>& y) {
    for (double a = 1.0e-5; a < 1.0e-2; a += 1.0e-5)
        // In-place transform: y <- saxpy(a, x, y)
        thrust::transform(x.begin(), x.end(), y.begin(), y.begin(),
                          saxpy_functor(a));
    return thrust::reduce(y.begin(), y.end(),
                          (double) 0.0, thrust::plus<double>());
}
// CPU reference for thrust_compute(): apply the same coefficient sweep of
// y[i] += a*x[i] updates, then return the sum of the resulting y.
double host_compute(vector<double>& x, vector<double>& y) {
    for (double coeff = 1.0e-5; coeff < 1.0e-2; coeff += 1.0e-5) {
        for (int i = 0; i < x.size(); i++)
            y[i] += coeff * x[i];
    }
    // Serial reduction of the updated y.
    double total = 0.0;
    for (int i = 0; i < y.size(); i++)
        total += y[i];
    return total;
}
int main(int argc, char *argv[]) {
    // Benchmark driver: times host->device transfer, the Thrust SAXPY
    // sweep on the GPU, and the identical computation on the CPU, then
    // prints both sums for a sanity comparison.
    const bool verbose = false;
    struct timeval start_time, end_time;
    // Vector length; optionally overridden by the first CLI argument.
    int n = 5;
    if (argc > 1)
        n = atoi(argv[1]);
    vector<double> x_host(n);
    vector<double> y_host(n);
    for (int i = 0; i < x_host.size(); i++) {
        x_host[i] = i + 1.0;
        y_host[i] = i + 10.0;
    }
    // Time the host -> device copy (device_vector construction copies).
    gettimeofday(&start_time, NULL);
    thrust::device_vector<double> x = x_host;
    thrust::device_vector<double> y = y_host;
    gettimeofday(&end_time, NULL);
    cout << "data transfer time = " << compute_time(start_time, end_time)
         << endl;
    if (verbose) {
        cout << "x (GPU)" << endl;
        thrust::copy(x.begin(), x.end(),
                     ostream_iterator<double>(cout, "\n"));
        cout << "y (GPU)" << endl;
        thrust::copy(y.begin(), y.end(),
                     ostream_iterator<double>(cout, "\n"));
    }
    // GPU sweep + reduction.
    gettimeofday(&start_time, NULL);
    double thrust_result = thrust_compute(x, y);
    gettimeofday(&end_time, NULL);
    cout << "GPU compute time = " << compute_time(start_time, end_time)
         << endl;
    if (verbose) {
        cout << "GPU result" << endl;
        thrust::copy(y.begin(), y.end(),
                     ostream_iterator<double>(cout, "\n"));
    }
    cout << "GPU sum = " << thrust_result << endl;
    // CPU sweep + reduction on the original host copies.
    gettimeofday(&start_time, NULL);
    double host_result = host_compute(x_host, y_host);
    gettimeofday(&end_time, NULL);
    cout << "CPU compute time = " << compute_time(start_time, end_time)
         << endl;
    cout << "CPU sum = " << host_result << endl;
    return 0;
}
|
162
|
//Cu12 cpp cu combo test.cpp
namespace Test012_1{
#define threadPerBlock_12_1 2000
// Element-wise piecewise polynomial: for each src[id],
//   y = 2x^3 + 3x^2 + x + 1  when x >= 0
//   y = -x                   otherwise
// and the result is stored in dst[id].
__global__ void kernel(int *dst,int *src,int N){
    // BUGFIX: the global index was computed as
    // blockIdx.x * threadPerBlock_12_1 * threadIdx.x (a product), which
    // collapses to 0 whenever threadIdx.x == 0 and aliases/scatters for
    // the rest; use the standard block-offset + lane form instead.
    int id = blockIdx.x * threadPerBlock_12_1 + threadIdx.x;
    // BUGFIX: guard the tail so partial final blocks stay in bounds.
    if (id >= N) return;
    int x = src[id];
    int y;
    if(x >=0){
        // BUGFIX: the original expression `3*x*x*+x` contained a stray `*`
        // (unary plus), evaluating to 3x^3 instead of 3x^2 + x.
        y = 2*x*x*x + 3*x*x + x + 1;
    }else{
        y= -x;
    }
    // BUGFIX: the result was computed but never written back.
    dst[id] = y;
}
};
|
163
|
#include "includes.h"
// Euclidean speed of each sphere: d_absv[i] = |(velX[i], velY[i], velZ[i])|.
// One thread per sphere; threads beyond nSpheres exit immediately.
__global__ void compute_absv(const unsigned int nSpheres, const float* velX, const float* velY, const float* velZ, float* d_absv) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= nSpheres)
        return;
    const float vx = velX[idx];
    const float vy = velY[idx];
    const float vz = velZ[idx];
    d_absv[idx] = sqrt(vx * vx + vy * vy + vz * vz);
}
|
164
|
//pass
//--blockDim=2048 --gridDim=64
// Writes the device warp size into each thread's slot of r.
__global__ void foo(int *r) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    r[gid] = warpSize;
}
|
165
|
#include "includes.h"
using namespace std;
// Single-block Blelloch work-efficient exclusive prefix sum (the GPU Gems 3
// chapter 39 algorithm). Each of the blockDim.x threads loads and stores
// two elements, so the launch must satisfy n == 2 * blockDim.x with n a
// power of two, and supply n * sizeof(float) dynamic shared memory.
// NOTE(review): this is the un-padded variant, so shared-memory bank
// conflicts are possible; correctness is unaffected.
__global__ void prescan(float *g_odata, float *g_idata, int n)
{
    extern __shared__ float temp[]; // allocated on invocation
    int thid = threadIdx.x;
    int offset = 1;
    temp[2 * thid] = g_idata[2 * thid]; // load input into shared memory
    temp[2 * thid + 1] = g_idata[2 * thid + 1];
    //printf("%d - %f - %f \n", thid, g_odata[2 * thid], g_odata[2 * thid + 1]);
    //printf("%d - %f - %f \n", thid, g_idata[2 * thid], g_idata[2 * thid + 1]);
    // Up-sweep: pairwise partial sums climb the implicit binary tree.
    for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
    {
        __syncthreads();
        if (thid < d)
        {
            int ai = offset*(2 * thid + 1) - 1;
            int bi = offset*(2 * thid + 2) - 1;
            temp[bi] += temp[ai];
        }
        offset *= 2;
    }
    if (thid == 0) { temp[n - 1] = 0; } // clear the last element
    // Down-sweep: propagate prefixes back down, producing an exclusive scan.
    for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
    {
        offset >>= 1;
        __syncthreads();
        if (thid < d)
        {
            int ai = offset*(2 * thid + 1) - 1;
            int bi = offset*(2 * thid + 2) - 1;
            float t = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }
    __syncthreads();
    g_odata[2 * thid] = temp[2 * thid]; // write results to device memory
    g_odata[2 * thid + 1] = temp[2 * thid + 1];
    // printf("%d - %f - %f \n", thid, g_odata[2 * thid], g_odata[2 * thid + 1]);
    //printf("%d - %f - %f \n", thid, g_idata[2 * thid], g_idata[2 * thid + 1]);
}
|
166
|
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <limits.h>
#define OPERATOR *
#define OPERATOR_NAME "multiplication"
#define DTYPE float
// Fills a[0..N) with pseudo-random ints from rand(); seed with srand()
// beforehand for reproducibility.
void random_ints(int* a, int N)
{
    for (int i = 0; i < N; ++i)
        a[i] = rand();
}
// Fills a[0..N) with pseudo-random floats in [0, 1].
// BUGFIX: the original computed (float)rand()/(float)(RAND_MAX/a[i]),
// reading each a[i] BEFORE it was ever written — undefined behavior on an
// uninitialized buffer. Scale by RAND_MAX directly instead.
void random_floats(float* a, int N)
{
    for (int i = 0; i < N; ++i)
        a[i] = (float)rand()/(float)RAND_MAX;
}
// One block per element: block i computes c[i] = a[i] OPERATOR b[i]
// (launch with <<<n, 1>>>).
__global__ void add_gpu_blocks(DTYPE *a, DTYPE *b, DTYPE *c) {
    const int i = blockIdx.x;
    c[i] = a[i] OPERATOR b[i];
}
// One thread per element within a single block: thread i computes
// c[i] = a[i] OPERATOR b[i] (launch with <<<1, n>>>).
__global__ void add_gpu_threads(DTYPE *a, DTYPE *b, DTYPE *c) {
    const int i = threadIdx.x;
    c[i] = a[i] OPERATOR b[i];
}
// Standard 1-D indexing (block offset + lane) with a tail guard so any
// grid/block combination covering at least n threads is valid.
__global__ void add_gpu_both(DTYPE *a, DTYPE *b, DTYPE *c, int n) {
    const int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index >= n)
        return;
    c[index] = a[index] OPERATOR b[index];
}
// Serial CPU reference: c[i] = a[i] OPERATOR b[i] for all `size` elements.
void add_cpu(DTYPE *a, DTYPE *b, DTYPE *c, int size) {
    for (int idx = 0; idx < size; ++idx)
        c[idx] = a[idx] OPERATOR b[idx];
}
#define N (int)2048*2048*120
#define THREADS_PER_BLOCK 512
int main(void) {
    // Report which device we are benchmarking on.
    int device;
    cudaGetDevice(&device);
    struct cudaDeviceProp props;
    cudaGetDeviceProperties(&props, device);
    printf("Using %s.\n\n", props.name);
    DTYPE *a, *b, *c; // host copies of a, b, c
    DTYPE *d_a, *d_b, *d_c; // device copies of a, b, c
    // BUGFIX: the byte count was previously narrowed into an int;
    // N * sizeof(DTYPE) is ~2 GB here and one element-type change away
    // from signed overflow. Keep it in size_t.
    size_t size = (size_t)N * sizeof(DTYPE);
    // Allocate space for device copies of a, b, c; fail loudly if the
    // device cannot hold the three large buffers (previously unchecked,
    // which would surface later as garbage copies).
    if (cudaMalloc((void **)&d_a, size) != cudaSuccess ||
        cudaMalloc((void **)&d_b, size) != cudaSuccess ||
        cudaMalloc((void **)&d_c, size) != cudaSuccess) {
        printf("cudaMalloc failed: %s\n", cudaGetErrorString(cudaGetLastError()));
        return 1;
    }
    // Alloc space for host copies of a, b, c and setup input values
    a = (DTYPE *)malloc(size); random_floats(a, N);
    b = (DTYPE *)malloc(size); random_floats(b, N);
    c = (DTYPE *)malloc(size);
    printf("Calculating vector %s with %d elements...\n", OPERATOR_NAME, N);
    // --- CPU reference timing ---
    clock_t t_cpu;
    t_cpu = clock();
    add_cpu(a, b, c, N);
    t_cpu = clock() - t_cpu;
    double time_taken_cpu = 1000*((double)t_cpu)/CLOCKS_PER_SEC; // in milliseconds
    printf("CPU: %.2fms\n", time_taken_cpu);
    // --- GPU timing: copy-in, kernel, and copy-out measured separately ---
    clock_t t_start;
    t_start = clock();
    // Copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    clock_t t_mem_in = clock();
    // Launch add() kernel on GPU.
    // BUGFIX: grid size now ceil-divides so an N that is not a multiple of
    // THREADS_PER_BLOCK is still fully covered (the kernel bounds-checks).
    add_gpu_both<<<(N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);
    cudaDeviceSynchronize();
    clock_t t_calc= clock();
    // Copy result back to host
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    clock_t t_mem_out = clock();
    clock_t t_gpu = t_mem_out-t_start;
    clock_t t_gpu_mem_in = t_mem_in-t_start;
    clock_t t_gpu_calc = t_calc-t_mem_in;
    clock_t t_gpu_mem_out = t_mem_out-t_calc;
    double time_taken_gpu = 1000*((double)t_gpu)/CLOCKS_PER_SEC; // in milliseconds
    double time_taken_mem_in = 1000*((double)t_gpu_mem_in)/CLOCKS_PER_SEC; // in milliseconds
    double time_taken_calc = 1000*((double)t_gpu_calc)/CLOCKS_PER_SEC; // in milliseconds
    double time_taken_mem_out = 1000*((double)t_gpu_mem_out)/CLOCKS_PER_SEC; // in milliseconds
    printf("GPU: %.2fms.\n", time_taken_gpu);
    printf("\tmemory in: %.2fms.\n", time_taken_mem_in);
    printf("\tkernel: %.2fms.\n", time_taken_calc);
    printf("\tmemory out: %.2fms.\n", time_taken_mem_out);
    // Cleanup. BUGFIX: the host buffers were previously leaked.
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    free(a); free(b); free(c);
    return 0;
}
|
167
|
/* Computes the transient F-statistic map Fmn over a 2-D grid of
 * exponential transient windows: rows m index start times
 * t0 = win_t0 + m*win_dt0, columns n index timescales
 * tau = win_tau + n*win_dtau. Each (m, n) cell sums exponentially
 * weighted per-atom quantities from `input`, a numAtoms x 7 row-major
 * matrix (a2, b2, ab, Fa_re, Fa_im, Fb_re, Fb_im per atom), and writes
 * one F value. 2-D launch: one thread per output cell; threads outside
 * Fmn_rows x Fmn_cols exit without writing.
 * NOTE(review): several constants (1.0, 4.0, 2.0) and exp() are double
 * precision, so parts of the per-atom math run in double — presumably
 * intentional to match lalpulsar; confirm before changing to float.
 */
__global__ void cudaTransientFstatExpWindow ( float *input,
                                              unsigned int numAtoms,
                                              unsigned int TAtom,
                                              unsigned int t0_data,
                                              unsigned int win_t0,
                                              unsigned int win_dt0,
                                              unsigned int win_tau,
                                              unsigned int win_dtau,
                                              unsigned int Fmn_rows,
                                              unsigned int Fmn_cols,
                                              float *Fmn
                                              )
{
    /* match CUDA thread indexing and high-level (t0,tau) indexing */
    unsigned int m = blockDim.x * blockIdx.x + threadIdx.x; // t0: row
    unsigned int n = blockDim.y * blockIdx.y + threadIdx.y; // tau: column
    /* unraveled 1D index for 2D output array */
    unsigned int outidx = Fmn_cols * m + n;
    /* hardcoded copy from lalpulsar */
    unsigned int TRANSIENT_EXP_EFOLDING = 3;
    if ( (m < Fmn_rows) && (n < Fmn_cols) ) {
        /* compute Fstat-atom index i_t0 in [0, numAtoms) */
        unsigned int TAtomHalf = TAtom/2; // integer division
        unsigned int t0 = win_t0 + m * win_dt0;
        /* integer round: floor(x+0.5) */
        /* NOTE(review): the subtraction is evaluated in unsigned
         * arithmetic, so if t0 + TAtomHalf < t0_data the value wraps
         * instead of going negative; the i_tmp < 0 guard below suggests
         * signed behavior was intended — verify upstream guarantees
         * t0 >= t0_data. */
        int i_tmp = ( t0 - t0_data + TAtomHalf ) / TAtom;
        if ( i_tmp < 0 ) {
            i_tmp = 0;
        }
        unsigned int i_t0 = (unsigned int)i_tmp;
        if ( i_t0 >= numAtoms ) {
            i_t0 = numAtoms - 1;
        }
        /* translate n into an atoms end-index
         * for this search interval [t0, t0+Tcoh],
         * giving the index range of atoms to sum over
         */
        unsigned int tau = win_tau + n * win_dtau;
        /* get end-time t1 of this transient-window search
         * for given tau, what Tcoh should the exponential window cover?
         * for speed reasons we want to truncate
         * Tcoh = tau * TRANSIENT_EXP_EFOLDING
         * with the e-folding factor chosen such that the window-value
         * is practically negligible after that, where it will be set to 0
         */
        // unsigned int t1 = lround( win_t0 + TRANSIENT_EXP_EFOLDING * win_tau);
        unsigned int t1 = t0 + TRANSIENT_EXP_EFOLDING * tau;
        /* compute window end-time Fstat-atom index i_t1 in [0, numAtoms)
         * using integer round: floor(x+0.5)
         */
        i_tmp = ( t1 - t0_data + TAtomHalf ) / TAtom - 1;
        if ( i_tmp < 0 ) {
            i_tmp = 0;
        }
        unsigned int i_t1 = (unsigned int)i_tmp;
        if ( i_t1 >= numAtoms ) {
            i_t1 = numAtoms - 1;
        }
        /* now we have two valid atoms-indices [i_t0, i_t1]
         * spanning our Fstat-window to sum over
         */
        float Ad = 0.0f;
        float Bd = 0.0f;
        float Cd = 0.0f;
        float Fa_re = 0.0f;
        float Fa_im = 0.0f;
        float Fb_re = 0.0f;
        float Fb_im = 0.0f;
        unsigned short input_cols = 7; // must match input matrix!
        /* sum up atoms: antenna-pattern terms weighted by the squared
         * window, Fa/Fb terms by the window itself */
        for ( unsigned int i=i_t0; i<=i_t1; i++ ) {
            unsigned int t_i = t0_data + i * TAtom;
            float win_i = 0.0;
            if ( t_i >= t0 && t_i <= t1 ) {
                float x = 1.0 * ( t_i - t0 ) / tau;
                win_i = exp ( -x );
            }
            float win2_i = win_i * win_i;
            Ad += input[i*input_cols+0] * win2_i; // a2_alpha
            Bd += input[i*input_cols+1] * win2_i; // b2_alpha
            Cd += input[i*input_cols+2] * win2_i; // ab_alpha
            Fa_re += input[i*input_cols+3] * win_i; // Fa_alpha_re
            Fa_im += input[i*input_cols+4] * win_i; // Fa_alpha_im
            Fb_re += input[i*input_cols+5] * win_i; // Fb_alpha_re
            Fb_im += input[i*input_cols+6] * win_i; // Fb_alpha_im
        }
        /* get inverse antenna pattern determinant,
         * following safety checks from
         * XLALComputeAntennaPatternSqrtDeterminant()
         * and estimateAntennaPatternConditionNumber()
         */
        float sumAB = Ad + Bd;
        float diffAB = Ad - Bd;
        float disc = sqrt ( diffAB*diffAB + 4.0 * Cd*Cd );
        float denom = sumAB - disc;
        /* condition number of the 2x2 antenna-pattern matrix; INFINITY
         * when the matrix is singular (denom <= 0) */
        float cond = (denom > 0) ? ((sumAB + disc) / denom) : INFINITY;
        float DdInv = 0.0f;
        if ( cond < 1e4 ) {
            DdInv = 1.0 / ( Ad * Bd - Cd * Cd );
        }
        /* matching compute_fstat_from_fa_fb
         * including default fallback = 0.5*E[2F] in noise
         * when DdInv == 0 due to ill-conditionness of M_munu
         */
        float F = 2;
        if ( DdInv > 0 ) {
            F = DdInv * ( Bd * ( Fa_re*Fa_re + Fa_im*Fa_im )
                          + Ad * ( Fb_re*Fb_re + Fb_im*Fb_im )
                          - 2.0 * Cd * ( Fa_re * Fb_re + Fa_im * Fb_im )
                          );
        }
        /* store result in Fstat-matrix
         * at unraveled index of element {m,n}
         */
        Fmn[outidx] = F;
    } // ( (m < Fmn_rows) && (n < Fmn_cols) )
} // cudaTransientFstatExpWindow()
|
168
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define N 1025
// Copies A into C while subtracting 32, i.e. maps ASCII lowercase letters
// to their uppercase counterparts. One thread per character.
// BUGFIX: the kernel previously indexed by threadIdx.x alone, which both
// forbids multi-block launches and — combined with this file's
// <<<1, N>>> launch and N == 1025 — relied on an invalid configuration
// (1025 exceeds the 1024 threads-per-block hardware limit). Use the
// guarded global index so any covering grid works; behavior is identical
// for valid single-block launches.
__global__ void CUDAStrCopy(char *A, char C[N])
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        C[i] = A[i] - 32;
}
int main()
{
    char A[N];
    char C[N];
    char *pA, *pC;
    // Source buffer: N lowercase 'a' characters (not NUL-terminated; the
    // kernel transforms exactly N chars by index, so none is needed).
    for (int i = 0; i < N; i++)
    {
        A[i] = 'a';
    }
    printf("C = \n");
    // Time allocation + transfer + kernel + readback with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMalloc((void **)&pA, N * sizeof(char));
    cudaMalloc((void **)&pC, N * sizeof(char));
    cudaMemcpy(pA, A, N * sizeof(char), cudaMemcpyHostToDevice);
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("CUDA Error1: %s\n", cudaGetErrorString(error));
    }
    // BUGFIX: the launch was <<<1, N>>> with N == 1025, which exceeds the
    // 1024 threads-per-block limit and fails with an invalid
    // configuration (seen as Error2). Ceil-divide into 1024-thread blocks
    // instead; the kernel bounds-checks the tail.
    CUDAStrCopy<<<(N + 1023) / 1024, 1024>>>(pA, pC);
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("CUDA Error2: %s\n", cudaGetErrorString(error));
    }
    cudaMemcpy(C, pC, N * sizeof(char), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("VALUE OF C IN HOST AFTER KERNEL EXECUTION\n");
    for (int i = 0; i < N; i++)
        printf("%c\n", C[i]);
    printf("Time Taken=%f", elapsedTime);
    // BUGFIX: the events were never destroyed (resource leak).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(pA);
    cudaFree(pC);
    printf("\n");
    return 0;
}
|
169
|
//#include <cuda_runtime.h>
//#include "device_launch_parameters.h"
//#include <helper_cuda.h>
////#include "sm_20_atomic_functions.h"
//
//#include <thrust/host_vector.h>
//#include <thrust/device_vector.h>
//#include <thrust/count.h>
//#include <stdio.h>
//
//#define REAL float
////#define USE_CONST_MEM
//#define HANDLE_ERROR checkCudaErrors
//
//float elapsedTime;
//#define START_GPU {\
//elapsedTime = 0.0;\
//cudaEvent_t start, stop;\
//checkCudaErrors(cudaEventCreate(&start)); \
//checkCudaErrors(cudaEventCreate(&stop));\
//checkCudaErrors(cudaEventRecord(start, 0));\
//
//#define END_GPU \
//checkCudaErrors(cudaEventRecord(stop, 0));\
//checkCudaErrors(cudaEventSynchronize(stop));\
//checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); \
//printf("GPU Time used: %3.2f ms\n", elapsedTime);\
//checkCudaErrors(cudaEventDestroy(start));\
//checkCudaErrors(cudaEventDestroy(stop));}
//
//#define START_CPU {\
//double start = omp_get_wtime();
//
//#define END_CPU \
//double end = omp_get_wtime();\
//double duration = end - start;\
//printf("CPU Time used: %3.1f ms\n", duration * 1000);}
//
////############################################################################
//#ifdef _WIN64
//#define GLUT_NO_LIB_PRAGMA
//#pragma comment (lib, "opengl32.lib")
//#pragma comment (lib, "glut64.lib")
//#endif //_WIN64
//
///* On Windows, include the local copy of glut.h and glext.h */
//#include "GL/glut.h"
//#include "GL/glext.h"
//#define GET_PROC_ADDRESS( str ) wglGetProcAddress( str )
//
////----------------------װbitmap------------------------------
//struct CPUAnimBitmap {
// //
// unsigned char *pixels;
// int width, height;
// //һָ
// void *dataBlock;
//
// //Զ̬úָ
// void(*fAnim)(void*, int);
// void(*animExit)(void*);
// void(*clickDrag)(void*, int, int, int, int);
// int dragStartX, dragStartY;
//
// CPUAnimBitmap(int w, int h, void *d = NULL) {
// width = w;
// height = h;
// //r g b alph
// pixels = new unsigned char[width * height * 4];
// dataBlock = d;
// clickDrag = NULL;
// }
//
// ~CPUAnimBitmap() {
// delete[] pixels;
// }
//
// unsigned char* get_ptr(void) const { return pixels; }
// long image_size(void) const { return width * height * 4; }
//
// void click_drag(void(*f)(void*, int, int, int, int)) {
// clickDrag = f;
// }
//
// //ȾͼƬ
// //input: fʹGPUõbitmapͼƬĺ
// // ecuda
// void anim_and_exit(void(*f)(void*, int), void(*e)(void*)) {
// CPUAnimBitmap** bitmap = get_bitmap_ptr();
// *bitmap = this;
// fAnim = f;
// animExit = e;
// // a bug in the Windows GLUT implementation prevents us from
// // passing zero arguments to glutInit()
// int c = 1;
// char* dummy = "";
// glutInit(&c, &dummy);
// glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
// glutInitWindowSize(width, height);
// glutCreateWindow("bitmap");
// glutKeyboardFunc(Key);
// glutDisplayFunc(Draw);
//
// if (clickDrag != NULL)
// glutMouseFunc(mouse_func);
//
// //glutIdleFuncȫֵĻصûд¼ʱ
// //GLUTִܿк̨
// //ãidle functionᱻϵãֱд¼
// glutIdleFunc(idle_func);
// glutMainLoop();
// }
//
// // static method used for glut callbacks
// static CPUAnimBitmap** get_bitmap_ptr(void) {
// static CPUAnimBitmap* gBitmap;
// return &gBitmap;
// }
//
// // static method used for glut callbacks
// static void mouse_func(int button, int state,
// int mx, int my) {
// if (button == GLUT_LEFT_BUTTON) {
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// if (state == GLUT_DOWN) {
// bitmap->dragStartX = mx;
// bitmap->dragStartY = my;
// }
// else if (state == GLUT_UP) {
// bitmap->clickDrag(bitmap->dataBlock,
// bitmap->dragStartX,
// bitmap->dragStartY,
// mx, my);
// }
// }
// }
//
// // static method used for glut callbacks
// static void idle_func(void) {
// static int ticks = 1;
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// bitmap->fAnim(bitmap->dataBlock, ticks++);
// glutPostRedisplay();
// }
//
// // static method used for glut callbacks
// static void Key(unsigned char key, int x, int y) {
// switch (key) {
// case 27:
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// bitmap->animExit(bitmap->dataBlock);
// //delete bitmap;
// exit(0);
// }
// }
//
// // static method used for glut callbacks
// static void Draw(void) {
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// glClearColor(0.0, 0.0, 0.0, 1.0);
// glClear(GL_COLOR_BUFFER_BIT);
// glDrawPixels(bitmap->width, bitmap->height, GL_RGBA, GL_UNSIGNED_BYTE, bitmap->pixels);
// glutSwapBuffers();
// }
//};
//
//// Image size and scene constants (comment reconstructed from mis-encoded text).
//#define DIM 1024
//#define rnd( x ) (x * rand() / RAND_MAX)
//#define INF 2e10f
//
////----------------------------װһ-------------------------------
//struct Sphere {
// REAL r, b, g;
// REAL radius;
// //Сλ
// REAL x, y, z;
// //ÿһ֡Сƶٶ
// REAL dx, dy, dz;
// bool isCrash;
// // ox,oyصĹߣǷཻ
// //ཻôľ롣
// //Ͷֻཻ¼ӽŻᱻ
// __device__ REAL hit(REAL ox, REAL oy, REAL *n) {
// REAL dx = ox - x;
// REAL dy = oy - y;
// //Сİ뾶ʱཻܺ
// if (dx*dx + dy*dy < radius*radius) {
// REAL dz = sqrtf(radius*radius - dx*dx - dy*dy);
// *n = dz / sqrtf(radius * radius);
// return dz + z;
// }
// //Զ
// return -INF;
// }
//};
//
////------------Сײĸ----------
//#define SPHERES 2000
//
//int *d_crashnum, *h_crashnum;
//
//#ifdef USE_CONST_MEM
//__constant__ Sphere d_spheres[SPHERES];
//#else
//Sphere *d_spheres;
//#endif
//
////------------------------cuda kernel --------------------------
//
//#define STEP_SIZE REAL(20.0)
//
////ײСĸ
//__global__ void crash(Sphere *s, int num_sphere, int*d_crashnum , int streamId , int streamNum)
//{
// //õײС
// int s1 = threadIdx.x + blockIdx.x * blockDim.x;
// int s2 = threadIdx.y + blockIdx.y * blockDim.y;
//
// s2 = s2 + 64 / 4 * streamId * 32;
// //Ϊx,yСײ,Գƾһľ
// if (s2 < num_sphere && s1 < num_sphere && s2 < s1)
// //if (s2 < num_sphere && s1 < num_sphere)
// {
// REAL dx = s[s1].x - s[s2].x;
// REAL dy = s[s1].y - s[s2].y;
// REAL dz = s[s1].z - s[s2].z;
// REAL totalRadius = s[s1].radius + s[s2].radius;
// //жǷײ
// if (dx*dx + dy*dy + dz*dz <= totalRadius * totalRadius)
// {
// s[s1].isCrash = true;
// s[s2].isCrash = true;
//
// //printf("y: %d x: %d\n", s2,s1);
//
// atomicAdd(d_crashnum, 1);
// }
// }
//}
//
//__global__ void addKernel(int * num0 , int * num1, int * num2, int * num3,int * res)
//{
// *res = *num0 + *num1 + *num2 + *num3;
//}
//
////ڵλ
//__global__ void kernelMoving(Sphere *s, int len)
//{
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// //Եx 壬ڵλ
// while (x < len) {
//
// s[x].isCrash = false;
// s[x].x += s[x].dx;
// s[x].y += s[x].dy;
// s[x].z += s[x].dz;
// x += gridDim.x*blockDim.x;
// }
//}
//
//#ifdef USE_CONST_MEM
//__global__ void kernel(unsigned char *ptr) {
//#else
//__global__ void kernel(Sphere *d_spheres, unsigned char *ptr) {
//#endif
// //õpixel صλá
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// int y = threadIdx.y + blockIdx.y * blockDim.y;
// //ǵڼ
// int offset = x + y * blockDim.x * gridDim.x;
// REAL ox = (x - DIM / 2);
// REAL oy = (y - DIM / 2);
//
// REAL r = 0, g = 0, b = 0;
// REAL maxz = -INF;
// for (int i = 0; i < SPHERES; i++) {
// REAL n;
// REAL t = d_spheres[i].hit(ox, oy, &n);
// if (t > maxz) {
// REAL fscale = n;
// if (d_spheres[i].isCrash)
// {
// r = 1.0f *fscale;
// g = 0.0f*fscale;
// b = 0.0f*fscale;
// }
// else
// {
// r = d_spheres[i].r * fscale;
// g = d_spheres[i].g * fscale;
// b = d_spheres[i].b * fscale;
// maxz = t;
// }
// }
// }
//
// ptr[offset * 4 + 0] = (int)(r * 255);
// ptr[offset * 4 + 1] = (int)(g * 255);
// ptr[offset * 4 + 2] = (int)(b * 255);
// ptr[offset * 4 + 3] = 255;
//}
//
//
//// globals needed by the update routine
//struct DataBlock {
// // gpu еbitmap
// unsigned char *dev_bitmap;
// //cpuдbitmap
// CPUAnimBitmap *bitmap;
//};
//
//
//#define streamNum 4
//cudaStream_t stream0, stream1, stream2, stream3;
//int *crashNum0, *crashNum1, *crashNum2, *crashNum3;
//Sphere *sphere0, *sphere1, *sphere2, *sphere3;
//
//void generate_frame(DataBlock *d, int ticks) {
// float totalTime = 0.0;
// //Сײļ0
// HANDLE_ERROR(cudaMemset(d_crashnum, 0, sizeof(int)));
// //Сĸ copyhost Уӡ
//
// START_GPU
//
// //------------ƶС --2000 ----------------
// kernelMoving << <64, 32 >> > (d_spheres, SPHERES);
// END_GPU
// totalTime += elapsedTime;
//
// //----------------------------stream handle-------------------------
// //ĸ
// START_GPU
//
// dim3 crashGrids(64, 64 / streamNum);
// dim3 crashBlock(32, 32);
//
// HANDLE_ERROR(cudaMemset(crashNum0, 0, sizeof(int)));
// HANDLE_ERROR(cudaMemset(crashNum1, 0, sizeof(int)));
// HANDLE_ERROR(cudaMemset(crashNum2, 0, sizeof(int)));
// HANDLE_ERROR(cudaMemset(crashNum3, 0, sizeof(int)));
//
// cudaMemcpyAsync(crashNum0, d_crashnum, sizeof(Sphere) * SPHERES, cudaMemcpyDeviceToDevice, stream0);
// cudaMemcpyAsync(crashNum1, d_crashnum, sizeof(Sphere) * SPHERES, cudaMemcpyDeviceToDevice, stream1);
// cudaMemcpyAsync(crashNum2, d_crashnum, sizeof(Sphere) * SPHERES, cudaMemcpyDeviceToDevice, stream2);
// cudaMemcpyAsync(crashNum3, d_crashnum, sizeof(Sphere) * SPHERES, cudaMemcpyDeviceToDevice, stream3);
//
// cudaMemcpyAsync(sphere0, d_spheres, sizeof(Sphere) * SPHERES, cudaMemcpyDeviceToDevice, stream0);
// cudaMemcpyAsync(sphere1, d_spheres, sizeof(Sphere) * SPHERES, cudaMemcpyDeviceToDevice, stream1);
// cudaMemcpyAsync(sphere2, d_spheres, sizeof(Sphere) * SPHERES, cudaMemcpyDeviceToDevice, stream2);
// cudaMemcpyAsync(sphere3, d_spheres, sizeof(Sphere) * SPHERES, cudaMemcpyDeviceToDevice, stream3);
//
// crash << <crashGrids, crashBlock, 0, stream0 >> > (sphere0, SPHERES, crashNum0, 3, streamNum);
// crash << <crashGrids, crashBlock, 0, stream1 >> > (sphere1, SPHERES, crashNum1, 2, streamNum);
// crash << <crashGrids, crashBlock, 0, stream2 >> > (sphere2, SPHERES, crashNum2, 1, streamNum);
// crash << <crashGrids, crashBlock, 0, stream3 >> > (sphere3, SPHERES, crashNum3, 0, streamNum);
//
// //----------------------ͬ------------------------------
// cudaStreamSynchronize(stream0);
// cudaStreamSynchronize(stream1);
// cudaStreamSynchronize(stream2);
// cudaStreamSynchronize(stream3);
//
// /*thrust::host_vector<int> crashNumList(4);
// crashNumList[0] = *crashNum0;
// crashNumList[1] = *crashNum1;
// crashNumList[2] = *crashNum2;
// crashNumList[3] = *crashNum3;
// int sum = thrust::reduce(crashNumList.begin(), crashNumList.end(), (int)0, thrust::plus<int>());*/
// //printf("num of pair sphere crash: %d\n", sum);
//
// addKernel << <1, 1 >> > (crashNum0, crashNum1, crashNum2, crashNum3, d_crashnum);
// //*d_crashnum = * + *crashNum1 + *crashNum2 + *crashNum3;
// END_GPU
//
// totalTime += elapsedTime;
//
// //-----------Сһŵ bitmap--------
// START_GPU
// dim3 grids(DIM / 16, DIM / 16);
// dim3 threads(16, 16);
//#ifdef USE_CONST_MEM
// kernel << <grids, threads >> > (d->dev_bitmap);
//#else
// kernel << <grids, threads >> > (d_spheres, d->dev_bitmap);
//#endif
//
// END_GPU
// totalTime += elapsedTime;
//
// //-----bitmap ݴ device host -----------
// HANDLE_ERROR(cudaMemcpy(d->bitmap->get_ptr(), d->dev_bitmap,
// d->bitmap->image_size(), cudaMemcpyDeviceToHost));
//
// HANDLE_ERROR(cudaMemcpy(h_crashnum, d_crashnum,sizeof(int), cudaMemcpyDeviceToHost));
// printf("num of pair sphere crash: %d\n", (*h_crashnum));
// printf("total time: %3.1f\n", totalTime);
// printf("---------------------------------------------\n");
//}
//
//// clean up memory allocated on the GPU
//void cleanup(DataBlock *d) {
// HANDLE_ERROR(cudaFree(d->dev_bitmap));
// //ͷСײĿռ
// HANDLE_ERROR(cudaFree(d_crashnum));
// free(h_crashnum);
//
// //----------free stream-----------
// cudaStreamDestroy(stream0);
// cudaStreamDestroy(stream1);
// cudaStreamDestroy(stream2);
// cudaStreamDestroy(stream3);
//
//}
//
////-------------------------main-------------------------------
//
//int main(void) {
// //-----------------Ƿ----------------------
// cudaDeviceProp prop;
// int whichDevice;
// cudaGetDevice(&whichDevice);
// cudaGetDeviceProperties(&prop, whichDevice);
// if (!prop.deviceOverlap) {
// printf("Device will not handle overlaps, so no speed up from streams\n");
// return;
// }
// else
// {
// printf("Device will handle overlaps, so we can speed up from streams\n");
// }
//
// //----------create stream-----------
// cudaStreamCreate(&stream0);
// cudaStreamCreate(&stream1);
// cudaStreamCreate(&stream2);
// cudaStreamCreate(&stream3);
//
// //--------------Сײ------------------
// HANDLE_ERROR(cudaMalloc(&crashNum0, sizeof(int)));
// HANDLE_ERROR(cudaMalloc(&crashNum1, sizeof(int)));
// HANDLE_ERROR(cudaMalloc(&crashNum2, sizeof(int)));
// HANDLE_ERROR(cudaMalloc(&crashNum3, sizeof(int)));
// HANDLE_ERROR(cudaMalloc(&sphere0, sizeof(Sphere) * SPHERES));
// HANDLE_ERROR(cudaMalloc(&sphere1, sizeof(Sphere) * SPHERES));
// HANDLE_ERROR(cudaMalloc(&sphere2, sizeof(Sphere) * SPHERES));
// HANDLE_ERROR(cudaMalloc(&sphere3, sizeof(Sphere) * SPHERES));
//
//
// //---------ͼƬĿռ----------
// DataBlock data;
// CPUAnimBitmap bitmap(DIM, DIM, &data);
// data.bitmap = &bitmap;
//
// //СײļĿռ
// h_crashnum = (int *)malloc(sizeof(int));
// *h_crashnum = 0;
//
// HANDLE_ERROR(cudaMalloc((void**)&d_crashnum, sizeof(int)));
// HANDLE_ERROR(cudaMemcpy(d_crashnum, h_crashnum,sizeof(int), cudaMemcpyHostToDevice));
// //---------gpuռ-------------
// HANDLE_ERROR(cudaMalloc((void**)&data.dev_bitmap, bitmap.image_size()));
//
//#ifdef USE_CONST_MEM
//#else
// HANDLE_ERROR(cudaMalloc((void**)&d_spheres, sizeof(Sphere) * SPHERES));
//#endif
//
// // allocate temp memory, initialize it, copy to constant
// // memory on the GPU, then free our temp memory
// Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
// for (int i = 0; i < SPHERES; i++) {
// temp_s[i].r = rnd(1.0f);
// temp_s[i].g = rnd(1.0f);
// temp_s[i].b = rnd(1.0f);
//
// temp_s[i].x = rnd(1000.0f) - 500;
// temp_s[i].y = rnd(1000.0f) - 500;
// temp_s[i].z = rnd(1000.0f) - 500;
// temp_s[i].radius = rnd(10.0f) + 5;
//
// //ʼ Сƶٶ
// temp_s[i].dx = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// temp_s[i].dy = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// temp_s[i].dz = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// }
//
//#ifdef USE_CONST_MEM
// HANDLE_ERROR(cudaMemcpyToSymbol(d_spheres, temp_s, sizeof(Sphere) * SPHERES));
//#else
// HANDLE_ERROR(cudaMemcpy(d_spheres, temp_s, sizeof(Sphere)*SPHERES, cudaMemcpyHostToDevice));
//#endif
//
// free(temp_s);
//
// // display
// bitmap.anim_and_exit((void(*)(void*, int))generate_frame, (void(*)(void*))cleanup);
//}
|
170
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>
#define VSIZE 1024*50000
#define TSIZE 1024
#define BSIZE VSIZE/TSIZE
#define ITE 10
// One thread per element: first bump b by one, then fold the new b into a.
// The caller sizes the grid so it exactly covers both arrays, hence no
// explicit bounds guard is present here.
__global__ void add(float* a,float* b){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    float bumped = b[i] + 1.0f;  // b's new value for this iteration
    b[i] = bumped;
    a[i] += bumped;              // accumulate the incremented b into a
}
// Repeatedly launches `add` and verifies the arithmetic-series result on the
// host. Fixes over the original: host/device allocations are checked, kernel
// launch/execution errors are surfaced, and the redundant per-iteration
// cudaDeviceSynchronize() is replaced by a single sync after the loop
// (same-stream launches already execute in order).
int main(){
    float *ha,*hb;   // host buffers
    float *da,*db;   // device buffers
    ha = (float*)malloc(sizeof(float)*VSIZE);
    hb = (float*)malloc(sizeof(float)*VSIZE);
    if (ha == NULL || hb == NULL){
        printf("host allocation failed\n");
        exit(-1);
    }
    if (cudaMalloc((void**)&da,sizeof(float)*VSIZE) != cudaSuccess ||
        cudaMalloc((void**)&db,sizeof(float)*VSIZE) != cudaSuccess){
        printf("device allocation failed\n");
        exit(-1);
    }
    for(int i = 0 ; i < VSIZE ; i ++){
        ha[i] = 0.0f;
        hb[i] = 0.0f;
    }
    cudaMemcpy(da,ha,sizeof(float)*VSIZE,cudaMemcpyHostToDevice);
    cudaMemcpy(db,hb,sizeof(float)*VSIZE,cudaMemcpyHostToDevice);
    dim3 threads(TSIZE,1,1);
    dim3 blocks (BSIZE,1,1);
    printf("threads : %d\n",threads.x);
    printf("blocks : %d\n",blocks.x);
    for(int i = 0 ; i < ITE ; i ++){
        add<<<blocks,threads>>>(da,db);
    }
    // One launch-error check plus one sync replaces ITE synchronizations.
    if (cudaGetLastError() != cudaSuccess ||
        cudaDeviceSynchronize() != cudaSuccess){
        printf("kernel execution failed\n");
        exit(-1);
    }
    cudaMemcpy(ha,da,sizeof(float)*VSIZE,cudaMemcpyDeviceToHost);
    // After ITE iterations b[i] == i, so a[i] == 1+2+...+ITE == ITE*(ITE+1)/2
    // (55 for ITE==10, exactly representable in float).
    for(int i = 0 ; i < VSIZE ; i ++){
        if(ha[i] != ((ITE+1)*ITE)/2 ){
            printf("ha[%d]\t%f\n",i,ha[i]);
            printf("Result TEST : FAILED\n");
            exit(-1);
        }
    }
    printf("Result TEST : PASS\n");
    free(ha);
    free(hb);
    cudaFree(da);
    cudaFree(db);
    return 0;
}
|
171
|
#include <cstdio>
#include <cassert>
#include <cuda_runtime.h>
using namespace std;
// Banded matrix-vector product: output[index] = sum over the t stored
// diagonals of matrix[i][index] * vector[index + indices[i]], skipping
// multiplier positions that fall outside the vector.
// Launch: 1-D grid covering n threads; each thread computes one output element.
__global__ void matrix_multiplication(const int *d_indices,
                                      const int *d_matrix,
                                      const int *d_vector,
                                      int *d_output,
                                      int n,
                                      int t) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
        return;
    int result = 0;
    for (int i = 0 ; i < t; i++) {
        const int multiplierIndex = index + d_indices[i];
        // Valid vector indices are 0..n-1. The original tested
        // `multiplierIndex > n`, which let index == n through and read one
        // element past the end of d_vector.
        if (multiplierIndex < 0 || multiplierIndex >= n)
            continue;
        const int elemIndex = n * i + index;  // row i of the t x n band matrix
        result += d_matrix[elemIndex] * d_vector[multiplierIndex];
    }
    d_output[index] = result;
}
// Abort (via assert) on a failing CUDA call. The expression is evaluated into
// a local first so the CUDA call still executes when NDEBUG disables assert —
// the original `assert(EXPR == cudaSuccess)` would have removed the call
// (and its side effects, e.g. the allocation itself) in release builds.
#define cudaCheckErrors(EXPR) do { cudaError_t err_ = (EXPR); (void)err_; assert(err_ == cudaSuccess); } while (0)
/// Fast unsigned-integer scanner for stdin: skips every byte until the first
/// decimal digit, then accumulates consecutive digits into an int.
int fetch_int() {
    int c;
    // advance to the first digit
    do {
        c = getchar_unlocked();
    } while (c < '0' || c > '9');
    // accumulate the digit run
    int value = 0;
    do {
        value = value * 10 + (c - '0');
        c = getchar_unlocked();
    } while (c >= '0' && c <= '9');
    return value;
}
// Reads n, t, a t x n band matrix with t offsets, and an n-vector; multiplies
// on the GPU and prints the result. Fixes over the original:
//  - the matrix was filled at offset t*i + j while the kernel reads n*i + index
//    (wrong layout whenever t != n);
//  - the output loop printed d_output[i], dereferencing a DEVICE pointer on
//    the host (undefined behavior) — it now prints the host copy;
//  - pinned memory from cudaMallocHost was released with cudaFree instead of
//    cudaFreeHost;
//  - the kernel launch is now checked.
int main() {
    int n = fetch_int();
    int t = fetch_int();
    int *h_fullInput;
    // We need memory for n indices, n * t matrix elements and n elements of vector.
    const int full = n * (t + 2);
    cudaCheckErrors(cudaMallocHost((void**)&h_fullInput, sizeof(int) * full));
    int *h_indices = h_fullInput;
    int *h_matrix = h_fullInput + n;
    int *h_vector = h_matrix + n * t;
    for (int i = 0; i < t; i++) {
        h_indices[i] = fetch_int();
        for (int j = 0 ; j < n ; j++) {
            // Row i has n elements, so the linear offset is n*i + j — this
            // matches the kernel's elemIndex = n * i + index.
            h_matrix[n * i + j] = fetch_int();
        }
    }
    for (int i = 0 ; i < n ; i++) {
        h_vector[i] = fetch_int();
    }
    int *d_fullInput;
    cudaCheckErrors(cudaMalloc((void**)&d_fullInput, sizeof(int) * full));
    cudaCheckErrors(cudaMemcpy(d_fullInput, h_fullInput, sizeof(int) * full, cudaMemcpyHostToDevice));
    const int *d_indices = d_fullInput;
    const int *d_matrix = d_fullInput + n;
    const int *d_vector = d_matrix + n * t;
    int * d_output;
    cudaCheckErrors(cudaMalloc((void**)&d_output, sizeof(int) * n));
    const int blockSize = 512;
    const int gridSize = (n + blockSize - 1) / blockSize;  // ceiling division
    matrix_multiplication<<<gridSize, blockSize>>>(d_indices, d_matrix, d_vector, d_output, n, t);
    cudaCheckErrors(cudaGetLastError());  // catch launch-configuration errors
    // write output into indices to save malloc call
    int * h_output = h_indices;
    cudaCheckErrors(cudaMemcpy(h_output, d_output, sizeof(int) * n, cudaMemcpyDeviceToHost));
    for (int i = 0 ; i < n ; i++) {
        printf("%d\n", h_output[i]);
    }
    cudaCheckErrors(cudaFree(d_fullInput));
    cudaCheckErrors(cudaFreeHost(h_fullInput));  // pinned host memory
    cudaCheckErrors(cudaFree(d_output));
}
|
172
|
#include "includes.h"
// One NMF multiplicative-update step for W:
//   wcp[row][col] = w[row][col] * (A H^T)[row][col] / (W H H^T)[row][col]
// for an r x c matrix A, r x k factor W and k x c factor H (H is accessed as
// h[col*c + i], i.e. stored row-major by component).
// Fix over the original: __syncthreads() sat inside the `col < k && row < r`
// branch, which is undefined behavior when some threads of the block fall
// outside the bounds. The barrier is now reached by every thread.
__global__ void nmfw(double *a, int r, int c, int k, double *w, double *h, double *wcp)
{
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    bool active = (col < k && row < r);
    double numer = 0.0;
    double denom = 0.0;
    if (active) {
        // numer = w[row][col] * (a h^T)[row][col]
        double sum = 0.0;
        for (int i = 0; i < c; i++)
            sum += a[row*c + i]*h[col*c + i];
        numer = w[row*k+col]*sum;
        // denom = (w h h^T)[row][col]
        for (int i = 0; i < c; i++) {
            for (int j = 0; j < k; j++) {
                denom += w[row*k + j]*h[j*c + i]*h[col*c+i];
            }
        }
    }
    // Block-wide barrier: all reads of w complete before wcp is written
    // (relevant if a caller aliases wcp with w). Must be outside the branch.
    __syncthreads();
    if (active)
        wcp[row*k+col] = numer/denom;
}
|
173
|
/*
Reference: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.
html#ixzz4CtH09yed
*/
#include <cstdlib>
#include <ctime>
#include <cstdio>
#include <iostream>
#include <iomanip>
using namespace std;
// Generate random floats between 0 and UP_BOUND.
// No trailing semicolon: the original `#define UP_BOUND 100;` only compiled
// because every use site happened to place UP_BOUND at the end of a statement;
// any other expression context (e.g. `UP_BOUND * 2`) would be a syntax error.
#define UP_BOUND 100
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
    int width;        // number of columns
    int height;       // number of rows
    float* elements;  // row-major storage, width * height floats; ownership lies with the creator
} Matrix;
// Thread block size
#define BLOCK_SIZE 25
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Matrix multiplication - Host code. Copies A and B to the device, launches
// MatMulKernel over a ceiling-divided grid, and copies C back. Each CUDA call's
// status is printed. Fix over the original: cudaThreadSynchronize() has been
// deprecated for years; cudaDeviceSynchronize() is the supported equivalent
// and likewise surfaces asynchronous kernel-execution errors.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A and B to device memory
    Matrix d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaError_t err = cudaMalloc(&d_A.elements, size);
    cout << "CUDA malloc A: " << cudaGetErrorString(err) << endl;
    err = cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    cout << "Copy A to device: " << cudaGetErrorString(err) << "\n" << endl;
    Matrix d_B;
    d_B.width = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    err = cudaMalloc(&d_B.elements, size);
    cout << "CUDA malloc B: " << cudaGetErrorString(err) << endl;
    err = cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    cout << "Copy B to device: " << cudaGetErrorString(err) << "\n" << endl;
    // Allocate C in device memory
    Matrix d_C;
    d_C.width = C.width;
    d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    err = cudaMalloc(&d_C.elements, size);
    cout << "CUDA malloc C: " << cudaGetErrorString(err) << endl;
    // Invoke kernel: one thread per output element, grid rounded up so
    // dimensions need not be multiples of BLOCK_SIZE.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x,
                 (A.height + dimBlock.y - 1) / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    err = cudaDeviceSynchronize();  // replaces deprecated cudaThreadSynchronize()
    cout << "Run kernel: " << cudaGetErrorString(err) << endl;
    // Read C from device memory
    err = cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    cout << "Copy C off of device: " << cudaGetErrorString(err) << "\n" << endl;
    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul().
// Each in-range thread computes one element of C by accumulating A's row
// against B's column. Fix over the original: the store to C.elements was
// OUTSIDE the bounds check, so every out-of-range thread of a ceiling-divided
// grid wrote 0 past the end of C (out-of-bounds device write).
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Out-of-range threads must neither compute nor write.
    if (row >= A.height || col >= B.width)
        return;
    float Cvalue = 0.0f;
    for (int e = 0; e < A.width; ++e)
        Cvalue += (A.elements[row * A.width + e]) *
                  (B.elements[e * B.width + col]);
    C.elements[row * C.width + col] = Cvalue;
}
// Builds random A (a1 x a2) and B (a2 x b2), multiplies on the GPU via
// MatMul(), reports wall-clock time and prints the top-left 10x10 corner of
// each matrix. Fix over the original: when argc < 4 it printed the usage
// message but FELL THROUGH and dereferenced argv[1..3] — it now returns.
int main(int argc, char const *argv[])
{
    clock_t t;
    Matrix A, B, C;
    int a1, a2, b1, b2;
    int i, j;
    srand(time(NULL));
    if (argc < 4) {
        cout << "Usage: ./accuracy.o A.height A.width B.width" << endl;
        return 1;  // missing arguments: bail out instead of reading argv OOB
    }
    // Get dimensions of A and B
    // Run $ ./matrixMul 1 1000000 400
    a1 = atoi(argv[1]); // A's height
    a2 = atoi(argv[2]); // A's width
    b1 = a2;            // B's height (must match A's width)
    b2 = atoi(argv[3]); // B's width
    A.height = a1;
    A.width = a2;
    A.elements = new float[A.width * A.height];
    B.height = b1;
    B.width = b2;
    B.elements = new float[B.width * B.height];
    C.height = A.height;
    C.width = B.width;
    C.elements = new float[C.width * C.height];
    // Fill A and B with random floats in [0, UP_BOUND]
    for (i = 0; i < A.height; ++i)
        for (j = 0; j < A.width; ++j)
            A.elements[i * A.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
    for (i = 0; i < B.height; ++i)
        for (j = 0; j < B.width; ++j)
            B.elements[i * B.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND;
    // Call MatMul(), and therefore MatMulKernel()
    t = clock();
    MatMul(A, B, C);
    // Print time multiplication took (includes device alloc/copies)
    t = clock() - t;
    cout << "It took me ";
    cout << fixed << setprecision(2) << ((float)t)/CLOCKS_PER_SEC;
    cout << " seconds.\n" << endl;
    // Print the top-left corners of A, B, and C
    for (i = 0; i < min(10, A.height); ++i) {
        for (j = 0; j < min(10, A.width); ++j) {
            cout << fixed << setprecision(3) << A.elements[i * A.width + j];
            cout << "\t";
        }
        cout << endl;
    }
    cout << endl;
    for (i = 0; i < min(10, B.height); ++i) {
        for (j = 0; j < min(10, B.width); ++j) {
            cout << fixed << setprecision(3) << B.elements[i * B.width + j];
            cout << "\t";
        }
        cout << endl;
    }
    cout << endl;
    for (i = 0; i < min(10, C.height); ++i) {
        for (j = 0; j < min(10, C.width); ++j) {
            cout << fixed << setprecision(3) << C.elements[i * C.width + j];
            cout << "\t";
        }
        cout << endl;
    }
    cout << endl;
    delete[] A.elements;
    delete[] B.elements;
    delete[] C.elements;
    return 0;
}
|
174
|
#include "includes.h"
// Converts an 8-bit single-channel image to 32-bit float with an affine map:
// dst = src * mul_constant + add_constant.
// NOTE(review): strides are applied as element counts (bytes for src, floats
// for dst), not byte pitches — confirm against the callers.
// Launch: 2-D grid of 2-D blocks covering at least width x height threads.
__global__ void cuConvert8uC1To32fC1Kernel(const unsigned char *src, size_t src_stride, float* dst, size_t dst_stride, float mul_constant, float add_constant, int width, int height)
{
    const int col = blockIdx.x*blockDim.x + threadIdx.x;
    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (col >= width || row >= height)
        return;  // thread falls outside the image
    dst[row*dst_stride + col] = src[row*src_stride + col] * mul_constant + add_constant;
}
|
175
|
#include "includes.h"
extern "C" {
}
#define TB 128
#define DISP_MAX 256
// In-place robust cost transform: x[i] = 1 - exp(-x[i] / lambda).
// Launch: 1-D grid covering at least `size` threads; extra threads are guarded.
// Fix over the original: it called exp() with double literals, silently
// promoting every element to double precision only to truncate the result
// back into a float store; expf/1.0f keep the whole computation in float.
__global__ void rho(float *x, int size, float lambda)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < size) {
        x[id] = 1.0f - expf(-x[id] / lambda);
    }
}
|
176
|
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <cuda.h>
#define TIME 500 //# of iterations
#define BLKSIZE 24
#define DEBUG(s) {printf("peek "); printf(s); printf("\n");}
//#define DEBUG(s)
typedef unsigned long long bint;
// One diffusion step over a (TIME+1) x (dim-2) temperature bar: each thread
// owns site (x = row within the bar, y = column), distributes a quarter of its
// source value to its four neighbours through the shared accumulator `add`
// (or directly to global `des` across block boundaries), then folds the
// accumulated neighbour contributions into des.
// Launch (from config): blockDim = (TIME+1, BLKSIZE), 1-D grid over columns.
// NOTE(review): `add[x][y]` uses the GLOBAL column y, which exceeds the
// BLKSIZE second extent of the shared array whenever blockIdx.x > 0 —
// presumably `add[x][y%BLKSIZE]` was intended; confirm before reuse.
// NOTE(review): multiple threads assign neighbouring add[][] cells between
// the two barriers with plain `=`; the phases are ordered but the individual
// neighbour writes race with each other.
__global__ void simulate(float *src, float* des, bint dim){
__shared__ float add[TIME+1][BLKSIZE];
//x, y location of thread - to MEM space
bint x = threadIdx.x;
bint y = threadIdx.y + blockIdx.x*blockDim.y;
bint id = threadIdx.x*(dim-2) + threadIdx.y + blockIdx.x*blockDim.y;
float v = src[id]/4;  // quarter of this site's value, handed to each neighbour
//initialize the shared accumulator (row 0 is the fixed boundary row)
if (x>0){
add[x][y] = 0;
}
__syncthreads();
//load each v to up, left, right, down positions
if (x < TIME)
add[x+1][y] = v;  // down neighbour
if (x > 0)
add[x-1][y] = v;  // up neighbour
if (y%BLKSIZE > 0) //has sth on left within this block
add[x][y%BLKSIZE-1] = v;
else if (y > 0)
des[id-1] = v; //left neighbour lives in another block: write global
if (y%BLKSIZE < BLKSIZE-1) //has sth on right within this block
add[x][y%BLKSIZE+1] = v;
else if (y < dim-3)
des[id+1] = v; //right neighbour lives in another block: write global
__syncthreads();
// fold the shared accumulator into global memory once per thread
if ((x > 0) && (y < dim-2))
des[id] += add[x][y];
}
// Post-processing pass launched with a single block: stages the first row of
// d2 into shared memory, mirrors it into the output line `m`, and adds the
// staged values back into both bars at mirrored offsets (id1 from the left
// edge, id2 from the right edge of each row).
// Launch (from config): <<<1, (TIME+1, round32(TIME))>>>.
// NOTE(review): `m[y]` and `tmp[y]` are indexed with y up to blockDim.y-1,
// which is rounded UP to a multiple of 32 and so exceeds the TIME+1 extent of
// `tmp` — confirm the intended bound before reuse.
// NOTE(review): (TIME+1) * round32(TIME) threads per block far exceeds the
// 1024-thread hardware limit for the TIME=500 configuration; this launch
// would fail with an invalid-configuration error (nothing checks for it).
__global__ void assembly(float *d1, float *d2, float *m, bint dim){
__shared__ float tmp[TIME+1];
bint x = threadIdx.x;
bint y = threadIdx.y;
bint id1 = threadIdx.x*(dim-2) + threadIdx.y;
bint id2 = (threadIdx.x+1)*(dim-2) - threadIdx.y - 1; //mirrored index from the row's right edge
tmp[x] = d2[x]; //global load to shared
__syncthreads();
// write results back to global memory
m[y] = tmp[y];
if (y < TIME){
d1[id1] += tmp[y+1];
d1[id2] += tmp[y+1];
d2[id1] += tmp[y+1];
d2[id2] += tmp[y+1];
m[dim-2-y] = tmp[y+1];
}
}
// Sets up two (dim-2) x (TIME+1) temperature bars (bar1 has a 150-degree hot
// patch at columns 10..30, bar2 is uniformly 80), uploads them, runs TIME
// ping-pong simulate() steps, then one assembly() pass. The CPU-side result
// assembly is commented out, so this currently always returns NULL.
// NOTE(review): blkdim = (TIME+1, BLKSIZE) = (501, 24) is 12024 threads per
// block — far beyond the 1024-thread limit, so every simulate() launch would
// fail with an invalid configuration, and no CUDA call here is error-checked.
// NOTE(review): in the ping-pong loop the second bar is launched as
// (src2, des2) in BOTH branches, while the first bar alternates — presumably
// the else branch should be (des2, src2); confirm intent.
float * config(bint dim){
//allocate on host and initialize
float *bar1 = (float *)calloc((dim-2)*(TIME+1), sizeof(float)); //side with 150-degree patch
float *bar2 = (float *)calloc((dim-2)*(TIME+1), sizeof(float)); //side all 80
bint p;
for (p=0; p < dim-2; p++){
bar1[p] = 80;
bar2[p] = 80;
if ((p>=10) && (p<=30)){
bar1[p] = 150;
}
}
//config kernel
dim3 blkdim;
blkdim.x = TIME+1;
blkdim.y = BLKSIZE;
bint griddim = ceil((double)(dim-2)/BLKSIZE);
//allocate on device (double buffers for the ping-pong scheme)
bint mem = (dim-2)*(TIME+1)*sizeof(float);
float *src1, *des1;
cudaMalloc((void **)&src1, mem);
cudaMalloc((void **)&des1, mem);
cudaMemcpy(src1, bar1, mem, cudaMemcpyHostToDevice);
cudaMemcpy(des1, bar1, mem, cudaMemcpyHostToDevice);
float *src2, *des2;
cudaMalloc((void **)&src2, mem);
cudaMalloc((void **)&des2, mem);
cudaMemcpy(src2, bar2, mem, cudaMemcpyHostToDevice);
cudaMemcpy(des2, bar2, mem, cudaMemcpyHostToDevice);
DEBUG("loaded")
free(bar1);
free(bar2);
//launch: alternate source/destination each step
bint i;
for (i=0; i<TIME; i++){
if (i%2==0){
simulate<<<griddim, blkdim>>>(src1, des1, dim);
simulate<<<griddim, blkdim>>>(src2, des2, dim);
}else{
simulate<<<griddim, blkdim>>>(des1, src1, dim);
simulate<<<griddim, blkdim>>>(src2, des2, dim);
}}
// keep whichever buffer holds the final state, free the other
float *d1, *d2;
if (TIME%2==0){ //result in src
cudaFree(des1);
cudaFree(des2);
d1 = src1;
d2 = src2;
}
else{
cudaFree(src1);
cudaFree(src2);
d1 = des1;
d2 = des2;
}
DEBUG("simulated")
//assembly
float *mid;
cudaMalloc((void **)&mid, dim*sizeof(float)); //result for middle lines
dim3 blk;
blk.x = TIME+1;
blk.y = (TIME%32==0) ? TIME : TIME+32-TIME%32; //first 32n >= TIME
assembly<<<1, blk>>>(d1, d2, mid, dim);
DEBUG("assemblied")
/*
//cpu assembly
float *m = (float *)malloc(dim*dim*sizeof(float));
bint unit = (dim-2)*sizeof(float);
for (i=0; i<TIME+1; i++){
m[i*dim] = 80;
cudaMemcpy(&m[i*dim+1], &d1[i*(dim-2)], unit, cudaMemcpyDeviceToHost);
m[(i+1)*dim-1] = 80;
}
cudaFree(d1);
//done line 0 to TIME
float *middlelines = (float *)malloc(dim*sizeof(float));
cudaMemcpy(middlelines, mid, dim*sizeof(float), cudaMemcpyDeviceToHost);
for (i=TIME+1; i<dim-1-TIME; i++){
middlelines[i] = 0; //no temperature for the middle region
}
middlelines[dim-1] = 80;
for (i=TIME+1; i<dim-1-TIME; i++){
memcpy(&m[i*dim], middlelines, dim*sizeof(float));
}
cudaFree(mid);
//done for TIME+1...dim-TIME-1
for (i=dim-1-TIME; i<dim; i++){
m[i*dim] = 80;
cudaMemcpy(&m[i*dim+1], &d2[i*(dim-2)], unit, cudaMemcpyDeviceToHost);
m[(i+1)*dim-1] = 80;
}
cudaFree(d2);
//done dim-TIME-1...dim-1
return m;
*/
// CPU assembly is disabled: release everything and return no result
cudaFree(d1);
cudaFree(d2);
cudaFree(mid);
return NULL;
}
// Arithmetic mean of all dim*dim entries of the host matrix m.
float avg(float *m, bint dim){
    bint total = dim*dim;
    float acc = 0;
    for (bint j = 0; j < total; j++){
        acc += m[j];
    }
    return acc/total;
}
// Entry point: reads the matrix size from argv, runs the simulation via
// config(), and prints the mean temperature when a result is produced
// (config() currently returns NULL, so nothing is printed).
int main(int argc, char *argv[]){
    if (argc < 2){
        printf("Please indicate matrix size.\n");
        exit(0);
    }
    bint dim = atoi(argv[1]) + 1;  // config expects n+1
    float *result = config(dim);
    if (result != NULL){
        printf("peek mean: %f\n", avg(result, dim));
        free(result);
    }
    return 0;
}
|
177
|
/* Copyright 2018 Maxwel Gama Monteiro Junior
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|||||||||||||||| ||||||||||||||||
|||||||||||||||| CUDADriller ||||||||||||||||
|||||||||||||||| ||||||||||||||||
|||||||||||||||| Code Version: 2.0 ||||||||||||||||
|||||||||||||||| Last updated: 06/2017 ||||||||||||||||
|||||||||||||||| Author: Maxwel Gama Monteiro Junior ||||||||||||||||
|||||||||||||||| Built based on DOI:10.5151/phypro-sic100-046 ||||||||||||||||
Contact:maxweljr@gmail.com ||||||||||||||||
_____ ||||||||||||||||
/. \ ||||||||||||||||
/ . . \ ||||||||||||||||
\ . . / ||||||||||||||||
\_____/ ||||||||||||||||
A___A ||||||||||||||||
A___A |o o| ||||||||||||||||
____ / o o \ |='=| ||||||||||||||||
___/~____ ='= /_____/ |_________ ||||||||||||||||
(______)__m_m_) / |||| ||||||||||||||||
|___||||] ||||||||||||||||
||||||||||||||||
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
*/
//LIBRARIES
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <curand.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA call with its file/line and (by default) terminates
// the process with the error code. Wrap calls via the gpuErrchk macro above.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess) return;  // fast path: nothing to report
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
}
//GPU KERNEL
//DRILL: flags lattice sites (in[n] = 1) that fall inside any of the `hole`
//random spheres and block-reduces the count of surviving atoms into
//natom[blockIdx.x].
//Launch: 1-D grid, power-of-two block size, blockDim.x*sizeof(int) dynamic
//shared memory.
//Fixes over the original: the starting index used blockIdx.x * gridDim.x,
//which is only correct when gridDim.x == blockDim.x (it happened to hold for
//the 512/512 launch); and the racy, redundant zeroing of natom inside the
//loop is removed (thread 0 unconditionally writes the final count).
__global__ void drill(float *nran, float *nran2, float *x, float *y, float *z, int *in, int *natom, int l, int hole, int a, int b, int c, float p)
{
    int n = threadIdx.x + blockIdx.x * blockDim.x;  // grid-stride start index
    extern __shared__ int cache[];
    int temp = 0;  // per-thread count of surviving atoms
    while(n < l)
    {
        int t_est = 0;
        float4 r;  // vacancy sphere: centre (x,y,z), radius^2 in w
        float4 rn; // this atom: position (x,y,z), squared distance in w
        rn.x = x[n];
        rn.y = y[n];
        rn.z = z[n];
        #pragma unroll
        for(int w = 0; w < hole; w++)
        {
            // sphere centre scaled into lattice coordinates
            r.x = p*a*nran[3*w];
            r.y = p*b*nran[3*w+1];
            r.z = p*c*nran[3*w+2];
            r.w = p*nran2[w];
            r.w = r.w*r.w;
            rn.w = (rn.x - r.x)*(rn.x - r.x);
            rn.w += (rn.y - r.y)*(rn.y - r.y);
            rn.w += (rn.z - r.z)*(rn.z - r.z);
            if( rn.w <= r.w) // atom lies inside this vacancy sphere
            {
                t_est = 1;
            }
        }
        if(t_est == 0)
        {
            temp++; //atom is not inside any of the vacancies, must be counted
            in[n] = 0;
        }
        else in[n] = 1;
        n += blockDim.x * gridDim.x;
    }
    // Shared-memory tree reduction of per-thread counts
    // (assumes blockDim.x is a power of two).
    cache[threadIdx.x] = temp;
    __syncthreads();
    int u = blockDim.x/2;
    while(u != 0)
    {
        if (threadIdx.x < u)
        {
            cache[threadIdx.x] += cache[threadIdx.x + u];
        }
        __syncthreads();
        u /= 2;
    }
    if (threadIdx.x == 0) natom[blockIdx.x] = cache[0];
}
//VARIABLES AND CONSTANTS
curandGenerator_t datagen; //cuRAND generator for hole centres (uniform) and radii (gaussian)
unsigned int grid = 512; //CUDA blocks per grid (X x Y x Z) - 1 if implicit
unsigned int block = 512;//CUDA threads per block (X x Y x Z) - 1 if implicit
int *natom, *nat; //Per-block counts of lattice atoms surviving drilling (device / pinned host)
int *in; //Integer array of binary values (1 = vacancy site, 0 = element), device
int *data; //Integer array of binary values, host
int a,b,c; //Integer lattice dimensions
int i,j,k,l; //Loop counters; l accumulates the total number of generated FCC sites
int count = 0; //Total count of lattice atoms
int hole; //Total count of holes to be drilled
int r; //(Integer) average size of holes i.e mean of gaussian hole
long long seed_uni; //Seed for uniform and gaussian random number generation
float p; //Lattice parameter
float *nran; //Random number distribution (uniform), device
float *nran2; //Random number distribution (gaussian), device
float *pore, *porer; //Vacancy data (x,y,z,r) format, pinned host copies
float stddeva; //Standard deviation of gaussian distribution of hole sizes
float ang1, ang2, ang3; //Angstrom input value
int elem1, elem2; //Lattice composition (elements)
double cpu_time_used; //Measured total simulation time
clock_t start_t, end_t; //Ticks of CPU clock to measure time
float *x, *y, *z; //Buffer filling of lattice volume (host, grown with realloc)
float *fx, *fy, *fz; //Effective FCC filling of lattice volume
float *dx, *dy, *dz; //Memory block for CUDA device - if it doesn't fit now you can be sure you can't optimize/dynamics
FILE *inp_file;
FILE *out_file;
FILE *dat_file;
//MAIN PROGRAM
int main()
{
//Pseudo Random Number Generators
curandCreateGenerator(&datagen, CURAND_RNG_PSEUDO_MTGP32);
//I/O
if((inp_file=fopen("cheesinput.dat", "r")) == NULL){
printf("ERROR 404: input file not found\nPlease check and retry\n");
exit(1);
}
if((out_file=fopen("coord_z.xyz", "w")) == NULL){
printf("ERROR 777: output file could not be opened\nPlease check system and retry\n");
exit(1);
}
if ((dat_file=fopen("porus.dat", "w")) == NULL){
printf("ERROR 999: pore files could not be opened\nPlease check system and retry\n");
exit(1);
}
//READING FILES
fscanf(inp_file, "%d %d\n", &elem1, &elem2); //Reading strings is a mess and so is fscanf in general, please beware of this
fscanf(inp_file, "%f\n", &p);
fscanf(inp_file, "%f %f %f\n", &ang1, &ang2, &ang3);
p /= 2.0;
a = 0;
b = 0;
c = 0;
while( (float(a+1)*p) <= ang1) a++;
while( (float(b+1)*p) <= ang2) b++;
while( (float(c+1)*p) <= ang3) c++;
fscanf(inp_file, "%d\n", &hole);
fscanf(inp_file, "%f\n", &ang1);
fscanf(inp_file, "%f\n", &stddeva);
r = 0;
while( (float(r+1)*p) <= ang1) r++;
//printf("\n a is %d \n b is %d \n c is %d \n r is %d\n", a,b,c,r);
fscanf(inp_file, "%lld\n", &seed_uni);
curandSetPseudoRandomGeneratorSeed(datagen, seed_uni);
//FILLING THE VOLUME WITH A FCC LATTICE
fx = NULL;
fy = NULL;
fz = NULL;
l = 0;
start_t = clock();
for(i = 0; i < a; i++){
for(j = 0; j < b; j++){
for(k = 0; k < c; k++){
//Filling space with the triplets U={(x,y,z)|x+y+z is even} creates the FCC Bravais Lattice, and the corresponding Space Group
if((i+j+k)%2 == 0)
{
l++;
/*What is faster? One loop to count, allocate in one go, and another loop to fill vectors
//Or fill in as allocate with realloc? Alternatively, realloc with bigger blocks and remove overhead
//Alternatively use a kernel for this (calculating the elements which belong to U)
//Note that this procedure is still fast enough, as realistically we will not have issues with structure-building
//Compared to everything else that has to be done to the structure (optimization, dynamics, etc.)
*/
fx = (float*) realloc (x, sizeof(float)*l);
fy = (float*) realloc (y, sizeof(float)*l);
fz = (float*) realloc (z, sizeof(float)*l);
if( (fx != NULL)&&(fy != NULL)&&(fz != NULL) )
{
x = fx;
y = fy;
z = fz;
x[l-1] = p*i;
y[l-1] = p*j;
z[l-1] = p*k;
}
else
{
puts("Error allocating new memory block. Please try again with a smaller file size");
exit(1);
}
}
}
}
}
gpuErrchk( cudaMalloc((void**)&dx, sizeof(float)*l) );
gpuErrchk( cudaMalloc((void**)&dy, sizeof(float)*l) );
gpuErrchk( cudaMalloc((void**)&dz, sizeof(float)*l) );
gpuErrchk( cudaMalloc((void**)&in, sizeof(int)*l) );
cudaMalloc((void**)&natom, sizeof(int)*block);
cudaMallocHost((void**)&nat, sizeof(int)*block);
cudaMalloc((void**)&nran, sizeof(float)*3*hole);
cudaMalloc((void**)&nran2, sizeof(float)*hole);
cudaMallocHost((void**)&data, sizeof(int)*l);
cudaMallocHost((void**)&pore, sizeof(float)*3*hole);
cudaMallocHost((void**)&porer, sizeof(float)*hole);
float rf = float(r);
float stand = float(stddeva);
curandGenerateUniform(datagen, nran, 3*hole);
curandGenerateNormal(datagen, nran2, hole, rf, stand);
//LOADING GPU
cudaMemcpyAsync(dx, x, sizeof(float)*l, cudaMemcpyHostToDevice);
cudaMemcpyAsync(dy, y, sizeof(float)*l, cudaMemcpyHostToDevice);
cudaMemcpyAsync(dz, z, sizeof(float)*l, cudaMemcpyHostToDevice);
drill<<<grid,block,block*sizeof(int)>>>(nran, nran2, dx, dy, dz, in, natom, l, hole, a, b, c, p);
cudaMemcpy(nat, natom, sizeof(int)*block, cudaMemcpyDeviceToHost);
cudaMemcpy(data, in, sizeof(int)*l, cudaMemcpyDeviceToHost);
cudaMemcpy(pore, nran, sizeof(float)*3*hole, cudaMemcpyDeviceToHost);
cudaMemcpy(porer, nran2, sizeof(float)*hole, cudaMemcpyDeviceToHost);
for(i = 0; i < block; i++){
count+=nat[i];
}
//int testdat = 0;
//WRITING OUTPUT DATA
fprintf(out_file,"%d\n",count);
fprintf(out_file,"%f\n",2.0*p);
//#pragma unroll
for(i = 0; i < l; i++)
{
if(data[i] == 0 )
{
//testdat++;
if(i%2==0) fprintf(out_file, "%d %.5f %.5f %.5f\n", elem1, x[i], y[i], z[i]); //String types are problematic
else fprintf(out_file, "%d %.5f %.5f %.5f\n", elem2, x[i], y[i], z[i]); //Prefer atomic number instead
}
}
//printf("data was 0 %d times\n total number of elements is %d\n", testdat,l); //Test only
#pragma unroll
for(i = 0; i < hole; i++)
{
int xk = int(p*a*pore[3*i]);
int yk = int(p*b*pore[3*i+1]);
int zk = int(p*c*pore[3*i+2]);
int rk = int(porer[i]*p);
fprintf(dat_file, "%d %d %d %d\n", xk, yk, zk, rk );
}
//WRAPPING UP
curandDestroyGenerator(datagen);
cudaFree(dx);
cudaFree(dy);
cudaFree(dz);
cudaFree(in);
cudaFree(natom);
cudaFree(nran);
cudaFree(nran2);
cudaFreeHost(pore);
cudaFreeHost(porer);
cudaFreeHost(data);
cudaFreeHost(nat);
free(x);
free(y);
free(z);
end_t = clock();
cpu_time_used = ((double) (end_t - start_t)) / CLOCKS_PER_SEC;
printf(">>>Ending Simulation\n");
printf("Total Time Elapsed (s) %13.3lf\n", cpu_time_used);
printf("======================================================================~\n");
return 0;
}
|
178
|
// Copyright (c) 2019-2020, NVIDIA CORPORATION.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/complex.h>
///////////////////////////////////////////////////////////////////////////////
// CONVOLVE //
///////////////////////////////////////////////////////////////////////////////
// Naive 1D convolution: each thread produces output samples via a grid-stride
// loop. `mode` selects the output-size convention: 0 = valid, 1 = same,
// anything else = full. In "same" mode with swapped inputs, the window origin
// is shifted so the result matches the non-swapped argument order.
template<typename T>
__device__ void _cupy_convolve( const T *__restrict__ inp,
                                const int inpW,
                                const T *__restrict__ kernel,
                                const int kerW,
                                const int mode,
                                const bool swapped_inputs,
                                T *__restrict__ out,
                                const int outW ) {

    const int tid0 { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) };
    const int step { static_cast<int>( blockDim.x * gridDim.x ) };

    for ( int n = tid0; n < outW; n += step ) {
        T acc {};

        if ( mode == 0 ) {  // Valid: window lies fully inside the input
            if ( n >= 0 && n < inpW ) {
                for ( int k = 0; k < kerW; k++ ) {
                    acc += inp[n + k] * kernel[( kerW - 1 ) - k];
                }
            }
        } else {
            // "Same" (mode == 1) and "Full" share the guarded window loop;
            // only the starting offset of the window differs.
            int first {};
            if ( mode == 1 ) {
                first = swapped_inputs ? ( ( inpW - 1 ) / 2 ) - ( kerW - 1 ) + n
                                       : n - ( kerW / 2 );
            } else {  // Full
                first = n - ( kerW - 1 );
            }
            for ( int k = 0; k < kerW; k++ ) {
                const int idx = first + k;
                if ( idx >= 0 && idx < inpW ) {
                    acc += inp[idx] * kernel[( kerW - 1 ) - k];
                }
            }
        }

        out[n] = acc;
    }
}
// C-ABI entry points: one thin wrapper per element type, each forwarding
// unchanged to the _cupy_convolve<T> template above. Launched with at most
// 512 threads per block (__launch_bounds__); 1D grid.
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve_int32( const int *__restrict__ inp,
                                                                          const int inpW,
                                                                          const int *__restrict__ kernel,
                                                                          const int kerW,
                                                                          const int mode,
                                                                          const bool swapped_inputs,
                                                                          int *__restrict__ out,
                                                                          const int outW ) {
    _cupy_convolve<int>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve_int64( const long int *__restrict__ inp,
                                                                          const int inpW,
                                                                          const long int *__restrict__ kernel,
                                                                          const int kerW,
                                                                          const int mode,
                                                                          const bool swapped_inputs,
                                                                          long int *__restrict__ out,
                                                                          const int outW ) {
    _cupy_convolve<long int>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve_float32( const float *__restrict__ inp,
                                                                            const int inpW,
                                                                            const float *__restrict__ kernel,
                                                                            const int kerW,
                                                                            const int mode,
                                                                            const bool swapped_inputs,
                                                                            float *__restrict__ out,
                                                                            const int outW ) {
    _cupy_convolve<float>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve_float64( const double *__restrict__ inp,
                                                                            const int inpW,
                                                                            const double *__restrict__ kernel,
                                                                            const int kerW,
                                                                            const int mode,
                                                                            const bool swapped_inputs,
                                                                            double *__restrict__ out,
                                                                            const int outW ) {
    _cupy_convolve<double>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW );
}
// NOTE(review): unlike the other overloads, the complex64 wrapper takes its
// input/kernel pointers without const — presumably an upstream inconsistency.
extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_convolve_complex64( thrust::complex<float> *__restrict__ inp,
                              const int inpW,
                              thrust::complex<float> *__restrict__ kernel,
                              const int kerW,
                              const int mode,
                              const bool swapped_inputs,
                              thrust::complex<float> *__restrict__ out,
                              const int outW ) {
    _cupy_convolve<thrust::complex<float>>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_convolve_complex128( const thrust::complex<double> *__restrict__ inp,
                               const int inpW,
                               const thrust::complex<double> *__restrict__ kernel,
                               const int kerW,
                               const int mode,
                               const bool swapped_inputs,
                               thrust::complex<double> *__restrict__ out,
                               const int outW ) {
    _cupy_convolve<thrust::complex<double>>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW );
}
///////////////////////////////////////////////////////////////////////////////
// CORRELATE //
///////////////////////////////////////////////////////////////////////////////
// Naive 1D cross-correlation (kernel not flipped): each thread produces
// output samples via a grid-stride loop. `mode`: 0 = valid, 1 = same,
// anything else = full. With swapped inputs the result is written reversed
// so it matches the non-swapped argument order.
template<typename T>
__device__ void _cupy_correlate( const T *__restrict__ inp,
                                 const int inpW,
                                 const T *__restrict__ kernel,
                                 const int kerW,
                                 const int mode,
                                 const bool swapped_inputs,
                                 T *__restrict__ out,
                                 const int outW ) {

    const int tid0 { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) };
    const int step { static_cast<int>( blockDim.x * gridDim.x ) };

    for ( int n = tid0; n < outW; n += step ) {
        T acc {};

        if ( mode == 0 ) {  // Valid: window lies fully inside the input
            if ( n >= 0 && n < inpW ) {
                for ( int k = 0; k < kerW; k++ ) {
                    acc += inp[n + k] * kernel[k];
                }
            }
        } else {
            // "Same" (mode == 1) and "Full" share the guarded window loop;
            // only the starting offset differs (note the extra +1 in the
            // swapped "same" case, mirroring the original).
            int first {};
            if ( mode == 1 ) {
                first = swapped_inputs ? ( ( inpW - 1 ) / 2 ) - ( kerW - 1 ) + n + 1
                                       : n - ( kerW / 2 );
            } else {  // Full
                first = n - ( kerW - 1 );
            }
            for ( int k = 0; k < kerW; k++ ) {
                const int idx = first + k;
                if ( idx >= 0 && idx < inpW ) {
                    acc += inp[idx] * kernel[k];
                }
            }
        }

        if ( swapped_inputs ) {
            out[outW - n - 1] = acc; // TODO: Move to shared memory
        } else {
            out[n] = acc;
        }
    }
}
// C-ABI entry points: one thin wrapper per element type, each forwarding
// unchanged to the _cupy_correlate<T> template above. Max 512 threads/block.
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_correlate_int32( const int *__restrict__ inp,
                                                                           const int inpW,
                                                                           const int *__restrict__ kernel,
                                                                           const int kerW,
                                                                           const int mode,
                                                                           const bool swapped_inputs,
                                                                           int *__restrict__ out,
                                                                           const int outW ) {
    _cupy_correlate<int>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_correlate_int64( const long int *__restrict__ inp,
                                                                           const int inpW,
                                                                           const long int *__restrict__ kernel,
                                                                           const int kerW,
                                                                           const int mode,
                                                                           const bool swapped_inputs,
                                                                           long int *__restrict__ out,
                                                                           const int outW ) {
    _cupy_correlate<long int>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_correlate_float32( const float *__restrict__ inp,
                                                                             const int inpW,
                                                                             const float *__restrict__ kernel,
                                                                             const int kerW,
                                                                             const int mode,
                                                                             const bool swapped_inputs,
                                                                             float *__restrict__ out,
                                                                             const int outW ) {
    _cupy_correlate<float>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_correlate_float64( const double *__restrict__ inp,
                                                                             const int inpW,
                                                                             const double *__restrict__ kernel,
                                                                             const int kerW,
                                                                             const int mode,
                                                                             const bool swapped_inputs,
                                                                             double *__restrict__ out,
                                                                             const int outW ) {
    _cupy_correlate<double>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW );
}
// NOTE(review): complex64 pointers are non-const here, unlike the other
// overloads — presumably an upstream inconsistency.
extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_correlate_complex64( thrust::complex<float> *__restrict__ inp,
                               const int inpW,
                               thrust::complex<float> *__restrict__ kernel,
                               const int kerW,
                               const int mode,
                               const bool swapped_inputs,
                               thrust::complex<float> *__restrict__ out,
                               const int outW ) {
    _cupy_correlate<thrust::complex<float>>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_correlate_complex128( const thrust::complex<double> *__restrict__ inp,
                                const int inpW,
                                const thrust::complex<double> *__restrict__ kernel,
                                const int kerW,
                                const int mode,
                                const bool swapped_inputs,
                                thrust::complex<double> *__restrict__ out,
                                const int outW ) {
    _cupy_correlate<thrust::complex<double>>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW );
}
///////////////////////////////////////////////////////////////////////////////
// CONVOLVE 2D //
///////////////////////////////////////////////////////////////////////////////
// 2D convolution of a row-major image `inp` with a flipped kernel.
// One thread per output pixel: blockIdx.x/threadIdx.x select the output
// column (ty), blockIdx.y/threadIdx.y the output row (tx) — note the
// deliberate x/y swap. `pick` selects the kernel shape: 1 = odd square,
// 2 = even square, otherwise non-square. S0/S1 are half-size/offset values
// supplied by the host launcher — exact convention set there (TODO confirm).
template<typename T>
__device__ void _cupy_convolve2D( const T *__restrict__ inp,
                                  const int inpW,
                                  const int inpH,
                                  const T *__restrict__ kernel,
                                  const int kerW,
                                  const int kerH,
                                  const int S0,
                                  const int S1,
                                  T *__restrict__ out,
                                  const int outW,
                                  const int outH,
                                  const int pick ) {
    const int ty { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) };
    const int tx { static_cast<int>( blockIdx.y * blockDim.y + threadIdx.y ) };
    // Input-row origin: shifted by S0 for square kernels, S1 for non-square.
    int i {};
    if ( pick != 3 ) {
        i = tx + S0;
    } else {
        i = tx + S1;
    }
    int j { ty + S0 };
    int2 oPixelPos { tx, ty };
    if ( ( tx < outH ) && ( ty < outW ) ) {
        T temp {};
        // Odd kernel
        if ( pick == 1 ) {
            for ( int k = -S0; k < ( S0 + 1 ); k++ ) {
                for ( int l = -S0; l < ( S0 + 1 ); l++ ) {
                    // Convolution: coefficient index is mirrored (-k, -l).
                    int2 iPixelPos { ( i + k ), ( j + l ) };
                    int2 coefPos { ( -k + S0 ), ( -l + S0 ) };
                    temp += inp[iPixelPos.x * inpW + iPixelPos.y] * kernel[coefPos.x * kerW + coefPos.y];
                }
            }
            // Even kernel
        } else if ( pick == 2 ) {
            for ( int k = -S0; k < S0; k++ ) {
                for ( int l = -S0; l < S0; l++ ) {
                    int2 iPixelPos { ( i + k ), ( j + l ) };
                    int2 coefPos { ( -k + S0 - 1 ), ( -l + S0 - 1 ) };
                    temp += inp[iPixelPos.x * inpW + iPixelPos.y] * kernel[coefPos.x * kerW + coefPos.y];
                }
            }
            // Non-squares kernel
        } else {
            for ( int k = 0; k < S0; k++ ) {
                for ( int l = 0; l < S1; l++ ) {
                    int2 iPixelPos { ( i + k - S1 ), ( j + l - S0 ) };
                    int2 coefPos { ( -k + S0 - 1 ), ( -l + S1 - 1 ) };
                    // NOTE(review): this branch strides the kernel by kerH
                    // (the square branches use kerW) — looks intentional for
                    // the launcher's non-square layout, but confirm upstream.
                    temp += inp[iPixelPos.x * inpW + iPixelPos.y] * kernel[coefPos.x * kerH + coefPos.y];
                }
            }
        }
        out[oPixelPos.x * outW + oPixelPos.y] = temp;
    }
}
// C-ABI entry points: one thin wrapper per element type, each forwarding
// unchanged to _cupy_convolve2D<T>. 2D launch, max 256 threads/block.
extern "C" __global__ void __launch_bounds__( 256 ) _cupy_convolve2D_int32( const int *__restrict__ inp,
                                                                            const int inpW,
                                                                            const int inpH,
                                                                            const int *__restrict__ kernel,
                                                                            const int kerW,
                                                                            const int kerH,
                                                                            const int S0,
                                                                            const int S1,
                                                                            int *__restrict__ out,
                                                                            const int outW,
                                                                            const int outH,
                                                                            const int pick ) {
    _cupy_convolve2D<int>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick );
}
extern "C" __global__ void __launch_bounds__( 256 ) _cupy_convolve2D_int64( const long int *__restrict__ inp,
                                                                            const int inpW,
                                                                            const int inpH,
                                                                            const long int *__restrict__ kernel,
                                                                            const int kerW,
                                                                            const int kerH,
                                                                            const int S0,
                                                                            const int S1,
                                                                            long int *__restrict__ out,
                                                                            const int outW,
                                                                            const int outH,
                                                                            const int pick ) {
    _cupy_convolve2D<long int>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick );
}
extern "C" __global__ void __launch_bounds__( 256 ) _cupy_convolve2D_float32( const float *__restrict__ inp,
                                                                              const int inpW,
                                                                              const int inpH,
                                                                              const float *__restrict__ kernel,
                                                                              const int kerW,
                                                                              const int kerH,
                                                                              const int S0,
                                                                              const int S1,
                                                                              float *__restrict__ out,
                                                                              const int outW,
                                                                              const int outH,
                                                                              const int pick ) {
    _cupy_convolve2D<float>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick );
}
extern "C" __global__ void __launch_bounds__( 256 ) _cupy_convolve2D_float64( const double *__restrict__ inp,
                                                                              const int inpW,
                                                                              const int inpH,
                                                                              const double *__restrict__ kernel,
                                                                              const int kerW,
                                                                              const int kerH,
                                                                              const int S0,
                                                                              const int S1,
                                                                              double *__restrict__ out,
                                                                              const int outW,
                                                                              const int outH,
                                                                              const int pick ) {
    _cupy_convolve2D<double>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick );
}
extern "C" __global__ void __launch_bounds__( 256 )
    _cupy_convolve2D_complex64( const thrust::complex<float> *__restrict__ inp,
                                const int inpW,
                                const int inpH,
                                const thrust::complex<float> *__restrict__ kernel,
                                const int kerW,
                                const int kerH,
                                const int S0,
                                const int S1,
                                thrust::complex<float> *__restrict__ out,
                                const int outW,
                                const int outH,
                                const int pick ) {
    _cupy_convolve2D<thrust::complex<float>>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick );
}
extern "C" __global__ void __launch_bounds__( 256 )
    _cupy_convolve2D_complex128( const thrust::complex<double> *__restrict__ inp,
                                 const int inpW,
                                 const int inpH,
                                 const thrust::complex<double> *__restrict__ kernel,
                                 const int kerW,
                                 const int kerH,
                                 const int S0,
                                 const int S1,
                                 thrust::complex<double> *__restrict__ out,
                                 const int outW,
                                 const int outH,
                                 const int pick ) {
    _cupy_convolve2D<thrust::complex<double>>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick );
}
///////////////////////////////////////////////////////////////////////////////
// CORRELATE 2D //
///////////////////////////////////////////////////////////////////////////////
// 2D cross-correlation of a row-major image `inp` (kernel NOT flipped —
// coefficient index tracks +k/+l, unlike _cupy_convolve2D's -k/-l).
// One thread per output pixel; blockIdx.x/threadIdx.x select the output
// column (ty), blockIdx.y/threadIdx.y the row (tx). `pick`: 1 = odd square,
// 2 = even square, otherwise non-square. S0/S1 come from the host launcher —
// exact convention set there (TODO confirm).
template<typename T>
__device__ void _cupy_correlate2D( const T *__restrict__ inp,
                                   const int inpW,
                                   const int inpH,
                                   const T *__restrict__ kernel,
                                   const int kerW,
                                   const int kerH,
                                   const int S0,
                                   const int S1,
                                   T *__restrict__ out,
                                   const int outW,
                                   const int outH,
                                   const int pick ) {
    const int ty { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) };
    const int tx { static_cast<int>( blockIdx.y * blockDim.y + threadIdx.y ) };
    // Input-row origin: shifted by S0 for square kernels, S1 for non-square.
    int i {};
    if ( pick != 3 ) {
        i = tx + S0;
    } else {
        i = tx + S1;
    }
    int j { ty + S0 };
    int2 oPixelPos { tx, ty };
    if ( ( tx < outH ) && ( ty < outW ) ) {
        T temp {};
        // Odd
        if ( pick == 1 ) {
            for ( int k = -S0; k < ( S0 + 1 ); k++ ) {
                for ( int l = -S0; l < ( S0 + 1 ); l++ ) {
                    int2 iPixelPos { ( i + k ), ( j + l ) };
                    int2 coefPos { ( k + S0 ), ( l + S0 ) };
                    temp += inp[iPixelPos.x * inpW + iPixelPos.y] * kernel[coefPos.x * kerW + coefPos.y];
                }
            }
            // Even
        } else if ( pick == 2 ) {
            for ( int k = -S0; k < S0; k++ ) {
                for ( int l = -S0; l < S0; l++ ) {
                    int2 iPixelPos { ( i + k ), ( j + l ) }; // iPixelPos[1], [0]
                    int2 coefPos { ( k + S0 ), ( l + S0 ) };
                    temp += inp[iPixelPos.x * inpW + iPixelPos.y] * kernel[coefPos.x * kerW + coefPos.y];
                }
            }
            // Non-squares
        } else {
            for ( int k = 0; k < S0; k++ ) {
                for ( int l = 0; l < S1; l++ ) {
                    int2 iPixelPos { ( i + k - S1 ), ( j + l - S0 ) };
                    int2 coefPos { k, l };
                    // NOTE(review): this branch strides the kernel by kerH
                    // (square branches use kerW) — confirm against the
                    // launcher's non-square coefficient layout.
                    temp += inp[iPixelPos.x * inpW + iPixelPos.y] * kernel[coefPos.x * kerH + coefPos.y];
                }
            }
        }
        out[oPixelPos.x * outW + oPixelPos.y] = temp;
    }
}
// C-ABI entry points: one thin wrapper per element type, each forwarding
// unchanged to _cupy_correlate2D<T>. 2D launch, max 256 threads/block.
extern "C" __global__ void __launch_bounds__( 256 ) _cupy_correlate2D_int32( const int *__restrict__ inp,
                                                                             const int inpW,
                                                                             const int inpH,
                                                                             const int *__restrict__ kernel,
                                                                             const int kerW,
                                                                             const int kerH,
                                                                             const int S0,
                                                                             const int S1,
                                                                             int *__restrict__ out,
                                                                             const int outW,
                                                                             const int outH,
                                                                             const int pick ) {
    _cupy_correlate2D<int>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick );
}
extern "C" __global__ void __launch_bounds__( 256 ) _cupy_correlate2D_int64( const long int *__restrict__ inp,
                                                                             const int inpW,
                                                                             const int inpH,
                                                                             const long int *__restrict__ kernel,
                                                                             const int kerW,
                                                                             const int kerH,
                                                                             const int S0,
                                                                             const int S1,
                                                                             long int *__restrict__ out,
                                                                             const int outW,
                                                                             const int outH,
                                                                             const int pick ) {
    _cupy_correlate2D<long int>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick );
}
extern "C" __global__ void __launch_bounds__( 256 ) _cupy_correlate2D_float32( const float *__restrict__ inp,
                                                                               const int inpW,
                                                                               const int inpH,
                                                                               const float *__restrict__ kernel,
                                                                               const int kerW,
                                                                               const int kerH,
                                                                               const int S0,
                                                                               const int S1,
                                                                               float *__restrict__ out,
                                                                               const int outW,
                                                                               const int outH,
                                                                               const int pick ) {
    _cupy_correlate2D<float>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick );
}
extern "C" __global__ void __launch_bounds__(256 ) _cupy_correlate2D_float64( const double *__restrict__ inp,
                                                                              const int inpW,
                                                                              const int inpH,
                                                                              const double *__restrict__ kernel,
                                                                              const int kerW,
                                                                              const int kerH,
                                                                              const int S0,
                                                                              const int S1,
                                                                              double *__restrict__ out,
                                                                              const int outW,
                                                                              const int outH,
                                                                              const int pick ) {
    _cupy_correlate2D<double>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick );
}
extern "C" __global__ void __launch_bounds__(256 )
    _cupy_correlate2D_complex64( const thrust::complex<float> *__restrict__ inp,
                                 const int inpW,
                                 const int inpH,
                                 const thrust::complex<float> *__restrict__ kernel,
                                 const int kerW,
                                 const int kerH,
                                 const int S0,
                                 const int S1,
                                 thrust::complex<float> *__restrict__ out,
                                 const int outW,
                                 const int outH,
                                 const int pick ) {
    _cupy_correlate2D<thrust::complex<float>>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick );
}
extern "C" __global__ void __launch_bounds__( 256 )
    _cupy_correlate2D_complex128( const thrust::complex<double> *__restrict__ inp,
                                  const int inpW,
                                  const int inpH,
                                  const thrust::complex<double> *__restrict__ kernel,
                                  const int kerW,
                                  const int kerH,
                                  const int S0,
                                  const int S1,
                                  thrust::complex<double> *__restrict__ out,
                                  const int outW,
                                  const int outH,
                                  const int pick ) {
    _cupy_correlate2D<thrust::complex<double>>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick );
}
///////////////////////////////////////////////////////////////////////////////
// CONVOLVE 1D2O //
///////////////////////////////////////////////////////////////////////////////
// Second-order (Volterra-style) 1D convolution, "valid" mode only: combines
// two lagged copies of the input weighted by a 2D coefficient array.
// For any mode other than 0 the accumulator stays zero-initialized, so the
// output is filled with T{}.
template<typename T>
__device__ void _cupy_convolve1D2O( const T *__restrict__ inp,
                                    const int inpW,
                                    const T *__restrict__ kernel,
                                    const int kerW,
                                    const int kerH,
                                    const int mode,
                                    T *__restrict__ out,
                                    const int outW ) {

    const int tid0 { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) };
    const int step { static_cast<int>( blockDim.x * gridDim.x ) };

    for ( int n = tid0; n < outW; n += step ) {
        T acc {};
        if ( mode == 0 && n >= 0 && n < inpW ) {  // Valid
            for ( int r = 0; r < kerW; r++ ) {
                for ( int s = 0; s < kerH; s++ ) {
                    // Coefficient index kerW*r + s matches the original
                    // layout — assumes kerW == kerH for row-major access
                    // (TODO confirm upstream).
                    acc += inp[n + kerW - r - 1] * inp[n + kerH - s - 1] * kernel[kerW * r + s];
                }
            }
        }
        out[n] = acc;
    }
}
// C-ABI entry points: one thin wrapper per element type, each forwarding
// unchanged to _cupy_convolve1D2O<T>. Max 512 threads/block.
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D2O_int32( const int *__restrict__ inp,
                                                                              const int inpW,
                                                                              const int *__restrict__ kernel,
                                                                              const int kerW,
                                                                              const int kerH,
                                                                              const int mode,
                                                                              int *__restrict__ out,
                                                                              const int outW ) {
    _cupy_convolve1D2O<int>( inp, inpW, kernel, kerW, kerH, mode, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D2O_int64( const long int *__restrict__ inp,
                                                                              const int inpW,
                                                                              const long int *__restrict__ kernel,
                                                                              const int kerW,
                                                                              const int kerH,
                                                                              const int mode,
                                                                              long int *__restrict__ out,
                                                                              const int outW ) {
    _cupy_convolve1D2O<long int>( inp, inpW, kernel, kerW, kerH, mode, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D2O_float32( const float *__restrict__ inp,
                                                                                const int inpW,
                                                                                const float *__restrict__ kernel,
                                                                                const int kerW,
                                                                                const int kerH,
                                                                                const int mode,
                                                                                float *__restrict__ out,
                                                                                const int outW ) {
    _cupy_convolve1D2O<float>( inp, inpW, kernel, kerW, kerH, mode, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D2O_float64( const double *__restrict__ inp,
                                                                                const int inpW,
                                                                                const double *__restrict__ kernel,
                                                                                const int kerW,
                                                                                const int kerH,
                                                                                const int mode,
                                                                                double *__restrict__ out,
                                                                                const int outW ) {
    _cupy_convolve1D2O<double>( inp, inpW, kernel, kerW, kerH, mode, out, outW );
}
// NOTE(review): complex64 pointers are non-const here, unlike the other
// overloads — presumably an upstream inconsistency.
extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_convolve1D2O_complex64( thrust::complex<float> *__restrict__ inp,
                                  const int inpW,
                                  thrust::complex<float> *__restrict__ kernel,
                                  const int kerW,
                                  const int kerH,
                                  const int mode,
                                  thrust::complex<float> *__restrict__ out,
                                  const int outW ) {
    _cupy_convolve1D2O<thrust::complex<float>>( inp, inpW, kernel, kerW, kerH, mode, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_convolve1D2O_complex128( const thrust::complex<double> *__restrict__ inp,
                                   const int inpW,
                                   const thrust::complex<double> *__restrict__ kernel,
                                   const int kerW,
                                   const int kerH,
                                   const int mode,
                                   thrust::complex<double> *__restrict__ out,
                                   const int outW ) {
    _cupy_convolve1D2O<thrust::complex<double>>( inp, inpW, kernel, kerW, kerH, mode, out, outW );
}
///////////////////////////////////////////////////////////////////////////////
// CONVOLVE 1D3O //
///////////////////////////////////////////////////////////////////////////////
// Third-order (Volterra-style) 1D convolution, "valid" mode only: sums the
// products of three lagged copies of the input weighted by a 3D coefficient
// array. For modes other than 0 the accumulator stays zero-initialized, so
// the output is filled with T{}.
template<typename T>
__device__ void _cupy_convolve1D3O( const T *__restrict__ inp,
                                    const int inpW,
                                    const T *__restrict__ kernel,
                                    const int kerW,
                                    const int kerH,
                                    const int kerD,
                                    const int mode,
                                    T *__restrict__ out,
                                    const int outW ) {
    const int tx { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) };
    const int stride { static_cast<int>( blockDim.x * gridDim.x ) };
    // Grid-stride loop over output samples.
    for ( int tid = tx; tid < outW; tid += stride ) {
        T temp {};
        if ( mode == 0 ) { // Valid
            if ( tid >= 0 && tid < inpW ) {
                for ( int i = 0; i < kerW; i++ ) {
                    for ( int j = 0; j < kerH; j++ ) {
                        for ( int k = 0; k < kerD; k++ ) {
                            // NOTE(review): coefficient index (kerW*i + j)*kerH + k
                            // is row-major only when kerW == kerH == kerD —
                            // confirm the host only passes cubic kernels.
                            temp += inp[tid + kerW - i - 1] * inp[tid + kerH - j - 1] * inp[tid + kerD - k - 1] * kernel[ (kerW * i + j) * kerH + k ];
                        }
                    }
                }
            }
        }
        out[tid] = temp;
    }
}
// C-ABI entry points: one thin wrapper per element type, each forwarding
// unchanged to _cupy_convolve1D3O<T>. Max 512 threads/block.
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D3O_int32( const int *__restrict__ inp,
                                                                              const int inpW,
                                                                              const int *__restrict__ kernel,
                                                                              const int kerW,
                                                                              const int kerH,
                                                                              const int kerD,
                                                                              const int mode,
                                                                              int *__restrict__ out,
                                                                              const int outW ) {
    _cupy_convolve1D3O<int>( inp, inpW, kernel, kerW, kerH, kerD, mode, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D3O_int64( const long int *__restrict__ inp,
                                                                              const int inpW,
                                                                              const long int *__restrict__ kernel,
                                                                              const int kerW,
                                                                              const int kerH,
                                                                              const int kerD,
                                                                              const int mode,
                                                                              long int *__restrict__ out,
                                                                              const int outW ) {
    _cupy_convolve1D3O<long int>( inp, inpW, kernel, kerW, kerH, kerD, mode, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D3O_float32( const float *__restrict__ inp,
                                                                                const int inpW,
                                                                                const float *__restrict__ kernel,
                                                                                const int kerW,
                                                                                const int kerH,
                                                                                const int kerD,
                                                                                const int mode,
                                                                                float *__restrict__ out,
                                                                                const int outW ) {
    _cupy_convolve1D3O<float>( inp, inpW, kernel, kerW, kerH, kerD, mode, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D3O_float64( const double *__restrict__ inp,
                                                                                const int inpW,
                                                                                const double *__restrict__ kernel,
                                                                                const int kerW,
                                                                                const int kerH,
                                                                                const int kerD,
                                                                                const int mode,
                                                                                double *__restrict__ out,
                                                                                const int outW ) {
    _cupy_convolve1D3O<double>( inp, inpW, kernel, kerW, kerH, kerD, mode, out, outW );
}
// NOTE(review): complex64 pointers are non-const here, unlike the other
// overloads — presumably an upstream inconsistency.
extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_convolve1D3O_complex64( thrust::complex<float> *__restrict__ inp,
                                  const int inpW,
                                  thrust::complex<float> *__restrict__ kernel,
                                  const int kerW,
                                  const int kerH,
                                  const int kerD,
                                  const int mode,
                                  thrust::complex<float> *__restrict__ out,
                                  const int outW ) {
    _cupy_convolve1D3O<thrust::complex<float>>( inp, inpW, kernel, kerW, kerH, kerD, mode, out, outW );
}
extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_convolve1D3O_complex128( const thrust::complex<double> *__restrict__ inp,
                                   const int inpW,
                                   const thrust::complex<double> *__restrict__ kernel,
                                   const int kerW,
                                   const int kerH,
                                   const int kerD,
                                   const int mode,
                                   thrust::complex<double> *__restrict__ out,
                                   const int outW ) {
    _cupy_convolve1D3O<thrust::complex<double>>( inp, inpW, kernel, kerW, kerH, kerD, mode, out, outW );
}
|
179
|
/*
LICENSE: this code is subject to the license listed at
http://www.amolf.nl/~vanmeel/mdgpu/download.html
Among other restrictions, this code is released under the GNU Public License (GPL).
Authors:
A. Arnold (original)
Kipton Barros (modifications)
----
Generate pseudo-random numbers using a linear congruential generator. The generated
random numbers are identical to those produced by the lrand48() function
specified by POSIX (commonly shipped alongside the C standard library).
Usage:
// From host, allocate the Rand48 structure, pass it to CUDA, and release it.
// The random sequence is persistent across CUDA kernel calls.
void hostFunction() {
rng = new Rand48();
rng->init(GRID_DIM*BLOCK_DIM, SEED);
cudaFunction1 <<<GRID_DIM, BLOCK_DIM, sharedMem>>> (*rng);
cudaFunction2 <<<GRID_DIM, BLOCK_DIM, sharedMem>>> (*rng);
rng->destroy();
delete rng;
}
// From CUDA, load the random state from device memory into local registers,
// generate random numbers, and finally store state back to device memory.
// Note that the random state, rng, is stored in registers, and is being updated
// with each device call.
__global__ void cudaFunction1(Rand48 rng) {
rand48_loadState(rng);
...
rand48_nextInt(rng);
...
rand48_storeState(rng);
}
*/
// Host-side container for per-thread lrand48-compatible LCG state.
// init() precomputes "strided" constants so that each of nThreads threads can
// advance its own state by nThreads LCG steps per draw, so the interleaved
// per-thread sequences reproduce a single serial lrand48 stream.
struct Rand48 {
// strided iteration constants (48-bit, distributed on 2x 24-bit)
uint2 A, C;
// CUDA array -- random numbers for all threads
uint2 *state;
// random number for a single thread (used by CUDA device functions only)
uint2 state0;
// magic constants for rand48
static const unsigned long long a = 0x5DEECE66DLL, c = 0xB;
// Allocate device state for nThreads threads and seed the first nThreads
// values of the sequence. NOTE(review): cudaMalloc/cudaMemcpy return codes
// are not checked, and nThreads (int) is compared against unsigned i —
// fine only for nThreads >= 0.
void init(int nThreads, int seed) {
uint2* seeds = new uint2[ nThreads ];
cudaMalloc((void**) &state, sizeof(uint2)*nThreads);
// calculate strided iteration constants
// After the loop: A = a^nThreads and C = c*(1 + a + ... + a^(nThreads-1)),
// computed mod 2^64 and truncated to 48 bits below.
unsigned long long A, C;
A = 1LL; C = 0LL;
for (unsigned int i = 0; i < nThreads; ++i) {
C += A*c;
A *= a;
}
// Split each 48-bit constant into two 24-bit halves (x = low, y = high).
this->A.x = A & 0xFFFFFFLL;
this->A.y = (A >> 24) & 0xFFFFFFLL;
this->C.x = C & 0xFFFFFFLL;
this->C.y = (C >> 24) & 0xFFFFFFLL;
// prepare first nThreads random numbers from seed
// Same seed expansion as srand48: (seed << 16) | 0x330E.
unsigned long long x = (((unsigned long long)seed) << 16) | 0x330E;
for (unsigned int i = 0; i < nThreads; ++i) {
x = a*x + c;
seeds[i].x = x & 0xFFFFFFLL;
seeds[i].y = (x >> 24) & 0xFFFFFFLL;
}
cudaMemcpy(state, seeds, sizeof(uint2)*nThreads, cudaMemcpyHostToDevice);
delete[] seeds;
}
// Release the device-side state array allocated by init().
void destroy() {
cudaFree((void*) state);
}
};
// Copy this thread's persistent RNG state from global memory into registers.
// Thread index is flattened over a 2D grid of 2D blocks.
__device__ inline void rand48_loadState(Rand48 &r) {
  const int tid = ((blockIdx.y*gridDim.x + blockIdx.x) * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
  r.state0 = r.state[tid];
}
// Write this thread's RNG state back to global memory so the sequence
// persists across kernel launches. Same flattened index as rand48_loadState.
__device__ inline void rand48_storeState(Rand48 &r) {
  const int tid = ((blockIdx.y*gridDim.x + blockIdx.x) * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
  r.state[tid] = r.state0;
}
// Advance r.state0 by one strided LCG step (state = A*state + C mod 2^48),
// with the 48-bit state kept as two 24-bit limbs so carries can be handled
// in a single add. Uses __umul24/__umulhi 24-bit multiply intrinsics.
__device__ inline void rand48_iterate(Rand48 &r) {
// state0 is 2x 24bit to handle overflows optimally, i.e.
// in one operation.
// the multiplication commands however give the low and hi 32 bit,
// which have to be converted as follows:
// 48bit in bytes = ABCD EF (space marks 32bit boundary)
// R0 = ABC
// R1 = D EF
unsigned int R0, R1;
// low 24-bit multiplication
const unsigned int lo00 = __umul24(r.state0.x, r.A.x);
const unsigned int hi00 = __umulhi(r.state0.x, r.A.x);
// 24bit distribution of 32bit multiplication results
R0 = (lo00 & 0xFFFFFF);
R1 = (lo00 >> 24) | (hi00 << 8);
R0 += r.C.x; R1 += r.C.y;
// transfer overflows
R1 += (R0 >> 24);
R0 &= 0xFFFFFF;
// cross-terms, low/hi 24-bit multiplication
// (the high*high term exceeds 48 bits and is dropped by the mod.)
R1 += __umul24(r.state0.y, r.A.x);
R1 += __umul24(r.state0.x, r.A.y);
R1 &= 0xFFFFFF;
r.state0 = make_uint2(R0, R1);
}
// Returns the next pseudo-random 31-bit integer (lrand48-compatible) and
// advances the per-thread state.
__device__ inline int rand48_nextInt(Rand48 &r) {
  // Take the top 31 of the 48 state bits: 7 bits from the low 24-bit limb,
  // 24 bits from the high limb.
  const int result = ( r.state0.x >> 17 ) | ( r.state0.y << 7 );
  rand48_iterate(r);
  return result;
}
// Returns the next pseudo-random float uniformly distributed in [0, 1) and
// advances the per-thread state.
__device__ inline float rand48_nextFloat(Rand48 &r) {
  // Only the upper 24 state bits are used, since a float significand holds
  // exactly 24 bits (see the Java Random documentation for the same trick).
  const float result = static_cast<float>( r.state0.y ) / (float)( 1 << 24 );
  rand48_iterate(r);
  return result;
}
|
180
|
// Applies a 2D Gaussian blur to one image channel with clamp-to-edge borders.
// One thread per output pixel; launch with a 2D grid covering width x height.
// `filterWidth` is the side length of the square `gaussianKernel` (row-major);
// `filterHalf` assumes it is odd.
__global__ void gaussian_blur(const unsigned char *inputChannel, unsigned char *outputChannel,
                              const unsigned int width, const unsigned int height,
                              const float *gaussianKernel, const unsigned int filterWidth) {
  const unsigned int row = threadIdx.y + blockIdx.y * blockDim.y;
  const unsigned int col = threadIdx.x + blockIdx.x * blockDim.x;
  if(row < height && col < width) {
    const int filterHalf = filterWidth / 2;
    float blur = 0.0f;  // float literal avoids an implicit double promotion
    for(int i = -filterHalf; i <= filterHalf; i++) {
      for(int j = -filterHalf; j <= filterHalf; j++) {
        // Clamp in signed arithmetic. With the original unsigned math,
        // row + i underflowed for negative i near the top/left border, so
        // min() clamped to the FAR edge (height-1 / width-1) instead of 0.
        const int y = max(0, min(static_cast<int>(height) - 1, static_cast<int>(row) + i));
        const int x = max(0, min(static_cast<int>(width) - 1, static_cast<int>(col) + j));
        const float w = gaussianKernel[(j + filterHalf) + (i + filterHalf) * filterWidth];
        blur += w * inputChannel[x + y * width];
      }
    }
    outputChannel[col + row * width] = static_cast<unsigned char>(blur);
  }
}
|
181
|
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated fuzzing kernel: exercises float arithmetic and intrinsics
// with extreme-magnitude literals. `comp` is recomputed each iteration
// (earlier values are discarded except through the conditional `+=`), and the
// final value is printed from the device after the loop.
__global__
void compute(float comp, int var_1,float* var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16) {
for (int i=0; i < var_1; ++i) {
comp = (var_3 / (+1.7978E34f - (var_4 - (var_5 * +1.5645E-8f))));
var_2[i] = atanf((-0.0f * (+0.0f + (var_6 / -1.8126E-23f - var_7 - var_8))));
float tmp_1 = var_9 / +1.5703E9f;
comp = tmp_1 * var_2[i] * (var_10 - +1.4446E-36f);
if (comp >= +1.7516E36f / (var_11 / (-1.0045E36f / var_12))) {
comp += +1.8739E36f * (var_13 * (+1.5092E15f + +1.8891E-35f));
float tmp_2 = (-1.1125E-41f / cosf(-1.7053E-44f * +1.7129E-42f));
float tmp_3 = -1.1475E-37f / (var_14 / var_15);
comp = tmp_3 + tmp_2 + (var_16 / +1.9488E-30f);
}
}
// Device-side printf: the single-thread launch makes the output deterministic.
printf("%.17g\n", comp);
}
/* Allocates a 10-element float array on the host and fills every slot with
 * `v`. The caller owns the returned buffer and must free() it.
 * NOTE(review): the malloc result is not checked; a NULL return would crash
 * in the fill loop (same behavior as the original generated code). */
float* initPointer(float v) {
  float *buf = (float*) malloc(10 * sizeof(float));
  int i = 0;
  while (i < 10) {
    buf[i] = v;
    ++i;
  }
  return buf;
}
// Auto-generated driver: reads 17 numeric CLI arguments and launches the fuzz
// kernel on a single thread. NOTE(review): argc is not validated (fewer than
// 17 args dereferences past argv) and tmp_3 is never freed — tolerable for a
// generated one-shot test, but worth knowing.
int main(int argc, char** argv) {
/* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  float* tmp_3 = initPointer( atof(argv[3]) );
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17);
  // Block until the kernel finishes so its device printf output is flushed.
  cudaDeviceSynchronize();
  return 0;
}
|
182
|
#include<cstdio>
#include<memory>
#include<vector>
#include<functional>
#include<iostream>
using namespace std;
using fp = void(*)(int*);
/* Debug kernel: greets, then dumps the first ten ints of the device
   buffer as "index:value" lines. Launch with a single thread. */
__global__ void
test(int *d_data){
    printf("hello world\n");
    for (int k = 0; k < 10; ++k) {
        printf("%d:%d\n", k, d_data[k]);
    }
}
/* Owns a single device allocation through a unique_ptr whose custom
   deleter calls cudaFree. */
int uniquePtr(){
cout<<"uniquePtr"<<endl;
int *d_data0;
function<void(int*)> lambda = [](int*p){cudaFree(p);};
unique_ptr<int,function<void(int*)>> d_data{nullptr, lambda};
cudaMalloc((void**)&d_data0,sizeof(int)*10);
d_data.reset(d_data0);
// Hand the raw pointer to unique_ptr for lifetime management.
// The unique_ptr's lifetime must be considered together with cudaDeviceReset:
// cudaDeviceReset tears down the whole context, so if cudaFree has not run
// beforehand the allocation is leaked.
// Conversely, without calling cudaDeviceReset, the deleter only runs after
// main's lifetime has ended.
int h_data[10] = {1,2,3,4,5,6,7,8,9,10};
cudaMemcpy(d_data.get(),h_data,sizeof(int)*10,cudaMemcpyHostToDevice);
test<<<1,1>>>(d_data.get());
cudaDeviceSynchronize();
return 0;
}
/* Holds several device allocations in a vector of unique_ptrs whose
   custom deleter calls cudaFree; each round uploads and prints a buffer. */
int uniquePtr1(){
    cout<<"uniquePtr1"<<endl;
    function<void(int*)> deleter = [](int* ptr){ cudaFree(ptr); };
    vector<unique_ptr<int, function<void(int*)>> > owners;
    for (int round = 0; round < 2; ++round) {
        owners.emplace_back(nullptr, deleter);
        int* raw;
        cudaMalloc((void**)&raw, sizeof(int) * 10);
        owners.back().reset(raw);   // ownership transfers to the smart pointer
        int h_data[10] = {1,2,3,4,5,6,7,8,9,10};
        cudaMemcpy(owners.back().get(), h_data, sizeof(int) * 10, cudaMemcpyHostToDevice);
        test<<<1,1>>>(owners.back().get());
    }
    cudaDeviceSynchronize();
    return 0;
}
/* Raw-pointer baseline: cudaFree is deliberately omitted so the leak can
   be observed with cuda-memcheck (see original comment). */
int normal(){
    cout<<"normal"<<endl;
    int* devBuf;
    cudaMalloc((void**)&devBuf, sizeof(int) * 10);
    int h_data[10] = {1,2,3,4,5,6,7,8,9,10};
    cudaMemcpy(devBuf, h_data, sizeof(int) * 10, cudaMemcpyHostToDevice);
    test<<<1,1>>>(devBuf);
    cudaDeviceSynchronize();
    return 0;
}
/* With -DUNIQUE the smart-pointer variants run; otherwise the leaking
   raw-pointer variant (useful as a cuda-memcheck demo). */
int main(){
#ifdef UNIQUE
uniquePtr();
cout<<"-------------"<<endl;
uniquePtr1();
#else
normal();
#endif
// Be sure to include this call: otherwise the underlying context frees
// unreleased memory by itself on teardown. Calling it explicitly means
// allocations must have been released manually beforehand.
cudaDeviceReset();
return 0;
}
|
183
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime_api.h>
#define BASE_TYPE float
/**
 * Computes one partial dot product of a and b per block into
 * result[blockIdx.x]: each thread multiplies one element pair into dynamic
 * shared memory, then thread 0 serially accumulates the block's products.
 * Launch with blockDim.x * sizeof(BASE_TYPE) bytes of dynamic shared memory.
 * Fix: the N parameter was previously unused — threads past the end of the
 * arrays read out of bounds whenever blockDim.x * gridDim.x > N. Such
 * threads now contribute 0, which leaves in-range results unchanged.
 */
__global__ void dot_produce(const BASE_TYPE *a, const BASE_TYPE *b, BASE_TYPE *result, const int N)
{
extern __shared__ BASE_TYPE s[];
int index = blockDim.x * blockIdx.x + threadIdx.x;
// Out-of-range lanes contribute the additive identity.
s[threadIdx.x] = (index < N) ? a[index] * b[index] : 0;
__syncthreads();
if (threadIdx.x == 0) {
    BASE_TYPE sum = s[0];
    for (int i = 1; i < blockDim.x; i++)
        sum += s[i];
    result[blockIdx.x] = sum;
}
}
/* Builds a host array of N BASE_TYPEs initialised to 0, 1, ..., N-1.
   Caller releases it with delete[]. */
BASE_TYPE* gen_array(const int N)
{
    BASE_TYPE* values = new BASE_TYPE[N];
    for (int idx = 0; idx < N; ++idx)
        values[idx] = idx;
    return values;
}
/* Prints N elements of a host vector on one line, "%3.0f"-formatted. */
void print_vector(BASE_TYPE *a, const int N)
{
    for (int idx = 0; idx < N; ++idx) {
        printf("%3.0f ", a[idx]);
    }
    printf("\n");
}
/* Allocates `size` bytes on the device into *dev and, when host is
   non-NULL, copies `size` bytes from host into it.
   Throws the cudaError_t on any allocation or copy failure. */
void cuda_init_array(BASE_TYPE **dev, const BASE_TYPE *host, const size_t size)
{
cudaError_t err;
err = cudaMalloc((void **)dev, size);
if (err != cudaSuccess)
throw err;
if (host != NULL)
{
err = cudaMemcpy(*dev, host, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
throw err;
}
}
/**
 * Derives a 1-D launch configuration covering N elements.
 * Fix: the threadsPerBlock and N parameters were previously ignored — the
 * grid was hard-coded to 1 and the block to N, which breaks for
 * N > maxThreadsPerBlock. Now block = threadsPerBlock and
 * grid = ceil(N / threadsPerBlock); for the in-file call site
 * (N == threadsPerBlock == 10) the resulting configuration is unchanged.
 */
void cuda_init_grid_and_block(dim3 *grid, dim3 *block, const int threadsPerBlock, const int N)
{
*grid = dim3((N + threadsPerBlock - 1) / threadsPerBlock);
*block = dim3(threadsPerBlock);
printf("Block %d %d %d\n", block->x, block->y, block->z);
printf("Grid %d %d %d\n", grid->x, grid->y, grid->z);
}
/* Dot product of two length-10 ramp vectors on the GPU; expected output 285. */
int main()
{
const int N = 10;
const int threadsPerBlock = N;
const size_t size = N * sizeof(BASE_TYPE);
// size / threadsPerBlock == sizeof(BASE_TYPE): one partial result (1 block).
const size_t result_size = size / threadsPerBlock;
cudaError_t err;
// NOTE(review): the names are swapped -- blockDim receives the *grid*
// dimensions and gridDim the *block* dimensions -- but the
// <<<blockDim, gridDim>>> launch below is consistent with that swap.
dim3 blockDim, gridDim;
cuda_init_grid_and_block(&blockDim, &gridDim, threadsPerBlock, N);
BASE_TYPE *host_a = gen_array(N), *host_b = gen_array(N);
BASE_TYPE *dev_a, *dev_b, *dev_c;
BASE_TYPE result;
print_vector(host_a, N);
print_vector(host_b, N);
try
{
cuda_init_array(&dev_a, host_a, size);
cuda_init_array(&dev_b, host_b, size);
// dev_c holds one partial dot product per block (a single block here).
cuda_init_array(&dev_c, NULL, sizeof(BASE_TYPE));
}
catch (cudaError_t err)
{
fprintf(stderr, "Failed to allocate device (error code: %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Dynamic shared memory: one BASE_TYPE slot per thread for the reduction.
dot_produce<<<blockDim, gridDim, threadsPerBlock * sizeof(BASE_TYPE)>>>(dev_a, dev_b, dev_c, N);
err = cudaMemcpy(&result, dev_c, result_size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
// NOTE(review): the message says "allocate" but this reports a memcpy failure.
fprintf(stderr, "Failed to allocate device (error code: %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("%4.2f\n", result);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
delete[] host_a;
delete[] host_b;
return 0;
}
|
184
|
// dacrtplane. A GPU ray tracer using a divide and conquor strategy instead of
// partitioning the geometry into a hierarchy.
// -----------------------------------------------------------------------------
// Copyright (C) 2012, See authors
//
// This program is open source and distributed under the New BSD License. See
// license for more detail.
// -----------------------------------------------------------------------------
#include <iostream>
#undef THRUST_DEBUG
#define THRUST_DEBUG 1
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/version.h>
using std::cout;
using std::endl;
/**
* similar issues
* https://groups.google.com/forum/#!topic/thrust-users/0pZwBjT0n14
*/
/* Runs the same thrust::reduce_by_key experiment twice -- once on host
   vectors, once on device vectors -- and prints the four (key, sum) pairs
   each produces. Keys: 1,3,3,3,2,2,1; values: 9..3. */
int main(int argc, char *argv[]){
    cout << "Thrust v" << THRUST_MAJOR_VERSION << "." << THRUST_MINOR_VERSION << endl;
    {
        // Host-side reduce_by_key: sums runs of equal keys in A over B.
        const int N = 7;
        int keys[N] = {1, 3, 3, 3, 2, 2, 1};
        int vals[N] = {9, 8, 7, 6, 5, 4, 3};
        thrust::host_vector<int> A(keys, keys + N);
        thrust::host_vector<int> B(vals, vals + N);
        thrust::host_vector<int> C(N);
        thrust::host_vector<int> D(N);
        thrust::reduce_by_key(A.begin(), A.end(), B.begin(), C.begin(), D.begin(),
                              thrust::equal_to<int>(), thrust::plus<int>());
        for (int i = 0; i < 4; ++i)
            cout << C[i] << ", " << D[i] << endl;
    }
    {
        // Identical experiment on the device.
        const int N = 7;
        int keys[N] = {1, 3, 3, 3, 2, 2, 1};
        int vals[N] = {9, 8, 7, 6, 5, 4, 3};
        thrust::device_vector<int> A(keys, keys + N);
        thrust::device_vector<int> B(vals, vals + N);
        thrust::device_vector<int> C(N);
        thrust::device_vector<int> D(N);
        thrust::reduce_by_key(A.begin(), A.end(), B.begin(), C.begin(), D.begin(),
                              thrust::equal_to<int>(), thrust::plus<int>());
        for (int i = 0; i < 4; ++i)
            cout << C[i] << ", " << D[i] << endl;
    }
    return 0;
}
|
185
|
#include <stdio.h>
#include <vector>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <cuda.h>
using namespace std;
#define THREADS 64
/* Each thread stores the decimal last digit of one input element:
   mod[i] = data[i] % 10, for i < n. */
__global__ void last_digits(int* mod, int* data, int n) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;
    mod[gid] = data[gid] % 10;
}
/**
 * Block-wise minimum reduction: results[blockIdx.x] receives the minimum of
 * the up-to-blockDim.x elements of data covered by this block.
 * Requires blockDim.x * sizeof(int) bytes of dynamic shared memory and a
 * power-of-two blockDim.x.
 * Fix: out-of-range lanes previously used 1000 as the fill value, which
 * corrupts the result whenever every real element exceeds 1000; INT_MAX is
 * the correct identity element for min.
 */
__global__ void min_reduction(int * data, int* results, int n) {
extern __shared__ int temp[];
int thid = blockIdx.x*blockDim.x + threadIdx.x;
int lid = threadIdx.x;
if(thid < n) {
temp[lid] = data[thid];
} else temp[lid] = 2147483647; /* INT_MAX: identity element for min */
__syncthreads();
for(int offset = blockDim.x>>1; offset > 0; offset >>= 1) {
// Barrier at the top of every iteration; reached by all threads.
__syncthreads();
if(lid < offset) {
if(temp[lid + offset] < temp[lid]) {
temp[lid] = temp[lid + offset];
}
}
}
if(lid == 0) {
results[blockIdx.x] = temp[0];
}
}
/* Reads comma/whitespace-separated ints from inp.txt, writes the global
   minimum to q1a.txt and each value's last decimal digit to q1b.txt. */
int main(int argc,char **argv) {
vector<int> array;
int i = 0;
ifstream file( "inp.txt" );
int number;
while(file>>number) {
array.push_back(number);
i++;
if (file.peek() == ',')
file.ignore();
}
int* data = new int[array.size()];
int* mod = new int[array.size()];
int* d_data;
int* d_mod;
for(int a = 0; a < array.size(); a++) {
data[a] = array[a];
}
int size = sizeof(int)*array.size();
cudaMalloc((void **) &d_data, size);
cudaMalloc((void **) &d_mod, size);
cudaMemcpy(d_data, data, size, cudaMemcpyHostToDevice);
// Number of blocks: ceil(array.size() / THREADS).
int sizing = array.size()/THREADS;
if(array.size()%THREADS > 0) {
sizing++;
}
int* inter = new int[array.size()];
int* d_inter;
// Dynamic shared memory for the reduction: one int per thread.
int blockSize = sizeof(int)*THREADS;
cudaMalloc((void **) &d_inter, size);
// first reduction: per-block minima into d_inter[0..sizing-1]
min_reduction<<<sizing, THREADS, blockSize>>>(d_data, d_inter, array.size());
cudaMemcpy(inter, d_inter, size, cudaMemcpyDeviceToHost);
// second reduction
// NOTE(review): only block 0's result (results[0]) is used below, so this
// is only the true global minimum when sizing <= THREADS (i.e. at most
// THREADS^2 inputs); larger inputs would need further passes -- confirm.
int* results = new int[array.size()];
int* d_results;
cudaMalloc((void **) &d_results, size);
min_reduction<<<sizing, THREADS, blockSize>>>(d_inter, d_results, sizing);
cudaMemcpy(results, d_results, blockSize, cudaMemcpyDeviceToHost);
// last digits of array
last_digits<<<sizing, THREADS>>>(d_mod, d_data, array.size());
cudaMemcpy(mod, d_mod, size, cudaMemcpyDeviceToHost);
FILE *fp = fopen("q1b.txt", "w");
if(fp != NULL) {
for(int a = 0; a < array.size(); a++) {
fprintf(fp, "%d", mod[a]);
if(a + 1 < array.size()) {
fprintf(fp, ", ");
}
}
fclose(fp);
}
fp = fopen("q1a.txt", "w");
if(fp != NULL && array.size() > 0) {
// results[0] holds the overall minimum after the second reduction.
fprintf(fp, "%d", results[0]);
fclose(fp);
}
cudaFree(d_data);
cudaFree(d_inter);
cudaFree(d_results);
cudaFree(d_mod);
// force the printf()s to flush
cudaDeviceSynchronize();
return 0;
}
|
186
|
#include "includes.h"
/* CSR pre-pass for a column-segmented sparse format. One thread per row i:
   every nonzero at flat position `index` is assigned to column segment
   g = col / seg_size; d_offset[index] receives that nonzero's ordinal
   within (segment g, row i), d_nnz_num[g * pad_M + i] counts nonzeros per
   (segment, row), and d_group_seg[index] records g.
   Assumes d_nnz_num arrives zero-initialised -- TODO confirm at the caller.
   seg_num and group_num_col are unused here. */
__global__ void set_segmented_nnz_num(int *d_rpt, int *d_col, int *d_nnz_num, int *d_group_seg, int *d_offset, size_t seg_size, size_t seg_num, int M, int pad_M, int group_num_col)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= M) {
return;
}
// width = number of nonzeros in row i (CSR row-pointer difference).
int width = d_rpt[i + 1] - d_rpt[i];
int g, j;
int col;
int offset = d_rpt[i];
int index;
for (j = 0; j < width; j++) {
index = offset + j;
col = d_col[index];
g = col / seg_size;
// Sequential per-row writes, so the increment needs no atomics.
d_offset[index] = d_nnz_num[g * pad_M + i];
d_nnz_num[g * pad_M + i]++;
d_group_seg[index] = g;
}
}
|
187
|
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
extern "C" void cudaInit(size_t sizeA);
extern "C" void cudaFinalize();
extern "C" void putGPU(void* h_A, size_t sizeA);
extern "C" void getGPU(void* h_A, size_t sizeA);
void* d_A;
/* Allocates sizeA bytes on the device into the global d_A.
   NOTE(review): the cudaMalloc result is unchecked. */
void cudaInit(size_t sizeA){
//allocate memory on device
cudaMalloc( (void**) &d_A, sizeA);
}
/* Copies sizeA bytes from host buffer h_A into the global device buffer d_A
   (which must already have been allocated by cudaInit). */
void putGPU(void* h_A, size_t sizeA){
//copy host data from argument to device
cudaMemcpy(d_A, h_A, sizeA, cudaMemcpyHostToDevice);
}
/* Copies sizeA bytes from the global device buffer d_A back into h_A.
   cudaMemcpy blocks, so the data is valid on return. */
void getGPU(void* h_A, size_t sizeA){
//copy data from device to argument array
cudaMemcpy(h_A, d_A, sizeA, cudaMemcpyDeviceToHost);
}
/* Releases the global device buffer d_A allocated by cudaInit. */
void cudaFinalize(){
//free device memory
cudaFree(d_A);
}
|
188
|
/*
*
* compiling:
* nvcc -lglut -LGLEW life.cuda.cu -o life -g -G
*
* -g -G - debug options
*
* for it's work:
* export LD_LIBRARY_PATH=:/usr/local/cuda/lib
* export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/libnvvp/
*
* cuda-gdb
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#define FIELD_WIDTH 10
#define FIELD_HEIGHT 10
#define NUMBER_OF_THREADS 100
float * state_first; // on PC
float * state_second; // arrays
float * dev_first_state; // on Card
float * dev_second_state; // arrays
int * dev_width;
int * dev_height;
int width = FIELD_WIDTH;
int height = FIELD_HEIGHT;
/* One Game-of-Life generation: each thread updates cell `id` of `second`
   from the 8-neighbour sum in `first` (birth on 3 neighbours, survival on 2,
   death otherwise). Cells hold 0.0f/1.0f, so summing them counts neighbours.
   NOTE(review): the neighbour offsets id±1 and id±*height are not clamped at
   the field borders, so edge cells read outside the array or wrap into the
   adjacent row -- confirm whether toroidal behaviour was intended. */
__global__ void kernel(float * first, float * second , int * width, int * height)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < (*width)*(*height))
{
int numberofneighbours = 0; // unused; kept as in the original
int num = 0;
// change to num += ...
/*
if(*(first + id + 1) == 1) num++;
if(*(first + id - 1) == 1) num++;
if(*(first + id + *height) == 1) num++;
if(*(first + id - *height) == 1) num++;
if(*(first + id + *height + 1) == 1) num++;
if(*(first + id + *height - 1) == 1) num++;
if(*(first + id - *height + 1) == 1) num++;
if(*(first + id - *height - 1) == 1) num++;
*/
num += *(first + id + 1);
num += *(first + id - 1);
num += *(first + id + *height);
num += *(first + id - *height);
num += *(first + id + *height + 1);
num += *(first + id + *height - 1);
num += *(first + id - *height + 1);
num += *(first + id - *height - 1);
switch(num)
{
case 3 : *(second + id) = 1; break;
case 2 : if(*(first + id) == 1) *(second + id) = 1; break;
default : *(second + id) = 0; break;
}
}
}
/* Copies the freshly computed generation (dev_second_state) back into the
   host buffer state_first, overwriting the previous generation. */
void GetDataFromCudaDevice(int width, int height)
{
cudaMemcpy(state_first,dev_second_state,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
}
/* Uploads the host field into dev_first_state, zeroes the output buffer
   dev_second_state, and pushes the field dimensions to the device. */
void CopyDataToCudaDevice(int width, int height)
{
cudaMemcpy(dev_first_state,state_first,sizeof(float)*width*height,cudaMemcpyHostToDevice);
cudaMemset(dev_second_state,0,sizeof(float)*width*height);
cudaMemcpy(dev_width,&width,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_height,&height,sizeof(int),cudaMemcpyHostToDevice);
}
/* Selects device 0 and allocates the two device-side field buffers plus the
   width/height scalars. NOTE(review): the cudaMalloc results are unchecked. */
void InitCudaArrays(int width, int height)
{
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
return;
}
cudaMalloc((void**)&dev_width,sizeof(int));
cudaMalloc((void**)&dev_height,sizeof(int));
cudaMalloc((void**)&dev_first_state,sizeof(float)*width*height);
cudaMalloc((void**)&dev_second_state,sizeof(float)*width*height);
}
/**
 * Launches one generation of the Game-of-Life kernel over the global field
 * buffers and copies the result back to the host.
 * Fixes: the block count was computed as (width*height)/(NUMBER_OF_THREADS+1)
 * (wrong divisor and truncating instead of ceiling division) and then
 * ignored in favour of a hard-coded <<<10,10>>> launch; the kernel was also
 * synchronized twice. The launch now covers width*height cells for any
 * field size; for the 10x10 field the thread coverage is unchanged.
 */
void RunCudaDevice()
{
    cudaError_t cudaStatus;
    int threads = NUMBER_OF_THREADS;
    int blocks = (width * height + threads - 1) / threads;  // ceiling division
    kernel <<<blocks,threads>>> (dev_first_state,dev_second_state,dev_width,dev_height);
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        return;
    }
    GetDataFromCudaDevice(FIELD_WIDTH,FIELD_HEIGHT);
}
/* Releases all device allocations made by InitCudaArrays.
   The width/height parameters are unused. */
void FreeCudaDevice(int width, int height)
{
cudaFree(dev_first_state);
cudaFree(dev_second_state);
cudaFree(dev_width);
cudaFree(dev_height);
}
/* Seeds the host field: a lone cell in the far corner plus two five-cell
   "planer" shapes (see the original sketch below).
 *
 * 01010
 * 00110
 * 00100
 * 00000
 */
void FillField()
{
    // (row, col) pairs of the live cells; the field is row-major.
    const int live[][2] = {
        {9, 9},                                   // the single dot
        {2, 1}, {2, 2}, {3, 2}, {3, 3}, {1, 3},   // first planer
        {7, 1}, {7, 2}, {8, 2}, {8, 3}, {6, 3}    // second planer
    };
    const int count = (int)(sizeof(live) / sizeof(live[0]));
    for (int k = 0; k < count; ++k)
        state_first[live[k][0] * width + live[k][1]] = 1;
}
// allocate memory and initialize array with '0'
// Allocate the two host-side field buffers, zero-filled.
void InitArrays(int width, int height)
{
    // calloc zero-fills, replacing the original malloc + memset pair
    // (all-zero bytes are 0.0f for IEEE floats).
    state_first  = (float *) calloc((size_t)width * height, sizeof(float));
    state_second = (float *) calloc((size_t)width * height, sizeof(float));
}
/**
 * Prints the host field as ASCII art: '*' for a live cell, space for dead.
 * Fix: the original iterated rows up to `width` and columns up to `height`
 * while indexing row*width+col, which reads out of bounds whenever
 * width != height. Rows now run over height and columns over width.
 * (For the square 10x10 field the output is unchanged.)
 */
void ShowArray(int width, int height)
{
    puts("-----------------");
    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++)
        {
            if (state_first[row * width + col] != 0) printf("*");
            else printf(" ");
        }
        printf("\n");
    }
    puts("-----------------");
}
int main()
{
    // Host setup: allocate, seed, and display the initial generation.
    InitArrays(FIELD_WIDTH, FIELD_HEIGHT);
    FillField();
    ShowArray(FIELD_WIDTH, FIELD_HEIGHT);
    // Device round-trip: upload, run one generation, release, display.
    InitCudaArrays(FIELD_WIDTH, FIELD_HEIGHT);
    CopyDataToCudaDevice(FIELD_WIDTH, FIELD_HEIGHT);
    RunCudaDevice();
    FreeCudaDevice(FIELD_WIDTH, FIELD_HEIGHT);
    ShowArray(FIELD_WIDTH, FIELD_HEIGHT);
    // Keep the console window open until a key is pressed.
    char ch;
    scanf("%c", &ch);
}
|
189
|
#include "includes.h"
/* Evaluates output[i] = a1 * input[i] + a0 element-wise, with a flat index
   computed over a 2-D grid of 1-D blocks. */
__global__ void LinearFunctionKernelDouble(double a1, double a0, double* input, double* output, int size)
{
    int id = blockDim.x * blockIdx.y * gridDim.x
           + blockDim.x * blockIdx.x
           + threadIdx.x;
    if (id >= size)
        return;
    double x = input[id];
    output[id] = a1 * x + a0;
}
|
190
|
#include <stdio.h>
#include <stdlib.h>
#include "cuda.h"
// to compile:
// nvcc -O0 -o transpose transpose.cu -lm
//
// to run:
// ./transpose 1024
// assume going forward 32x32 threads in each thread-block
#define BDIM 32
// reference "copy" kernel
// reference "copy" kernel: AT[i,j] = A[i,j]; serves as a bandwidth baseline
// for the transpose variants below (fully coalesced reads and writes).
__global__ void copy(int N,
		     const float * __restrict__ A,
		     float * __restrict__ AT){
    const int col = threadIdx.x + blockDim.x*blockIdx.x;
    const int row = threadIdx.y + blockDim.y*blockIdx.y;
    if (col < N && row < N) {
        const int at = col + row*N;
        AT[at] = A[at];
    }
}
// naive CUDA transpose kernel
// naive CUDA transpose kernel: coalesced writes to AT, strided
// (non-coalesced) reads from A.
__global__ void transposeV1(int N,
			    const float * __restrict__ A,
			    float * __restrict__ AT){
    const int col = threadIdx.x + blockDim.x*blockIdx.x;
    const int row = threadIdx.y + blockDim.y*blockIdx.y;
    if (col >= N || row >= N) return;
    AT[col + row*N] = A[row + col*N];
}
// shared memory CUDA transpose kernel
// shared memory CUDA transpose kernel
// Stages a BDIM x BDIM tile in shared memory so both the global read and the
// transposed global write are coalesced. The column access of s_A at the end
// still has shared-memory bank conflicts; transposeV3 pads to avoid them.
__global__ void transposeV2(int N,
			    const float * __restrict__ A,
			    float * __restrict__ AT){
  const int idx = threadIdx.x + blockDim.x*blockIdx.x;
  const int idy = threadIdx.y + blockDim.y*blockIdx.y;
  __shared__ float s_A[BDIM][BDIM];
  // check this is a legal matrix entry
  if(idx<N && idy<N){
    s_A[threadIdx.y][threadIdx.x] = A[idx+idy*N]; // coalesced reads
  }
  // make sure all threads in this thread-block
  // have read into shared
  __syncthreads();
  // find coordinates of thread in transposed block
  const int idxT = threadIdx.x + blockDim.y*blockIdx.y;
  const int idyT = threadIdx.y + blockDim.x*blockIdx.x;
  // output
  if(idxT<N && idyT<N){
    AT[idxT+idyT*N] = s_A[threadIdx.x][threadIdx.y];
  }
}
// shared memory CUDA transpose kernel with padding to avoid smem bank conflicts
// shared memory CUDA transpose kernel with padding to avoid smem bank conflicts
// Same staging as transposeV2, but the tile's inner dimension is padded by
// one float so column accesses hit distinct banks.
__global__ void transposeV3(int N,
			    const float * __restrict__ A,
			    float * __restrict__ AT){
  const int idx = threadIdx.x + blockDim.x*blockIdx.x;
  const int idy = threadIdx.y + blockDim.y*blockIdx.y;
  // pad by 1 to avoid 32-width bank-conflicts
  __shared__ float s_A[BDIM][BDIM+1];
  // check this is a legal matrix entry
  if(idx<N && idy<N){
    s_A[threadIdx.y][threadIdx.x] = A[idx+idy*N];
  }
  // ensure all threads in thread-block finish
  __syncthreads();
  // find coordinates of thread in transposed block
  const int idxT = threadIdx.x + blockDim.y*blockIdx.y;
  const int idyT = threadIdx.y + blockDim.x*blockIdx.x;
  // output
  if(idxT<N && idyT<N){
    AT[idxT+idyT*N] = s_A[threadIdx.x][threadIdx.y];
  }
}
/**
 * Driver: fills an N x N matrix with A[i][j] = j, runs the copy baseline and
 * the three transpose variants, and checks for CUDA errors.
 * Fixes: the documented "./transpose 1024" argument was ignored (argc/argv
 * unused) — N can now be overridden from the command line, defaulting to the
 * original 2048; device and host buffers were leaked and are now released.
 */
int main(int argc, char **argv){
    int N = 2048;
    if (argc > 1) {
        int requested = atoi(argv[1]);
        if (requested > 0) N = requested;   // e.g. "./transpose 1024"
    }
    float *A  = (float*) calloc((size_t)N*N, sizeof(float));
    float *AT = (float*) calloc((size_t)N*N, sizeof(float));
    printf("N=%d\n", N);
    // A[i][j] = j so the transpose is easy to eyeball.
    for(int i=0;i<N;++i){
        for(int j=0;j<N;++j){
            A[j+i*N] = j;
        }
    }
    float *c_A, *c_AT;
    size_t sz = (size_t)N*N*sizeof(float); // size of matrix
    cudaMalloc(&c_A, sz);
    cudaMalloc(&c_AT, sz);
    cudaMemcpy(c_A, A, sz, cudaMemcpyHostToDevice);
    int Nblocks = (N+BDIM-1)/BDIM; // nearest Nblocks such that Nblocks * BDIM > N
    dim3 threadsPerBlock(BDIM,BDIM,1);
    dim3 blocks(Nblocks,Nblocks,1);
    copy        <<< blocks, threadsPerBlock >>> (N, c_A, c_AT);
    transposeV1 <<< blocks, threadsPerBlock >>> (N, c_A, c_AT);
    transposeV2 <<< blocks, threadsPerBlock >>> (N, c_A, c_AT);
    transposeV3 <<< blocks, threadsPerBlock >>> (N, c_A, c_AT);
    cudaMemcpy(AT, c_AT, sz, cudaMemcpyDeviceToHost);
    // --------------------------------------------------------------------------------
    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess){
        fprintf(stderr, "CUDA ERROR: %s\n",
                cudaGetErrorString(err));
    }
    // Release device and host memory (previously leaked).
    cudaFree(c_A);
    cudaFree(c_AT);
    free(A);
    free(AT);
    return 0;
}
|
191
|
#include "includes.h"
/* Tiled matrix transpose: each block moves a TILE_DIM x TILE_DIM tile
   through padded shared memory (the +1 column avoids bank conflicts) so
   both the global read and the transposed global write are coalesced.
   Each thread handles TILE_DIM/BLOCK_ROWS rows of the tile; TILE_DIM and
   BLOCK_ROWS come from includes.h. Assumes a square matrix whose width is
   gridDim.x * TILE_DIM -- TODO confirm against the launch site. */
__global__ void transposeKernel(float *inData, float *outData)
{
__shared__ float tile[TILE_DIM][TILE_DIM + 1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
/* Copying data into shared memory - each thread copies 4 elements : read & write coalesced */
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y + j][threadIdx.x] = inData[(y+j) * width + x];
__syncthreads();
/* x,y modified according to the new transposed matrix */
x = blockIdx.y * TILE_DIM + threadIdx.x;
y = blockIdx.x * TILE_DIM + threadIdx.y;
/* Copying data to output array - each thread copies 4 elemets : read & write coalesced */
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
outData[(y+j) * width + x] = tile[threadIdx.x][threadIdx.y + j];
}
|
192
|
// #include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <cmath>
#include <iostream>
namespace {
/* Accumulates the complex product (x_re + i*x_im)(y_re + i*y_im) into
   (*out_re, *out_im) using the 3-multiplication (Gauss) form:
     uavc    = x_re*(y_re + y_im)
     out_re += uavc - (x_re + x_im)*y_im  ==  x_re*y_re - x_im*y_im
     out_im += (x_im - x_re)*y_re + uavc  ==  x_re*y_im + x_im*y_re */
template <typename scalar_t>
__device__ __forceinline__ void single_mul(
scalar_t x_re,
scalar_t x_im,
scalar_t y_re,
scalar_t y_im,
scalar_t* out_re,
scalar_t* out_im) {
scalar_t uavc = x_re * (y_re + y_im);
*out_re += uavc - (x_re + x_im) * y_im;
*out_im += (x_im - x_re) * y_re + uavc;
}
/* Accumulates the complex sum (x_re + i*x_im) + (y_re + i*y_im) into
   (*out_re, *out_im). */
template <typename scalar_t>
__device__ __forceinline__ void single_add(
scalar_t x_re,
scalar_t x_im,
scalar_t y_re,
scalar_t y_im,
scalar_t* out_re,
scalar_t* out_im) {
*out_re += x_re + y_re;
*out_im += x_im + y_im;
}
/**
Complex multiplication of tensors using shared memory and barrier
synchronization.
Compute the element wise complex multiplication for each thread in the block and
write the result to the shared memory. Then synchronize the threads and in the
log based fashion sum up the results for each output pixel through its channels,
if they are present in the cache. The stride is the number of threads per block
times the I (the two float representation of the complex numbers).
*/
template <typename scalar_t>
__global__ void complex_mul_cuda_kernel(
const scalar_t* __restrict__ x,
const scalar_t* __restrict__ y,
scalar_t* __restrict__ out,
const int N, const int F, const int C, const int H, const int W) {
// The size of the shared memory cache should be twice the number of threads
// per block as we store the real and imaginary part of the result.
// NOTE(review): the cache is declared float even though the kernel is
// templated on scalar_t; a double instantiation would silently truncate.
extern __shared__ float cache[]; // cache for the result of the complex multiplication
const int I = 2; // the last dimension for the complex number
const int plane_size = H * W;
const int channel_size = plane_size * I;
const int image_size = C * channel_size; // size of the image from the batch
// number of complex values in the input that we iterate through
const int nr_values = C * H * W;
const int n = blockIdx.x; // current index of an image/input map in the batch
const int f = blockIdx.y; // current index of a filter from the filter bank
const int block_size = blockDim.x;
const int thread_nr = threadIdx.x;
// stride for the H*W map is equal to the number of threads declared in a block
const int stride = block_size * I; // we need H*W threads per plane, each deals with I numbers
const int n_idx = n * image_size; // start index in the batch for this input map
const int f_idx = f * image_size; // start index in the bank for this filter
// find index for the output
const int no_idx = n * (F * channel_size); // output index for the batch data point
const int fo_idx = f * channel_size; // output index for the filter/channel
// Each H*W plane contains H*W*I elements in depth.
// We linearize it and start from 0, move by #threads*I steps in outer loop.
const int start_idx = threadIdx.x*I;
// index in the input map
int N_idx = n_idx + start_idx; // index across the first channel plane (in the input map n).
const int last_N_idx = n_idx + image_size; // last index for the starting position to compute the sum through each channel for this pixel
// To prevent us from a deadlock, we have to always execute __syncthreads();
// for all the threads in the block. Each thread has to do the same number of
// iterations for any loop. To ensure that, we keep all threads running,
// even though, some of them are really idle. We keep the loop running to
// the multiple of the block size that is greater than the number of values
// in the input map in total: C*H*W - this is a number of complex cells in the
// input map.
const int num_blocks = (nr_values + block_size - 1) / block_size;
const int last_block_idx = n_idx + num_blocks * block_size * I;
// index in the filter
int F_idx = f_idx + start_idx;
// index in the output, we compute cells on a flat plane (no channels)
int base_O_idx = no_idx + fo_idx;
int run_O_idx = (start_idx % channel_size);
int thread_cidx = thread_nr * I;
// NOTE(review): debug print left in -- one line per thread per launch.
printf("N_idx:%d, last_block_idx:%d, last_N_idx:%d\n", N_idx, last_block_idx, last_N_idx);
while (N_idx < last_block_idx) {
// Zero out caches.
cache[thread_cidx] = 0;
cache[thread_cidx + 1] = 0;
// NOTE(review): the guard uses last_N_idx - 1, which excludes the final
// complex pair of the image -- verify the intended off-by-one here.
if (N_idx < last_N_idx - 1) {
scalar_t out_re = 0;
scalar_t out_im = 0;
scalar_t x_re = x[N_idx];
scalar_t x_im = x[N_idx + 1];
scalar_t y_re = y[F_idx];
scalar_t y_im = y[F_idx + 1];
single_mul(x_re, x_im, y_re, y_im, &out_re, &out_im);
cache[thread_cidx] = out_re;
cache[thread_cidx + 1] = out_im;
}
__syncthreads(); // Make the results visible to all threads.
// It is not O(logN) but O(N) as of now. For each element in the output
// map we have a dedicated thread. The thread goes through all the
// channels present in the cache.
if (thread_nr < plane_size) {
for (int cache_index = thread_nr + plane_size;
cache_index < block_size;
cache_index += plane_size) {
cache[thread_cidx] += cache[cache_index*I];
cache[thread_cidx + 1] += cache[cache_index*I + 1];
}
// Move the summed values (across the channels) for each pixel to
// the output.
const int O_idx = base_O_idx + run_O_idx;
// NOTE(review): the real part accumulates (+=) but the imaginary part
// overwrites (=) -- this asymmetry looks unintended; confirm.
out[O_idx] += cache[thread_cidx];
out[O_idx + 1] = cache[thread_cidx + 1];
}
N_idx += stride;
F_idx += stride;
run_O_idx = (run_O_idx + stride) % channel_size;
// Make sure that all cache cells are zeroed out before moving on.
// We need this as in the second part we access cache cells that do not
// belong only to this thread.
__syncthreads();
}
}
} // namespace
//void complex_mul_stride_no_permute_cuda(
// at::Tensor x,
// at::Tensor y,
// at::Tensor out,
// int threads = 1024) {
//
// const auto N = x.size(0); // batch_size
// const auto F = y.size(0); // filter_bank_size
// const auto C = x.size(1); // number of channels
// const auto H = x.size(2); // height of the matrix
// const auto W = x.size(3); // width of the matrix
//
// const auto x_blocks = N;
// const auto y_blocks = F;
// const dim3 blocks(x_blocks, y_blocks);
//
// AT_DISPATCH_FLOATING_TYPES(x.type(), "complex_mul_cuda",
// ([&] {
// complex_mul_cuda_kernel<scalar_t><<<blocks, threads>>>(
// x.data<scalar_t>(), y.data<scalar_t>(), out.data<scalar_t>(),
// N, F, C, H, W);
// }));
//}
//template <typename scalar_t>
//void complex_mul_stride_no_permute_cuda_pure(
// at::Tensor x,
// at::Tensor y,
// at::Tensor out,
// int threads = 1024) {
//
// const auto N = x.size(0); // batch_size
// const auto F = y.size(0); // filter_bank_size
// const auto C = x.size(1); // number of channels
// const auto H = x.size(2); // height of the matrix
// const auto W = x.size(3); // width of the matrix
//
// const auto x_blocks = N;
// const auto y_blocks = F;
// const dim3 blocks(x_blocks, y_blocks);
//
// // Run kernel on the GPU
// complex_mul_cuda_kernel<scalar_t><<<blocks, 1024>>>(
// x.data<scalar_t>(), y.data<scalar_t>(), out.data<scalar_t>(),
// N, F, C, H, W);
//}
/**
Uncomment the pytorch related stuff.
Compile:
ady@skr-compute1:/tmp/pycharm_project_154/cnns/nnlib/pytorch_cuda/complex_mul_cuda$ nvcc complex_mul_kernel_stride_no_permute.cu -o complex_mul_profile.out
ady@skr-compute1:/tmp/pycharm_project_154/cnns/nnlib/pytorch_cuda/complex_mul_cuda$ nvprof ./complex_mul_profile.out
nvidia
/usr/local/cuda/bin/nvcc -I/local/ady/anaconda3/lib/python3.6/site-packages/torch/lib/include -I/local/ady/anaconda3/lib/python3.6/site-packages/torch/lib/include/torch/csrc/api/include -I/local/ady/anaconda3/lib/python3.6/site-packages/torch/lib/include/TH -I/local/ady/anaconda3/lib/python3.6/site-packages/torch/lib/include/THC -I/usr/local/cuda/include -I/local/ady/anaconda3/include/python3.6m -c complex_mul_kernel.cu -o complex_mul_kernel_stride_no_permute.out -std=c++11
nvcc -I/local/ady/anaconda3/lib/python3.6/site-packages/torch/lib/include -I/local/ady/anaconda3/lib/python3.6/site-packages/torch/lib/include/torch/csrc/api/include -I/local/ady/anaconda3/lib/python3.6/site-packages/torch/lib/include/TH -I/local/ady/anaconda3/lib/python3.6/site-packages/torch/lib/include/THC -I/usr/local/cuda/include -I/local/ady/anaconda3/include/python3.6m complex_mul_kernel_stride_no_permute.cu -o complex_mul_kernel_stride_no_permute.out -std=c++11
Segmentation fault
*/
/**
 * Host driver for complex_mul_cuda_kernel over a tiny fixed problem.
 * Fixes relative to the original:
 *  - the dynamic shared-memory launch argument is in BYTES: the kernel needs
 *    cuda_block_threads * 2 floats, so the byte count must be multiplied by
 *    sizeof(float) (the original passed only cuda_block_threads * 2, causing
 *    out-of-bounds shared-memory writes);
 *  - the kernel accumulates into `out` with +=, so `out` is now zeroed first;
 *  - cudaDeviceSynchronize() is called before reading the managed `out`
 *    buffer (kernel launches are asynchronous);
 *  - the result loop previously iterated H*W*C complex values, reading past
 *    the N*F*H*W*I-float output buffer; it now iterates size_output.
 */
int main(void)
{
    // Problem shape: N batch maps, F filters, C channels, H x W spatial,
    // I = 2 floats per complex value.
    int N = 1;
    int F = 1;
    int C = 4;
    int H = 3;
    int W = 2;
    int I = 2;
    int size_input = N * C * H * W * I;
    int size_filter = F * C * H * W * I;
    int size_output = N * F * H * W * I;
    int cuda_block_threads = 16;
    float *x, *y, * out;
    // Allocate unified memory - accessible from cpu or gpu
    cudaMallocManaged(&x, size_input*sizeof(float));
    cudaMallocManaged(&y, size_filter*sizeof(float));
    cudaMallocManaged(&out, size_output*sizeof(float));
    // The kernel accumulates into `out`, so it must start zeroed.
    cudaMemset(out, 0, size_output*sizeof(float));
    // NOTE(review): only the first channel plane of x and y is initialised;
    // the remaining C-1 channels are left as allocated -- confirm intent.
    for (int j=0; j<H; ++j) {
        for (int i=0; i<W; ++i) {
            x[(j*W+i)*2] = 3;
            x[(j*W+i)*2 + 1] = 1;
            y[(j*W+i)*2] = 4;
            y[(j*W+i)*2 + 1] = 2;
        }
    }
    for (int i=0; i<H*W*2; i+=2) {
        printf("%p %d: %f, %f, %f, %f\n", x, i, x[i], x[i+1], y[i], y[i+1]);
    }
    const dim3 blocks(N, F);
    // Shared memory: 2 floats (re, im) per thread, in bytes.
    complex_mul_cuda_kernel<float><<<blocks, cuda_block_threads,
        cuda_block_threads * 2 * sizeof(float)>>>(x, y, out, N, F, C, H, W);
    // Wait for the asynchronous launch before touching managed memory.
    cudaDeviceSynchronize();
    for (int i=0; i<size_output; i+=2) {
        printf("%d: %f, %f\n", i, out[i], out[i+1]);
    }
    cudaFree(x);
    cudaFree(y);
    cudaFree(out);
    printf("finished computation\n");
    return 0;
}
|
193
|
#include<stdio.h>
#include<stdlib.h>
#include<ctype.h>
#include<math.h>
#include<time.h>
/* Device-side sample right-hand side: f(t) = 9*t^2 - 4*t + 5. */
__device__ float edo_original(float t)
{
    float t_squared = powf(t, 2);
    return 9 * t_squared - 4 * t + 5;
}
/* One explicit-Euler step, one ODE instance per thread:
   y_j += delta_t * (4*t - y_j + 3 + j), for j < (int)m. */
__global__ void euler_method_gpu(float t, float *y, float delta_t, float m)
{
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= (int) m)
        return;
    float current = y[gid];
    y[gid] = current + delta_t * (4 * t - current + 3 + gid);
}
void euler_method(float t, float *y, int m, float delta_t);
float edo_resuelta(float t, int j);
/* Benchmarks explicit-Euler integration of m coupled-by-index ODEs on CPU
   vs GPU for sizes 10^4..10^8, then sweeps GPU block sizes on the largest
   size; timings are written to the file "2_time". */
int main(){
FILE *fp_time;
float *y, *y_dev;
clock_t start_t, end_t, total_t;
cudaEvent_t ct1, ct2;
int j, k, counter = 0;
int block_size, grid_size;
// Problem sizes, stored as floats and cast where needed.
float m[5] = {powf(10, 4), powf(10, 5), powf(10, 6), powf(10, 7), powf(10, 8)};
int block_iterate[4] = {64, 128, 256, 512};
int N = (int) powf(10,3);
float delta_t = powf(10,-3);
// time[0..4]: CPU ms; time[5..9]: GPU ms; time[10..13]: block-size sweep.
float time[14];
float t = 0;
float dt;
cudaEventCreate(&ct1); cudaEventCreate(&ct2);
fp_time = fopen("2_time", "w");
for(k = 0 ; k < 5 ; k++)
{
// Compute initial values: y[j] = j.
y = (float*) malloc(sizeof(float)*m[k]);
cudaMalloc(&y_dev, sizeof(float)*m[k]);
for(j = 0; j < m[k]; j++){
y[j] = j;
}
// Copy them to the GPU.
cudaMemcpy(y_dev, y, m[k]*sizeof(float), cudaMemcpyHostToDevice);
// CPU CODE
start_t = clock();
for(int i = 0; i < N ; i++){
t = i*delta_t;
euler_method(t, y, m[k], delta_t);
}
end_t = clock();
total_t = end_t - start_t;
counter++;
// GPU CODE
block_size = 256;
grid_size = (int)ceil((float) m[k] / block_size);
cudaEventRecord(ct1);
for(int i = 0; i < N ; i++){
t = i*delta_t;
euler_method_gpu<<<grid_size,block_size>>>(t, y_dev, delta_t, m[k]);
}
cudaEventRecord(ct2);
cudaMemcpy(y, y_dev, m[k]*sizeof(float), cudaMemcpyDeviceToHost);
cudaEventSynchronize(ct2);
cudaEventElapsedTime(&dt, ct1, ct2);
/*
for(j = 0; j < m[k]; j++){
fprintf(fp, "Valor obtenido = %f , Valor real = %f j = %d\n", y[j], edo_resuelta(1,j), j+1);
}
*/
time[k] = (float) 1000*total_t/CLOCKS_PER_SEC;
time[5 + k] = dt;
printf("Tiempo que demora en CPU = %f [ms] para m numero %d\n", ((float) 1000*total_t/CLOCKS_PER_SEC), counter);
printf("Tiempo que demora en GPU = %f [ms] para m numero %d\n", dt, counter);
free(y);
cudaFree(y_dev);
}
// Block-size sweep on the largest problem size, m[4].
y = (float*) malloc(sizeof(float)*m[4]);
cudaMalloc(&y_dev, sizeof(float)*m[4]);
for(j = 0; j < 4; j++){
block_size = block_iterate[j];
grid_size = (int)ceil((float) m[4] / block_size);
cudaEventRecord(ct1);
for(int i = 0; i < N ; i++){
t = i*delta_t;
euler_method_gpu<<<grid_size,block_size>>>(t, y_dev, delta_t, m[4]);
}
cudaEventRecord(ct2);
cudaMemcpy(y, y_dev, m[4]*sizeof(float), cudaMemcpyDeviceToHost);
cudaEventSynchronize(ct2);
cudaEventElapsedTime(&dt, ct1, ct2);
time[10 + j] = dt;
printf("Tiempo que demora en GPU = %f [ms] para block_size = %d\n", dt, block_size);
}
free(y);
cudaFree(y_dev);
// Emit "size time" pairs: CPU row, GPU row, then the block-size row.
for(int i = 0; i < 14; i++){
if((i%5 == 0) && (i != 0)){
fprintf(fp_time, "\n");
}
if(i < 10){
fprintf(fp_time, "%f %f ",m[i%5], time[i]);
}
else if(i >= 10){
fprintf(fp_time, "%d %f ",block_iterate[i%5], time[i]);
}
}
fclose(fp_time);
return 0;
}
/* Host reference: one explicit-Euler step over all m ODE instances,
   y_j += delta_t * (4*t - y_j + 3 + j). */
void euler_method(float t, float *y, int m, float delta_t){
    for (int idx = 0; idx < m; ++idx) {
        float current = y[idx];
        y[idx] = current + delta_t * (4 * t - current + 3 + idx);
    }
}
// Analytic solution of the j'th ODE, used to validate the numeric results:
// y_j(t) = e^{-t} + 4t - 1 + j.
float edo_resuelta(float t, int j)
{
    const float decay = expf(-t);
    return decay + 4*t - 1 + j;
}
|
194
|
#include "includes.h"
// In-place rank-1 update of the inverse matrix Ainv: each new row becomes
// row + prefact * dot(u, row) * A_k, where A_k is row k of the old inverse
// and prefact = -1/(1 + dot(u, A_k)). This matches a Sherman-Morrison-style
// update -- TODO confirm the exact formula against the caller.
// Assumes blockDim.x == N, N a power of two (the halving reduction requires
// it), and N <= NMAX -- TODO confirm at the launch site (not in this file).
__global__ static void update_inverse_cuda (float *Ainv, float *u, int N, int rowstride, int k)
{
    __shared__ float A_k[NMAX], u_shared[NMAX], Ainv_u[NMAX], Ainv_shared[NMAX];
    A_k[threadIdx.x] = Ainv[k*rowstride+threadIdx.x];   // stage row k of Ainv
    u_shared[threadIdx.x] = u[threadIdx.x];             // stage update vector
    // First, compute k'th element of Ainv_u
    Ainv_u[threadIdx.x] = u_shared[threadIdx.x] * A_k[threadIdx.x];
    __syncthreads();
    // Tree reduction: afterwards Ainv_u[0] == dot(u, A_k).
    // NOTE(review): for threadIdx.x >= n, 'a' is left uninitialized and its
    // garbage value is stored into Ainv_u[threadIdx.x]; those slots are never
    // read back by later iterations, so the reduction result is unaffected.
    for (int n=N>>1; n>0; n = n>>1) {
        float a;
        if (threadIdx.x < n)
            a = Ainv_u[2*threadIdx.x] + Ainv_u[2*threadIdx.x+1];
        __syncthreads();
        Ainv_u[threadIdx.x] = a;
        __syncthreads();
    }
    float prefact = -1.0f/(1.0f + Ainv_u[0]);
    // Update every row of Ainv. The loop trip count is uniform across the
    // block, so the __syncthreads() calls inside are safe.
    for (int row=0; row<N; row++) {
        Ainv_shared[threadIdx.x] = Ainv[row*rowstride+threadIdx.x];
        __syncthreads();
        Ainv_u[threadIdx.x] = u_shared[threadIdx.x] * Ainv_shared[threadIdx.x];
        // Same reduction pattern: Ainv_u[0] = dot(u, this row).
        for (int n=N>>1; n>0; n = n>>1) {
            float a;
            if (threadIdx.x < n)
                a = Ainv_u[2*threadIdx.x] + Ainv_u[2*threadIdx.x+1];
            __syncthreads();
            Ainv_u[threadIdx.x] = a;
            __syncthreads();
        }
        __syncthreads();
        // Now Ainv_u[0] has the row'th element of Ainv_u.
        Ainv[row*rowstride + threadIdx.x] =
            Ainv_shared[threadIdx.x] + prefact*Ainv_u[0]*A_k[threadIdx.x];
    }
}
|
195
|
#include "conv.cuh"
#include <iostream>
// Abort helper used by the HANDLE_ERROR macro: on any CUDA failure, print
// the error with its call site, wait for a key press (so the message stays
// visible in a console window), then terminate the process.
void HandleError( cudaError_t err, const char *file, int line )
{
    if (err == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
    getchar();
    exit( EXIT_FAILURE );
}
// Debug-only error check (compiled out unless _DEBUG_PRINTS_ is defined):
// synchronize so asynchronous kernel faults are surfaced, then report the
// sticky error state. cudaPeekAtLastError reads the error without clearing
// it, unlike cudaGetLastError.
void CheckError(void)
{
#ifdef _DEBUG_PRINTS_
    cudaDeviceSynchronize();
    HANDLE_ERROR( cudaPeekAtLastError() );
#endif
}
__constant__ float d_kernel[81];
// Fetch one pixel from a 4-byte-per-pixel buffer stored B,G,R,A in memory,
// returning it as RGBA in (x,y,z,w). 'row' indexes along the width (x axis),
// 'col' along the height (y axis).
// Fix: out-of-range coordinates are now clamped to the nearest edge on BOTH
// sides of BOTH axes. The original only clamped row at 0 and col at
// fImageHeight-1, so halo reads with row >= fImageWidth or col < 0 went out
// of bounds.
__inline__ __device__ uchar4 getRGBA(const uchar* fSource,
                                     const int fImageWidth,
                                     const int fImageHeight,
                                     const int row,
                                     const int col)
{
    uchar4 retVal;
    int ii = row < 0 ? 0 : (row >= fImageWidth  ? fImageWidth  - 1 : row);
    int jj = col < 0 ? 0 : (col >= fImageHeight ? fImageHeight - 1 : col);
    int indx= 4*(jj*fImageWidth + ii);
    retVal.x= fSource[indx+2];
    retVal.y= fSource[indx+1];
    retVal.z= fSource[indx+0];
    retVal.w= fSource[indx+3];
    return retVal;
}
// Store an RGBA float4 into a B,G,R,A byte buffer at (row=x, col=y).
// Fix: clamp both coordinates to [0, dim-1] on both sides; the original
// only clamped row at 0 and col at fImageHeight-1, allowing OOB writes.
// NOTE(review): float components are truncated on assignment to uchar with
// no saturation at 255 -- behavior preserved; confirm callers keep values
// in range.
__inline__ __device__ void setRGBA(uchar* fDestination,
                                   const int fImageWidth,
                                   const int fImageHeight,
                                   const int row,
                                   const int col,
                                   float4 value)
{
    int ii = row < 0 ? 0 : (row >= fImageWidth  ? fImageWidth  - 1 : row);
    int jj = col < 0 ? 0 : (col >= fImageHeight ? fImageHeight - 1 : col);
    int indx= 4*(jj*fImageWidth + ii);
    fDestination[indx+2] = value.x;
    fDestination[indx+1] = value.y;
    fDestination[indx+0] = value.z;
    fDestination[indx+3] = value.w;
}
// Shared-memory 2D convolution over a BGRA image (global-memory source).
// Each block stages a (blockDim.x + 2r) x (blockDim.y + 2r) RGBA tile into
// dynamic shared memory (size passed at launch), then each thread convolves
// its pixel with the (2r+1)^2 weights held in constant memory (d_kernel).
// The alpha channel is passed through unfiltered.
// NOTE(review): the halo loads assume blockDim.x >= 2*fKernelSize and
// blockDim.y >= 2*fKernelSize -- TODO confirm TILE_WIDTH/TILE_HEIGHT.
__global__ void convolveKernel(const uchar* fSource, int fImageWidth, int fImageHeight, uchar* fDestination, int fKernelSize)
{
    extern __shared__ uchar shared[];
    const int PADDING = 2*fKernelSize;     // halo pixels on each axis
    int slen = blockDim.x+PADDING;         // tile row length in pixels
    int klen = 2*fKernelSize+1;            // kernel side length
    int gx = threadIdx.x + blockDim.x * blockIdx.x;
    int gy = threadIdx.y + blockDim.y * blockIdx.y;
    int sidx = 4*(threadIdx.y*slen+threadIdx.x);
    // Each thread loads its top-left-shifted pixel; getRGBA clamps at edges.
    uchar4 pxl = getRGBA(fSource,fImageWidth,fImageHeight,
                         gx-fKernelSize,gy-fKernelSize);
    shared[sidx+0] = pxl.x;
    shared[sidx+1] = pxl.y;
    shared[sidx+2] = pxl.z;
    shared[sidx+3] = pxl.w;
    int ti = threadIdx.x + fKernelSize;    // this thread's center in the tile
    int tj = threadIdx.y + fKernelSize;
    int lx2 = threadIdx.x + blockDim.x;    // halo positions in the tile
    int ly2 = threadIdx.y + blockDim.y;
    int gx2 = gx + blockDim.x;             // halo positions in the image
    int gy2 = gy + blockDim.y;
    // Right halo columns.
    if( threadIdx.x < PADDING ) {
        pxl = getRGBA(fSource,fImageWidth,fImageHeight,
                      gx2-fKernelSize,gy-fKernelSize);
        sidx= 4*(threadIdx.y*slen+lx2);
        shared[sidx+0] = pxl.x;
        shared[sidx+1] = pxl.y;
        shared[sidx+2] = pxl.z;
        shared[sidx+3] = pxl.w;
    }
    // Bottom halo rows.
    if( threadIdx.y < PADDING ) {
        pxl = getRGBA(fSource,fImageWidth,fImageHeight,
                      gx-fKernelSize,gy2-fKernelSize);
        sidx= 4*(ly2*slen+threadIdx.x);
        shared[sidx+0] = pxl.x;
        shared[sidx+1] = pxl.y;
        shared[sidx+2] = pxl.z;
        shared[sidx+3] = pxl.w;
    }
    // Bottom-right halo corner.
    if( threadIdx.x < PADDING && threadIdx.y < PADDING ) {
        pxl = getRGBA(fSource,fImageWidth,fImageHeight,
                      gx2-fKernelSize,gy2-fKernelSize);
        sidx= 4*(ly2*slen+lx2);
        shared[sidx+0] = pxl.x;
        shared[sidx+1] = pxl.y;
        shared[sidx+2] = pxl.z;
        shared[sidx+3] = pxl.w;
    }
    __syncthreads();
    // Now that the image has been read
    // into shared memory completely.
    // Check for image bounds and exit
    // (after the barrier, so every thread participated in the loads).
    if( gx >= fImageWidth || gy >= fImageHeight )
        return;
    sidx = 4*(tj*slen+ti);
    uchar* ptr = shared + sidx;
    float4 accum = {0.0f,0.0f,0.0f,0.0f};
    // Weighted sum over the (2r+1)^2 neighbourhood; weights come from
    // constant memory, broadcast to all threads reading the same index.
    for( int jj=-fKernelSize; jj<=fKernelSize; jj++ )
    {
        for( int ii=-fKernelSize; ii<=fKernelSize; ii++ )
        {
            int tmpidx = 4*(jj*slen+ii);
            float weight= d_kernel[(fKernelSize+jj)*klen+(fKernelSize+ii)];
            accum.x += weight*ptr[tmpidx+0];
            accum.y += weight*ptr[tmpidx+1];
            accum.z += weight*ptr[tmpidx+2];
        }
    }
    // Alpha copied straight from the source pixel.
    accum.w = shared[sidx+3];
    setRGBA(fDestination,fImageWidth,fImageHeight,gx,gy,accum);
}
// Texture-object variant of the shared-memory convolution: identical tiling
// and weighting to the uchar* overload, but the source is sampled via
// tex2D<uchar4>, which applies the texture's addressing mode at the borders
// instead of explicit clamping.
// NOTE(review): halo loads assume blockDim.x/y >= 2*fKernelSize -- TODO
// confirm TILE_WIDTH/TILE_HEIGHT.
__global__ void convolveKernel(cudaTextureObject_t fSource, int fImageWidth, int fImageHeight, uchar* fDestination, int fKernelSize)
{
    extern __shared__ uchar shared[];
    const int PADDING = 2*fKernelSize;   // halo pixels on each axis
    int slen = blockDim.x + PADDING;     // tile row length in pixels
    int klen = PADDING + 1;              // kernel side length, 2r+1
    int gx = threadIdx.x + blockDim.x * blockIdx.x;
    int gy = threadIdx.y + blockDim.y * blockIdx.y;
    int sidx = 4*(threadIdx.y*slen+threadIdx.x);
    // Each thread loads its top-left-shifted pixel through the texture unit.
    uchar4 pxl = tex2D<uchar4>(fSource,gx-fKernelSize,gy-fKernelSize);
    shared[sidx+0] = pxl.x;
    shared[sidx+1] = pxl.y;
    shared[sidx+2] = pxl.z;
    shared[sidx+3] = pxl.w;
    int ti = threadIdx.x + fKernelSize;  // this thread's center in the tile
    int tj = threadIdx.y + fKernelSize;
    int lx2 = threadIdx.x + blockDim.x;  // halo positions in the tile
    int ly2 = threadIdx.y + blockDim.y;
    int gx2 = gx + blockDim.x;           // halo positions in the image
    int gy2 = gy + blockDim.y;
    // Right halo columns.
    if( threadIdx.x < PADDING ) {
        pxl = tex2D<uchar4>(fSource,gx2-fKernelSize,gy-fKernelSize);
        sidx= 4*(threadIdx.y*slen+lx2);
        shared[sidx+0] = pxl.x;
        shared[sidx+1] = pxl.y;
        shared[sidx+2] = pxl.z;
        shared[sidx+3] = pxl.w;
    }
    // Bottom halo rows.
    if( threadIdx.y < PADDING ) {
        pxl = tex2D<uchar4>(fSource,gx-fKernelSize,gy2-fKernelSize);
        sidx= 4*(ly2*slen+threadIdx.x);
        shared[sidx+0] = pxl.x;
        shared[sidx+1] = pxl.y;
        shared[sidx+2] = pxl.z;
        shared[sidx+3] = pxl.w;
    }
    // Bottom-right halo corner.
    if( threadIdx.x < PADDING && threadIdx.y < PADDING ) {
        pxl = tex2D<uchar4>(fSource,gx2-fKernelSize,gy2-fKernelSize);
        sidx= 4*(ly2*slen+lx2);
        shared[sidx+0] = pxl.x;
        shared[sidx+1] = pxl.y;
        shared[sidx+2] = pxl.z;
        shared[sidx+3] = pxl.w;
    }
    __syncthreads();
    // Now that the image has been read
    // into shared memory completely.
    // Check for image bounds and exit
    // (after the barrier, so every thread participated in the loads).
    if( gx >= fImageWidth || gy >= fImageHeight )
        return;
    sidx = 4*(tj*slen+ti);
    uchar* ptr = shared + sidx;
    float4 accum = {0.0f,0.0f,0.0f,0.0f};
    // Weighted sum over the (2r+1)^2 neighbourhood using constant-memory
    // weights.
    for( int jj=-fKernelSize; jj<=fKernelSize; jj++ )
    {
        for( int ii=-fKernelSize; ii<=fKernelSize; ii++ )
        {
            int tmpidx = 4*(jj*slen+ii);
            float weight= d_kernel[(fKernelSize+jj)*klen+(fKernelSize+ii)];
            accum.x += weight*ptr[tmpidx+0];
            accum.y += weight*ptr[tmpidx+1];
            accum.z += weight*ptr[tmpidx+2];
        }
    }
    // Alpha copied straight from the source pixel.
    accum.w = shared[sidx+3];
    setRGBA(fDestination,fImageWidth,fImageHeight,gx,gy,accum);
}
// Integer division rounded away from zero whenever there is a remainder
// (for positive operands this is the usual ceiling division, used to size
// the launch grid).
int ceil(int numer, int denom)
{
    int quotient = numer / denom;
    if (numer % denom != 0)
        ++quotient;
    return quotient;
}
// Initialize the device-resource holder: no allocations yet, and texture
// descriptors pre-filled for array-backed, point-sampled, raw-element reads.
MemObject::MemObject()
{
    // Start with no device allocations.
    dev_SourceImage    = 0;
    dev_ConvolvedImage = 0;
    cuImgArray         = 0;
    texObj             = 0;
    // Resource descriptor: the texture will be backed by a CUDA array.
    memset(&resDesc,0,sizeof(resDesc));
    resDesc.resType = cudaResourceTypeArray;
    // Sampling parameters. NOTE(review): cudaAddressModeWrap is only honored
    // with normalized coordinates; with normalizedCoords == 0 the runtime
    // falls back to clamping -- confirm that is the intended border behavior.
    memset(&texDesc,0,sizeof(texDesc));
    texDesc.addressMode[0]   = cudaAddressModeWrap;
    texDesc.addressMode[1]   = cudaAddressModeWrap;
    texDesc.filterMode       = cudaFilterModePoint;
    texDesc.readMode         = cudaReadModeElementType;
    texDesc.normalizedCoords = 0;
}
// Release every device allocation this object owns and reset the handles,
// so the object can be reused for a new image.
void MemObject::cleanMemory()
{
#if USE_CUDA_TEX_OBJECT
    // Texture-object build: destroy the texture before freeing its backing
    // CUDA array.
    if (texObj) {
        cudaDestroyTextureObject(texObj);
        texObj = 0;
    }
    if (cuImgArray) {
        cudaFreeArray(cuImgArray);
        cuImgArray = 0;
    }
#else
    // Plain-pointer build: the source image lives in linear device memory.
    if (dev_SourceImage) {
        cudaFree(dev_SourceImage);
    }
#endif
    if (dev_ConvolvedImage) {
        cudaFree(dev_ConvolvedImage);
    }
    dev_SourceImage = 0;
    dev_ConvolvedImage = 0;
}
// Release all device-side allocations owned by this object.
MemObject::~MemObject()
{
    cleanMemory();
}
// Lazily-constructed process-wide singleton holding the device buffers.
// NOTE(review): not thread-safe if first called concurrently -- confirm
// single-threaded initialization at the call sites.
MemObject* getMemObject(void)
{
    static MemObject* instance = 0;
    if (!instance)
        instance = new MemObject();
    return instance;
}
// Force creation of the singleton (and its constructor-time descriptor
// setup) ahead of first use.
void initMemObject(void)
{
    (void)getMemObject();
}
// Upload the convolution weights into constant memory. d_kernel holds at
// most 81 floats (a 9x9 kernel), so callers must keep count <= 81.
void setKernelOnDevice(float const * elements, const int count)
{
    const size_t numBytes = count * sizeof(float);
    HANDLE_ERROR( cudaMemcpyToSymbol(d_kernel, elements, numBytes) );
    CHECK_CUDA_ERRORS();
}
// Upload a width x height, 4-byte-per-pixel image to the device and
// (re)allocate the output buffer. Previous allocations are released first so
// the singleton can be reused for images of different sizes.
void setImageOnDevice(const uchar * image_data, const int image_width, const int image_height)
{
    MemObject* handle = getMemObject();
    handle->cleanMemory();
    handle->mImageWidth = image_width;
    handle->mImageHeight = image_height;
#if USE_CUDA_TEX_OBJECT
    handle->channelDesc = cudaCreateChannelDesc<uchar4>();
    HANDLE_ERROR( cudaMallocArray(&(handle->cuImgArray), &(handle->channelDesc), image_width, image_height) );
    // NOTE(review): cudaMemcpyToArray is deprecated since CUDA 10.1;
    // cudaMemcpy2DToArray is the supported replacement.
    HANDLE_ERROR( cudaMemcpyToArray(handle->cuImgArray, 0,0, image_data,
                                    image_width*image_height*4*sizeof(uchar),
                                    cudaMemcpyHostToDevice) );
    handle->resDesc.res.array.array = handle->cuImgArray;
    // Fix: this call's status was previously ignored; a failed creation left
    // texObj == 0 and the convolution kernel would sample an invalid texture.
    HANDLE_ERROR( cudaCreateTextureObject(&(handle->texObj),&(handle->resDesc),&(handle->texDesc),NULL) );
#else
    /* Allocate memory on device to hold image data */
    HANDLE_ERROR( cudaMalloc((void**)&handle->dev_SourceImage,
                             image_width*image_height*4*sizeof(uchar)) );
    CHECK_CUDA_ERRORS();
    /* Copy this data to device memory for kernel computation */
    HANDLE_ERROR( cudaMemcpy( handle->dev_SourceImage, image_data,
                              image_width*image_height*4*sizeof(uchar),
                              cudaMemcpyHostToDevice) );
    CHECK_CUDA_ERRORS();
#endif
    /* Allocate memory for output image on GPU device memory */
    HANDLE_ERROR( cudaMalloc((void**)&handle->dev_ConvolvedImage,
                             image_width*image_height*4*sizeof(uchar)) );
    CHECK_CUDA_ERRORS();
}
// Launch the shared-memory convolution over the stored image.
// kernel_radius must match the radius used when uploading weights via
// setKernelOnDevice (d_kernel is indexed as a (2r+1)^2 row-major grid).
void convolve(const int kernel_radius)
{
    static dim3 mThreadsPerBlock(TILE_WIDTH,TILE_HEIGHT);
    MemObject* handle = getMemObject();
    int image_width = handle->mImageWidth;
    int image_height = handle->mImageHeight;
    // Round the grid up so every pixel is covered; the kernel bounds-checks
    // the excess threads.
    dim3 mGrid(ceil(image_width, mThreadsPerBlock.x),
               ceil(image_height, mThreadsPerBlock.y));
    // Dynamic shared memory: one RGBA tile of (block + 2*radius)^2 pixels,
    // matching the kernel's slen-based tile indexing.
    int sharedMemSize = (mThreadsPerBlock.y+2*kernel_radius)*
                        (mThreadsPerBlock.x+2*kernel_radius)*
                        4*sizeof(uchar);
#ifdef _DEBUG_PRINTS_
    std::cout<<"Threads per block "<<mThreadsPerBlock.x<<","<<mThreadsPerBlock.y<<std::endl;
    std::cout<<"Blocks per grid "<<mGrid.x<<","<<mGrid.y<<std::endl;;
    std::cout<<"Shared memory usage : "<<sharedMemSize<<" Bytes"<<std::endl;;
#endif
    // Dispatch to whichever source representation this build uses.
#if USE_CUDA_TEX_OBJECT
    convolveKernel<<<mGrid,mThreadsPerBlock,sharedMemSize>>>(handle->texObj,
                                                            image_width,
                                                            image_height,
                                                            handle->dev_ConvolvedImage,
                                                            kernel_radius);
#else
    convolveKernel<<<mGrid,mThreadsPerBlock,sharedMemSize>>>(handle->dev_SourceImage,
                                                             image_width, image_height,
                                                             handle->dev_ConvolvedImage,
                                                             kernel_radius);
#endif
    CHECK_CUDA_ERRORS();
    cudaDeviceSynchronize();
}
// Copy the RGBA convolution result back into the caller's buffer, which
// must hold width*height*4 bytes.
void memCpyImageDeviceToHost(uchar* host_ptr)
{
    MemObject* handle = getMemObject();
    const size_t numBytes =
        handle->mImageWidth*handle->mImageHeight*4*sizeof(uchar);
    HANDLE_ERROR( cudaMemcpy(host_ptr, handle->dev_ConvolvedImage, numBytes,
                             cudaMemcpyDeviceToHost) );
    CHECK_CUDA_ERRORS();
}
|
196
|
#include <cuda_runtime.h>
#include <stdio.h>
//#include <stdbool.h>
extern "C" void MeanFilterCUDA(unsigned char* h_in, unsigned char* h_out, int nKernelSize, int rows, int cols);
//template <typename T> __global__ void MeanFilterCUDAkernel(T* pInput, T* pOutput, int nKernelSize, int nHeight, int nWidth)
// 3x3 box (mean) filter: interior pixels are summed over their 3x3
// neighbourhood and divided by nKernelSize (callers must pass 9 for a true
// mean); border pixels are copied through unchanged.
__global__ void MeanFilterCUDAkernel(unsigned char* pInput, unsigned char* pOutput, int nKernelSize, int nHeight, int nWidth)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;  // column
    int j = blockIdx.y;                             // row
    // Fix: the launch grid rounds the width up to a multiple of blockDim.x,
    // so threads past the last column previously computed 'pos' in the next
    // row (or past the buffer on the last row) and wrote there.
    if (i >= nWidth || j >= nHeight)
        return;
    int pos = j*nWidth + i; // flat pixel index
    if( i>0 && i < nWidth-1 && j > 0 && j < nHeight-1) //process scope
    {
        // Fix: 'temp1' was read before being initialized, so the sum started
        // from garbage and the output was nondeterministic.
        float temp1 = 0.0f;
        temp1 += pInput[pos];
        temp1 += pInput[pos+1];
        temp1 += pInput[pos-1];
        temp1 += pInput[pos - nWidth];
        temp1 += pInput[pos - nWidth + 1];
        temp1 += pInput[pos - nWidth - 1];
        temp1 += pInput[pos + nWidth];
        temp1 += pInput[pos + nWidth + 1];
        temp1 += pInput[pos + nWidth - 1];
        pOutput[pos] = (unsigned char)(temp1/nKernelSize);
    }
    else
    {
        // Borders: pass the input through unchanged.
        pOutput[pos]=pInput[pos];
    }
}
// Host wrapper: allocate device buffers, run the mean filter over a
// rows x cols 8-bit image, and copy the result back.
extern "C" void MeanFilterCUDA(unsigned char* h_in, unsigned char* h_out, int nKernelSize, int rows, int cols){
    printf("rows_kernel: %d \n", rows);
    printf("cols_kernel: %d \n", cols);
    // One thread per pixel: 256-wide 1-D blocks across each row, one grid
    // row per image row (grid x rounded up to cover the full width).
    dim3 block(256,1,1);
    dim3 grid((cols+255)/block.x, rows, 1);
    const size_t numBytes = rows*cols*sizeof(unsigned char);
    unsigned char* d_in;
    unsigned char* d_out;
    cudaMalloc((void**) &d_in, rows*cols);
    cudaMalloc((void**) &d_out, rows*cols);
    cudaMemcpy(d_in, h_in, numBytes, cudaMemcpyHostToDevice);
    MeanFilterCUDAkernel<<< grid, block >>>(d_in, d_out, nKernelSize, rows, cols);
    // The blocking D2H copy also synchronizes with the kernel.
    cudaMemcpy(h_out, d_out, numBytes, cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}
|
197
|
#include <cuda.h>
#include <iostream>
using namespace std;
// Marks voxel iGID as a local maximum: writes 255 to out1[iGID] when its
// intensity equals the maximum over a neighbourhood of +/-scale_xy in
// row/column and +/-scale_z in slice (clipped to the volume), else 0.
// 'offset' shifts the global index so the host can cover the whole volume
// with several fixed-size launches.
__global__ void LocalMaximaKernel_CUDA(float* im_vals, unsigned short* out1, int r, int c, int z, double scale_xy, double scale_z, int offset)
{
    int iGID = blockIdx.x * blockDim.x + threadIdx.x + offset; //global index
    if (iGID >= r * c * z)
        return;
    //calculate r, c, z indices as i, j, k from global index
    // Layout used throughout: index = k*(r*c) + i*c + j
    // (k = slice, i = row, j = column).
    int rem = ((long)iGID) % (r*c);
    int k = ((int)iGID-rem) / (r*c);
    int j = ((long)rem) % c;
    int i = (rem-j)/c;
    //calculate bounds (clipped to the volume on every axis)
    int min_r = (int) max(0.0,i-scale_xy);
    int min_c = (int) max(0.0,j-scale_xy);
    int min_z = (int) max(0.0,k-scale_z);
    int max_r = (int)min((float)r-1,i+scale_xy);
    int max_c = (int)min((float)c-1,j+scale_xy);
    int max_z = (int)min((float)z-1,k+scale_z);
    //get the intensity maximum of the bounded im_vals
    // NOTE(review): the loop variables below shadow the outer i, j, k; the
    // bounds were computed from the outer values beforehand, so this is safe.
    float mx = im_vals[(min_z*r*c)+(min_r*c)+min_c];
    for(int i = min_r; i <= max_r; i++)
    {
        for(int j = min_c; j <= max_c; j++)
        {
            for(int k = min_z; k <= max_z; k++)
            {
                if(im_vals[(k*r*c)+(i*c)+j] > mx)
                    mx = im_vals[(k*r*c)+(i*c)+j];
            }
        }
    }
    //if the current pixel is at the maximum intensity, set it to 255 in out1 (seedImagePtr), else set it to 0
    if(im_vals[iGID] == mx)
        out1[iGID]=255;
    else
        out1[iGID]=0;
}
// Host driver for LocalMaximaKernel_CUDA: uploads the r*c*z intensity
// volume, marks local maxima (255/0) into out1, and copies the result back.
// Fixes: cudaMalloc/cudaMemcpy statuses were assigned but never checked;
// the deprecated cudaThreadSynchronize() is replaced by
// cudaDeviceSynchronize(); the driver-API cuCtxSetCacheConfig is replaced
// by the runtime-API equivalent; byte counts are computed in size_t to
// avoid int overflow for large volumes.
extern "C"
void Detect_Local_MaximaPoints_3D_CUDA(float* im_vals, int r, int c, int z, double scale_xy, double scale_z, unsigned short* out1)
{
    cout << "Entering Detect_Local_MaximaPoints_3D_CUDA" << endl;
    cudaError_t errorcode;
    float* dev_im_vals;
    unsigned short* dev_out1;
    const size_t voxels = (size_t)r * c * z;
    //Allocate memory for im_vals and out1
    errorcode = cudaMalloc((void**) &dev_im_vals, voxels * sizeof(*im_vals));
    if (errorcode != cudaSuccess)
        cout << "cudaMalloc(dev_im_vals) failed: " << cudaGetErrorString(errorcode) << endl;
    errorcode = cudaMalloc((void**) &dev_out1, voxels * sizeof(*out1));
    if (errorcode != cudaSuccess)
        cout << "cudaMalloc(dev_out1) failed: " << cudaGetErrorString(errorcode) << endl;
    //Copy im_vals content into device space
    errorcode = cudaMemcpy(dev_im_vals, im_vals, voxels * sizeof(*im_vals), cudaMemcpyHostToDevice);
    if (errorcode != cudaSuccess)
        cout << "cudaMemcpy H2D failed: " << cudaGetErrorString(errorcode) << endl;
    //Prefer larger L1 cache (runtime API; previously the driver API's
    //cuCtxSetCacheConfig was mixed into runtime-API code).
    errorcode = cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
    int device;
    cudaDeviceProp device_prop;
    cudaGetDevice(&device);
    cudaGetDeviceProperties(&device_prop, device);
    // Max-sized blocks, one block per SM; 'k' offsets each launch so the
    // successive launches tile the whole volume.
    int threadsPerBlock = device_prop.maxThreadsDim[0];
    int numBlocks = device_prop.multiProcessorCount;
    for (int k = 0; k < r * c * z; k+= numBlocks * threadsPerBlock)
    {
        LocalMaximaKernel_CUDA<<< numBlocks , threadsPerBlock >>>(dev_im_vals, dev_out1, r, c, z, scale_xy, scale_z, k);
    }
    // The blocking D2H copy synchronizes with the preceding launches.
    errorcode = cudaMemcpy(out1, dev_out1, voxels * sizeof(*out1), cudaMemcpyDeviceToHost);
    if (errorcode != cudaSuccess)
        cout << "cudaMemcpy D2H failed: " << cudaGetErrorString(errorcode) << endl;
    //Block until all previous commands are complete
    //(cudaThreadSynchronize is deprecated).
    cudaDeviceSynchronize();
    cudaFree(dev_im_vals);
    cudaFree(dev_out1);
    cout << cudaGetErrorString(cudaGetLastError()) << endl;
    cout << "CUDA done" << endl;
}
|
198
|
#include "includes.h"
// Scalar-times-vector for arrays larger than one grid row: blockIdx.y
// selects a chunk of 'rowsz' elements, blockIdx.x/threadIdx.x index within
// the chunk; elements at or past 'len' are skipped.
__global__ void sax_kernel_large(const float a, const float* x, float* result, unsigned int len, unsigned int rowsz) {
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int idx = col + blockIdx.y * rowsz;
    if (idx < len) {
        result[idx] = a * x[idx];
    }
}
|
199
|
#include "math.h"
extern "C" const size_t SUB_MATRIX_DIM = 32;
extern "C" const size_t SUB_VECTOR_LEN = 256;
typedef struct {
size_t rows;
size_t cols;
float *elements;
} Matrix;
// Pointer to the top-left element of the (block_row, block_col)
// SUB_MATRIX_DIM-square tile inside row-major matrix m.
__device__ float *sub_block(Matrix m, int block_row, int block_col) {
  size_t offset = (block_row * SUB_MATRIX_DIM * m.cols)
                + (block_col * SUB_MATRIX_DIM);
  return m.elements + offset;
}
// Atomic max for floats via bit reinterpretation.
// Non-negative IEEE-754 floats order the same as their signed-int bit
// patterns, so atomicMax on int bits works. Negative floats order in
// REVERSE as unsigned bit patterns (more negative => larger bits), so the
// float maximum corresponds to the MINIMUM unsigned pattern.
// Fix: the negative branch used atomicMax, which stored the most-negative
// value instead of the maximum; it must be atomicMin.
// (Assumes *addr and val are not NaN, as in the original.)
__device__ float atomicMaxf(float *addr, float val) {
  return val >= 0
    ? __int_as_float(atomicMax((int *)addr, __float_as_int(val)))
    : __uint_as_float(atomicMin((unsigned int *)addr, __float_as_uint(val)));
}
// Element-wise approximate-equality test: clears *equal on the first pair
// differing by >= epsilon. Threads stop scanning once any thread has
// flagged a mismatch (*equal is checked each iteration).
__global__
void kernel_eq_mats(float *lhs, float *rhs, size_t len, float epsilon, bool *equal) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  const int step = blockDim.x * gridDim.x;
  while (*equal && i < len) {
    if (fabs(lhs[i] - rhs[i]) >= epsilon)
      *equal = false;
    i += step;
  }
}
// Element-wise sum rhs = lhs1 + lhs2 over len floats, grid-stride style.
__global__
void kernel_add_mats(float *lhs1, float *lhs2, float *rhs, size_t len) {
  const int step = blockDim.x * gridDim.x;
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  while (i < len) {
    rhs[i] = lhs1[i] + lhs2[i];
    i += step;
  }
}
// Element-wise scaling rhs = scalar * lhs over len floats, grid-stride style.
__global__
void kernel_mul_scalar_mat(float scalar, float *lhs, float *rhs, size_t len) {
  const int step = blockDim.x * gridDim.x;
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  while (i < len) {
    rhs[i] = scalar * lhs[i];
    i += step;
  }
}
// Tiled matrix multiply rhs = lhs1 * lhs2: each block computes one
// SUB_MATRIX_DIM-square tile of rhs, marching along the shared dimension a
// tile at a time and staging both operand tiles in shared memory.
// Assumes blockDim == (SUB_MATRIX_DIM, SUB_MATRIX_DIM).
// NOTE(review): block_cols rounds up, but the tile loads are not bounds
// checked -- for dimensions that are not multiples of SUB_MATRIX_DIM the
// final partial tiles read past the matrix data. Confirm callers pad their
// matrices.
__global__
void kernel_mul_mats(Matrix lhs1, Matrix lhs2, Matrix rhs) {
  int block_row = blockIdx.y;
  int block_col = blockIdx.x;
  int thread_row = threadIdx.y;
  int thread_col = threadIdx.x;
  float *rhs_sub = sub_block(rhs, block_row, block_col);
  float rhs_elem_val = 0;   // this thread's output element accumulator
  // Number of tiles along the shared dimension, rounded up.
  int block_cols =
    (lhs1.cols / SUB_MATRIX_DIM) + (lhs1.cols % SUB_MATRIX_DIM != 0);
  for (int sub_i = 0; sub_i < block_cols; ++sub_i) {
    float *lhs1_sub = sub_block(lhs1, block_row, sub_i);
    float *lhs2_sub = sub_block(lhs2, sub_i, block_col);
    __shared__ float shared_lhs1_sub[SUB_MATRIX_DIM][SUB_MATRIX_DIM];
    __shared__ float shared_lhs2_sub[SUB_MATRIX_DIM][SUB_MATRIX_DIM];
    // Cooperative load: one element per thread for each operand tile.
    shared_lhs1_sub[thread_row][thread_col]
      = lhs1_sub[thread_row * lhs1.cols + thread_col];
    shared_lhs2_sub[thread_row][thread_col]
      = lhs2_sub[thread_row * lhs2.cols + thread_col];
    __syncthreads();   // tiles fully loaded before anyone reads them
    for (int rhs_i = 0; rhs_i < SUB_MATRIX_DIM; ++rhs_i) {
      rhs_elem_val +=
        shared_lhs1_sub[thread_row][rhs_i] * shared_lhs2_sub[rhs_i][thread_col];
    }
    __syncthreads();   // everyone done before the tiles are overwritten
  }
  rhs_sub[thread_row * rhs.cols + thread_col] = rhs_elem_val;
}
// Tiled transpose rhs = lhs^T: each block moves one SUB_MATRIX_DIM-square
// tile of lhs into the mirrored tile position of rhs via shared memory.
// Assumes blockDim == (SUB_MATRIX_DIM, SUB_MATRIX_DIM) and dimensions that
// are multiples of SUB_MATRIX_DIM -- TODO confirm at the launch site.
// Improvements (results identical element-for-element): the global write is
// now indexed by thread_row*cols+thread_col so it is coalesced along
// threadIdx.x (the transposition happens in the shared-memory read instead),
// and the +1 column of padding removes shared-memory bank conflicts on that
// column-wise read.
__global__
void kernel_transpose_mat(Matrix lhs, Matrix rhs) {
  int block_row = blockIdx.y;
  int block_col = blockIdx.x;
  int thread_row = threadIdx.y;
  int thread_col = threadIdx.x;
  float *lhs_sub = sub_block(lhs, block_row, block_col);
  float *rhs_sub = sub_block(rhs, block_col, block_row);
  __shared__ float shared_lhs_sub[SUB_MATRIX_DIM][SUB_MATRIX_DIM + 1];
  shared_lhs_sub[thread_row][thread_col]
    = lhs_sub[thread_row * lhs.cols + thread_col];
  __syncthreads();
  rhs_sub[thread_row * rhs.cols + thread_col]
    = shared_lhs_sub[thread_col][thread_row];
}
// Dot-product partial: each block multiplies its SUB_VECTOR_LEN-element
// slice pairwise in shared memory, thread 0 sums the slice serially and
// atomically adds the block total into *rhs.
// Assumes blockDim.x == SUB_VECTOR_LEN and a grid that exactly covers the
// vectors.
__global__
void kernel_dot_vecs(float *lhs1, float *lhs2, float *rhs) {
  __shared__ float partial[SUB_VECTOR_LEN];
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  partial[threadIdx.x] = lhs1[gid] * lhs2[gid];
  __syncthreads();
  if (threadIdx.x != 0)
    return;
  float blockSum = 0.0f;
  for (int i = 0; i < SUB_VECTOR_LEN; ++i)
    blockSum += partial[i];
  atomicAdd(rhs, blockSum);
}
// p-norm partial: accumulates sum(|x|^p) into *rhs; taking the p-th root is
// left to the host. Same block layout assumptions as kernel_dot_vecs.
__global__
void kernel_p_norm_vec(float *lhs, float p, float *rhs) {
  __shared__ float partial[SUB_VECTOR_LEN];
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  partial[threadIdx.x] = powf(fabsf(lhs[gid]), p);
  __syncthreads();
  if (threadIdx.x != 0)
    return;
  float blockSum = 0.0f;
  for (int i = 0; i < SUB_VECTOR_LEN; ++i)
    blockSum += partial[i];
  atomicAdd(rhs, blockSum);
}
// Infinity-norm partial: folds max(|x|) over the block's slice, then merges
// the block maximum into *rhs via the float atomic-max helper.
__global__
void kernel_inf_norm_vec(float *lhs, float *rhs) {
  __shared__ float partial[SUB_VECTOR_LEN];
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  partial[threadIdx.x] = fabs(lhs[gid]);
  __syncthreads();
  if (threadIdx.x != 0)
    return;
  float blockMax = 0.0f;
  for (int i = 0; i < SUB_VECTOR_LEN; ++i)
    blockMax = fmaxf(blockMax, partial[i]);
  atomicMaxf(rhs, blockMax);
}
|
200
|
#include "includes.h"
// Single-result kernel: stores initial + 1 into buffer[0].
__global__ void GPU_increment_number(int* buffer, int initial)
{
    *buffer = initial + 1;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.